/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996-2016 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "insn-attr.h"
#include "tm-constrs.h"

/* This file should be included last.  */
#include "target-def.h"
/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;
static int cc_flags_for_mode (machine_mode);
static int cc_flags_for_code (enum rtx_code);
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
	 not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
	 to run out of registers.  Indeed, this works so well that register
	 allocation problems are now more common *without* optimization,
	 when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
	mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
	mn10300_tune_cpu = PROCESSOR_AM34;
      else
	error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}
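/* Illustrative usage (not part of the original sources): the tuning
   selection above is driven by the -mtune command-line option, so a
   compilation such as

       mn10300-elf-gcc -O2 -mtune=am34 foo.c

   sets mn10300_tune_cpu to PROCESSOR_AM34, while an unrecognized value
   falls through to the error above.  The target triplet shown is only
   an example.  */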
static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}
/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
/* Print operand X using operand code CODE to assembly language output
   file FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
      unsigned int liw_op = UINTVAL (x);

      gcc_assert (TARGET_ALLOW_LIW);
      gcc_assert (liw_op < LIW_OP_MAX);
      fputs (liw_op_names[liw_op], file);
      enum rtx_code cmp = GET_CODE (x);
      machine_mode mode = GET_MODE (XEXP (x, 0));

	cmp = reverse_condition (cmp);
      have_flags = cc_flags_for_mode (mode);

	  /* bge is smaller than bnc.  */
	  str = (have_flags & CC_FLAG_V ? "ge" : "nc");

	  str = (have_flags & CC_FLAG_V ? "lt" : "ns");

      gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */

	  mn10300_print_operand (file, x, 0);

	  mn10300_print_operand (file, x, 0);

      switch (GET_CODE (x))

	  output_address (GET_MODE (x), XEXP (x, 0));

	  fprintf (file, "fd%d", REGNO (x) - 18);
      /* These are the least significant word in a 64bit value.  */
      switch (GET_CODE (x))

	  output_address (GET_MODE (x), XEXP (x, 0));

	  fprintf (file, "%s", reg_names[REGNO (x)]);

	  fprintf (file, "%s", reg_names[subreg_regno (x)]);

	  switch (GET_MODE (x))

	      REAL_VALUE_TO_TARGET_DOUBLE
		(*CONST_DOUBLE_REAL_VALUE (x), val);
	      fprintf (file, "0x%lx", val[0]);

	      REAL_VALUE_TO_TARGET_SINGLE
		(*CONST_DOUBLE_REAL_VALUE (x), val[0]);
	      fprintf (file, "0x%lx", val[0]);

	      mn10300_print_operand_address (file,
					     GEN_INT (CONST_DOUBLE_LOW (x)));

	  split_double (x, &low, &high);
	  fprintf (file, "%ld", (long)INTVAL (low));
      /* Similarly, but for the most significant word.  */
      switch (GET_CODE (x))

	  x = adjust_address (x, SImode, 4);
	  output_address (GET_MODE (x), XEXP (x, 0));

	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);

	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);

	  switch (GET_MODE (x))

	      REAL_VALUE_TO_TARGET_DOUBLE
		(*CONST_DOUBLE_REAL_VALUE (x), val);
	      fprintf (file, "0x%lx", val[1]);

	      mn10300_print_operand_address (file,
					     GEN_INT (CONST_DOUBLE_HIGH (x)));

	  split_double (x, &low, &high);
	  fprintf (file, "%ld", (long)INTVAL (high));
      if (REG_P (XEXP (x, 0)))
	output_address (VOIDmode, gen_rtx_PLUS (SImode,
						XEXP (x, 0), const0_rtx));
      else
	output_address (VOIDmode, XEXP (x, 0));

      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));

      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));

      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out of range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
      fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
      switch (GET_CODE (x))

	  output_address (GET_MODE (x), XEXP (x, 0));

	  output_address (VOIDmode, x);

	  fprintf (file, "%s", reg_names[REGNO (x)]);

	  fprintf (file, "%s", reg_names[subreg_regno (x)]);

	  /* This will only be single precision....  */
	  REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), val);
	  fprintf (file, "0x%lx", val);

	  mn10300_print_operand_address (file, x);
/* Output assembly language output for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))

      mn10300_print_operand (file, XEXP (addr, 0), 0);

      mn10300_print_operand (file, XEXP (addr, 0), 0);

      mn10300_print_operand (file, XEXP (addr, 1), 0);

      mn10300_print_operand (file, addr, 0);

	rtx base = XEXP (addr, 0);
	rtx index = XEXP (addr, 1);

	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))

	gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));

	gcc_assert (REG_OK_FOR_BASE_P (base));

	mn10300_print_operand (file, index, 0);

	mn10300_print_operand (file, base, 0);

      output_addr_const (file, addr);

      output_addr_const (file, addr);
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)

	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));

	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);

	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);

	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);

	case UNSPEC_GOTSYM_OFF:
	  assemble_name (file, GOT_SYMBOL_NAME);
	  output_addr_const (file, XVECEXP (x, 0, 0));
/* Count the number of FP registers that have to be saved.  */

static int
fp_regs_to_save (void)
{
  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

static void
mn10300_print_reg_list (FILE *file, int mask)
{
  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)

	fputs (reg_names[i], file);

  if ((mask & 0x3c000) != 0)

      gcc_assert ((mask & 0x3c000) == 0x3c000);

      fputs ("exreg1", file);
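  /* Worked example (illustrative, not from the original sources): a MASK
     of 0x3c004 has bit 2 (d2) set together with bits 14-17, so the loop
     above emits "d2" and the extended-register block emits "exreg1"; the
     enclosing brackets and separating commas come from bookkeeping code
     not shown in this fragment.  Bits 14-17 must be all set or all
     clear, which is what the gcc_assert enforces.  */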
/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}

bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}
/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.
   Also returns the number of bytes in the registers in the mask if
   BYTES_SAVED is not NULL.  */

unsigned int
mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
{
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])

  if ((mask & 0x3c000) != 0)

    for (i = 0x04000; i < 0x40000; i <<= 1)

  if (bytes_saved)
    * bytes_saved = count * UNITS_PER_WORD;

static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}
/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
	 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
	 (set (mem:SI (plus:SI (reg:SI 9) ...)) ...)
	 ...
	 (set (mem:SI (plus:SI (reg:SI 9) ...)) ...))  */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  for (i = count = 0; i < ARRAY_SIZE (store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (x, gen_rtx_REG (SImode, regno));

      /* Remove the register from the mask so that...  */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (stack_pointer_rtx, x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
static inline unsigned int
popcount (unsigned int mask)
{
  unsigned int count = 0;

  while (mask)
    {
      ++ count;
      /* Clear the least significant set bit.  */
      mask &= ~ (mask & - mask);
    }
  return count;
}
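/* Illustrative trace (not part of the original sources): for
   mask = 0b101100, (mask & -mask) isolates the lowest set bit (0b100),
   so the first pass leaves 0b101000; two more passes clear bits 3 and 5,
   giving a final count of 3 set bits.  */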
void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();

  mask = mn10300_get_live_callee_saved_regs (NULL);
  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mask);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size + popcount (mask) * 4;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;

      if (flag_stack_usage_info)
	current_function_static_stack_size += num_regs_to_save * 4;
      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate
	 smallest code.  Ties are broken in favor of shorter sequences
	 (in terms of number of instructions).  */

#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
   SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
		    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
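/* Worked examples of the size estimates above (illustrative, not from
   the original sources): SIZE_ADD_SP (-4) is 3 bytes because -4 fits in
   a signed 8-bit immediate, SIZE_ADD_SP (-200) is 4 bytes because -200
   needs 16 bits, and SIZE_ADD_SP (-40000) is 6 bytes because it needs a
   full 32-bit immediate.  SIZE_ADD_AX follows the same pattern but with
   a 2-byte short form.  */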
      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);

	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}
      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);

	  /* Insn: add 128-size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (128 - size);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs [FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;

	  /* Insn: add size, a0.  */
	  this_strategy_size += SIZE_ADD_AX (size);

	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}
      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs [FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}
      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;

	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the ...  */
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_sp_partial_merge:

	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));

	  F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
	  {
	    /* If we aren't using `a0', use an SP offset.  */
	      addr = gen_rtx_PLUS (SImode,
				   stack_pointer_rtx,
				   GEN_INT (xsize));

	      addr = stack_pointer_rtx;

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}
void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int reg_save_bytes;

  mn10300_get_live_callee_saved_regs (& reg_save_bytes);

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;

      /* We have several options to restore FP registers.  We could
	 load them from SP offsets, but, if there are enough FP
	 registers to restore, we win if we use a post-increment
	 addressing mode.  */

      /* If we have a frame pointer, it's the best option, because we
	 already know it has the value we want.  */
      if (frame_pointer_needed)
	reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
	 it's never used for return values.  But only do so if it's
	 smaller than using SP offsets.  */
      else
	{
	  enum { restore_sp_post_adjust,
		 restore_sp_pre_adjust,
		 restore_sp_partial_adjust,
		 restore_a1 } strategy;
	  unsigned int this_strategy_size, strategy_size = (unsigned)-1;
1056 /* Consider using sp offsets before adjusting sp. */
1057 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1058 this_strategy_size
= SIZE_FMOV_SP (size
, num_regs_to_save
);
1059 /* If size is too large, we'll have to adjust SP with an
1061 if (size
+ 4 * num_regs_to_save
+ reg_save_bytes
> 255)
1063 /* Insn: add size + 4 * num_regs_to_save, sp. */
1064 this_strategy_size
+= SIZE_ADD_SP (size
+ 4 * num_regs_to_save
);
1066 /* If we don't have to restore any non-FP registers,
1067 we'll be able to save one byte by using rets. */
1068 if (! reg_save_bytes
)
1069 this_strategy_size
--;
1071 if (this_strategy_size
< strategy_size
)
1073 strategy
= restore_sp_post_adjust
;
1074 strategy_size
= this_strategy_size
;
1077 /* Consider using sp offsets after adjusting sp. */
1078 /* Insn: add size, sp. */
1079 this_strategy_size
= SIZE_ADD_SP (size
);
1080 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1081 this_strategy_size
+= SIZE_FMOV_SP (0, num_regs_to_save
);
1082 /* We're going to use ret to release the FP registers
1083 save area, so, no savings. */
1085 if (this_strategy_size
< strategy_size
)
1087 strategy
= restore_sp_pre_adjust
;
1088 strategy_size
= this_strategy_size
;
1091 /* Consider using sp offsets after partially adjusting sp.
1092 When size is close to 32Kb, we may be able to adjust SP
1093 with an imm16 add instruction while still using fmov
1095 if (size
+ 4 * num_regs_to_save
+ reg_save_bytes
> 255)
1097 /* Insn: add size + 4 * num_regs_to_save
1098 + reg_save_bytes - 252,sp. */
1099 this_strategy_size
= SIZE_ADD_SP (size
+ 4 * num_regs_to_save
1100 + (int) reg_save_bytes
- 252);
	      /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
1102 this_strategy_size
+= SIZE_FMOV_SP (252 - reg_save_bytes
1103 - 4 * num_regs_to_save
,
1105 /* We're going to use ret to release the FP registers
1106 save area, so, no savings. */
1108 if (this_strategy_size
< strategy_size
)
1110 strategy
= restore_sp_partial_adjust
;
1111 strategy_size
= this_strategy_size
;
1115 /* Consider using a1 in post-increment mode, as long as the
1116 user hasn't changed the calling conventions of a1. */
1117 if (call_really_used_regs
[FIRST_ADDRESS_REGNUM
+ 1]
1118 && ! fixed_regs
[FIRST_ADDRESS_REGNUM
+1])
1120 /* Insn: mov sp,a1. */
1121 this_strategy_size
= 1;
1124 /* Insn: add size,a1. */
1125 this_strategy_size
+= SIZE_ADD_AX (size
);
1127 /* Insn: fmov (a1+),fs#, for each fs# to be restored. */
1128 this_strategy_size
+= 3 * num_regs_to_save
;
1129 /* If size is large enough, we may be able to save a
1131 if (size
+ 4 * num_regs_to_save
+ reg_save_bytes
> 255)
1133 /* Insn: mov a1,sp. */
1134 this_strategy_size
+= 2;
1136 /* If we don't have to restore any non-FP registers,
1137 we'll be able to save one byte by using rets. */
1138 if (! reg_save_bytes
)
1139 this_strategy_size
--;
1141 if (this_strategy_size
< strategy_size
)
1143 strategy
= restore_a1
;
1144 strategy_size
= this_strategy_size
;
1150 case restore_sp_post_adjust
:
1153 case restore_sp_pre_adjust
:
1154 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1160 case restore_sp_partial_adjust
:
1161 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1163 GEN_INT (size
+ 4 * num_regs_to_save
1164 + reg_save_bytes
- 252)));
1165 size
= 252 - reg_save_bytes
- 4 * num_regs_to_save
;
1169 reg
= gen_rtx_REG (SImode
, FIRST_ADDRESS_REGNUM
+ 1);
1170 emit_insn (gen_movsi (reg
, stack_pointer_rtx
));
1172 emit_insn (gen_addsi3 (reg
, reg
, GEN_INT (size
)));
1180 /* Adjust the selected register, if any, for post-increment. */
1182 reg
= gen_rtx_POST_INC (SImode
, reg
);
1184 for (i
= FIRST_FP_REGNUM
; i
<= LAST_FP_REGNUM
; ++i
)
1185 if (df_regs_ever_live_p (i
) && ! call_really_used_regs
[i
])
1193 /* If we aren't using a post-increment register, use an
1195 addr
= gen_rtx_PLUS (SImode
,
1200 addr
= stack_pointer_rtx
;
1204 emit_insn (gen_movsf (gen_rtx_REG (SFmode
, i
),
1205 gen_rtx_MEM (SFmode
, addr
)));
1208 /* If we were using the restore_a1 strategy and the number of
1209 bytes to be released won't fit in the `ret' byte, copy `a1'
1210 to `sp', to avoid having to use `add' to adjust it. */
1211 if (! frame_pointer_needed
&& reg
&& size
+ reg_save_bytes
> 255)
1213 emit_move_insn (stack_pointer_rtx
, XEXP (reg
, 0));
  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
}
/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */
1259 mn10300_store_multiple_regs (rtx op
)
1267 count
= XVECLEN (op
, 0);
1271 /* Check that first instruction has the form (set (sp) (plus A B)) */
1272 elt
= XVECEXP (op
, 0, 0);
1273 if (GET_CODE (elt
) != SET
1274 || (! REG_P (SET_DEST (elt
)))
1275 || REGNO (SET_DEST (elt
)) != STACK_POINTER_REGNUM
1276 || GET_CODE (SET_SRC (elt
)) != PLUS
)
1279 /* Check that A is the stack pointer and B is the expected stack size.
1280 For OP to match, each subsequent instruction should push a word onto
1281 the stack. We therefore expect the first instruction to create
1282 COUNT-1 stack slots. */
1283 elt
= SET_SRC (elt
);
1284 if ((! REG_P (XEXP (elt
, 0)))
1285 || REGNO (XEXP (elt
, 0)) != STACK_POINTER_REGNUM
1286 || (! CONST_INT_P (XEXP (elt
, 1)))
1287 || INTVAL (XEXP (elt
, 1)) != -(count
- 1) * 4)
1291 for (i
= 1; i
< count
; i
++)
1293 /* Check that element i is a (set (mem M) R). */
1294 /* ??? Validate the register order a-la mn10300_gen_multiple_store.
1295 Remember: the ordering is *not* monotonic. */
1296 elt
= XVECEXP (op
, 0, i
);
1297 if (GET_CODE (elt
) != SET
1298 || (! MEM_P (SET_DEST (elt
)))
1299 || (! REG_P (SET_SRC (elt
))))
1302 /* Remember which registers are to be saved. */
1303 last
= REGNO (SET_SRC (elt
));
1304 mask
|= (1 << last
);
1306 /* Check that M has the form (plus (sp) (const_int -I*4)) */
1307 elt
= XEXP (SET_DEST (elt
), 0);
1308 if (GET_CODE (elt
) != PLUS
1309 || (! REG_P (XEXP (elt
, 0)))
1310 || REGNO (XEXP (elt
, 0)) != STACK_POINTER_REGNUM
1311 || (! CONST_INT_P (XEXP (elt
, 1)))
1312 || INTVAL (XEXP (elt
, 1)) != -i
* 4)
1316 /* All or none of the callee-saved extended registers must be in the set. */
1317 if ((mask
& 0x3c000) != 0
1318 && (mask
& 0x3c000) != 0x3c000)
/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
	       && !HARD_REGISTER_P (x))
	   || (GET_CODE (x) == SUBREG
	       && REG_P (SUBREG_REG (x))
	       && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
}

/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}
/* Implement TARGET_SECONDARY_RELOAD.  */
1355 mn10300_secondary_reload (bool in_p
, rtx x
, reg_class_t rclass_i
,
1356 machine_mode mode
, secondary_reload_info
*sri
)
1358 enum reg_class rclass
= (enum reg_class
) rclass_i
;
1359 enum reg_class xclass
= NO_REGS
;
1360 unsigned int xregno
= INVALID_REGNUM
;
1365 if (xregno
>= FIRST_PSEUDO_REGISTER
)
1366 xregno
= true_regnum (x
);
1367 if (xregno
!= INVALID_REGNUM
)
1368 xclass
= REGNO_REG_CLASS (xregno
);
1373 /* Memory load/stores less than a full word wide can't have an
1374 address or stack pointer destination. They must use a data
1375 register as an intermediate register. */
1376 if (rclass
!= DATA_REGS
1377 && (mode
== QImode
|| mode
== HImode
)
1378 && xclass
== NO_REGS
)
1381 /* We can only move SP to/from an address register. */
1383 && rclass
== SP_REGS
1384 && xclass
!= ADDRESS_REGS
)
1385 return ADDRESS_REGS
;
1387 && xclass
== SP_REGS
1388 && rclass
!= ADDRESS_REGS
1389 && rclass
!= SP_OR_ADDRESS_REGS
)
1390 return ADDRESS_REGS
;
  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
1396 && rclass
!= SP_REGS
1397 && rclass
!= SP_OR_ADDRESS_REGS
1398 && rclass
!= SP_OR_GENERAL_REGS
1399 && GET_CODE (x
) == PLUS
1400 && (XEXP (x
, 0) == stack_pointer_rtx
1401 || XEXP (x
, 1) == stack_pointer_rtx
))
1403 sri
->icode
= CODE_FOR_reload_plus_sp_const
;
1407 /* We can only move MDR to/from a data register. */
1408 if (rclass
== MDR_REGS
&& xclass
!= DATA_REGS
)
1410 if (xclass
== MDR_REGS
&& rclass
!= DATA_REGS
)
1413 /* We can't load/store an FP register from a constant address. */
1415 && (rclass
== FP_REGS
|| xclass
== FP_REGS
)
1416 && (xclass
== NO_REGS
|| rclass
== NO_REGS
))
1420 if (xregno
>= FIRST_PSEUDO_REGISTER
&& xregno
!= INVALID_REGNUM
)
1422 addr
= reg_equiv_mem (xregno
);
1424 addr
= XEXP (addr
, 0);
1429 if (addr
&& CONSTANT_ADDRESS_P (addr
))
1430 return GENERAL_REGS
;
1432 /* Otherwise assume no secondary reloads are needed. */
static int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}
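/* Illustrative example (not in the original sources): a function with
   32 bytes of locals and 8 bytes of outgoing argument space has a frame
   size of 32 + 8 + 4 = 44 bytes, the extra 4 bytes being the return
   pointer slot added above; with no outgoing arguments no such slot is
   reserved.  */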
int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      unsigned int reg_save_bytes;

      mn10300_get_live_callee_saved_regs (& reg_save_bytes);
      diff += reg_save_bytes;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
	  || int_size_in_bytes (type) == 0
	  || TYPE_MODE (type) == BLKmode);
}
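/* Illustrative examples (not part of the original sources): a 12-byte
   struct, a zero-sized type, or any BLKmode value is returned in memory,
   while an 8-byte long long or double comes back in registers.  */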
1484 /* Flush the argument registers to the stack for a stdarg function;
1485 return the new argument pointer. */
1487 mn10300_builtin_saveregs (void)
1490 tree fntype
= TREE_TYPE (current_function_decl
);
1491 int argadj
= ((!stdarg_p (fntype
))
1492 ? UNITS_PER_WORD
: 0);
1493 alias_set_type set
= get_varargs_alias_set ();
1496 offset
= plus_constant (Pmode
, crtl
->args
.arg_offset_rtx
, argadj
);
1498 offset
= crtl
->args
.arg_offset_rtx
;
1500 mem
= gen_rtx_MEM (SImode
, crtl
->args
.internal_arg_pointer
);
1501 set_mem_alias_set (mem
, set
);
1502 emit_move_insn (mem
, gen_rtx_REG (SImode
, 0));
1504 mem
= gen_rtx_MEM (SImode
,
1505 plus_constant (Pmode
,
1506 crtl
->args
.internal_arg_pointer
, 4));
1507 set_mem_alias_set (mem
, set
);
1508 emit_move_insn (mem
, gen_rtx_REG (SImode
, 1));
1510 return copy_to_reg (expand_binop (Pmode
, add_optab
,
1511 crtl
->args
.internal_arg_pointer
,
1512 offset
, 0, 0, OPTAB_LIB_WIDEN
));
1516 mn10300_va_start (tree valist
, rtx nextarg
)
1518 nextarg
= expand_builtin_saveregs ();
1519 std_expand_builtin_va_start (valist
, nextarg
);
/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			   machine_mode mode, const_tree type,
			   bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}
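/* Illustrative examples (not from the original sources): a 16-byte
   structure argument is passed by reference, while an 8-byte double or a
   4-byte int is passed by value, possibly in registers (see
   mn10300_function_arg below).  */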
/* Return an RTX to represent where an argument with mode MODE and type
   TYPE will be passed.  If the result is NULL_RTX, the argument is
   pushed.  */

static rtx
mn10300_function_arg (cumulative_args_t cum_v, machine_mode mode,
		      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  rtx result = NULL_RTX;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
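/* Illustrative example (not part of the original sources): with two
   argument registers available, the first word-aligned argument of a
   prototyped call lands in FIRST_ARGUMENT_REGNUM (d0), the second in
   FIRST_ARGUMENT_REGNUM + 1 (d1), and once 8 bytes of register arguments
   are used up NULL_RTX is returned and the argument goes on the
   stack.  */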
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  cum->nbytes += (mode != BLKmode
		  ? (GET_MODE_SIZE (mode) + 3) & ~3
		  : (int_size_in_bytes (type) + 3) & ~3);
}
/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
			   tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
			bool outgoing)
{
  rtx rv;
  machine_mode mode = TYPE_MODE (valtype);

  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  else if (! TARGET_PTR_A0D0 || ! outgoing
	   || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),

    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
/* Implements TARGET_LIBCALL_VALUE.  */

static rtx
mn10300_libcall_value (machine_mode mode,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
}

/* Implements FUNCTION_VALUE_REGNO_P.  */

bool
mn10300_function_value_regno_p (const unsigned int regno)
{
  return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
}
1690 /* Output an addition operation. */
1693 mn10300_output_add (rtx operands
[3], bool need_flags
)
1695 rtx dest
, src1
, src2
;
1696 unsigned int dest_regnum
, src1_regnum
, src2_regnum
;
1697 enum reg_class src1_class
, src2_class
, dest_class
;
1703 dest_regnum
= true_regnum (dest
);
1704 src1_regnum
= true_regnum (src1
);
1706 dest_class
= REGNO_REG_CLASS (dest_regnum
);
1707 src1_class
= REGNO_REG_CLASS (src1_regnum
);
1709 if (CONST_INT_P (src2
))
1711 gcc_assert (dest_regnum
== src1_regnum
);
1713 if (src2
== const1_rtx
&& !need_flags
)
1715 if (INTVAL (src2
) == 4 && !need_flags
&& dest_class
!= DATA_REGS
)
1718 gcc_assert (!need_flags
|| dest_class
!= SP_REGS
);
1721 else if (CONSTANT_P (src2
))
1724 src2_regnum
= true_regnum (src2
);
1725 src2_class
= REGNO_REG_CLASS (src2_regnum
);
1727 if (dest_regnum
== src1_regnum
)
1729 if (dest_regnum
== src2_regnum
)
  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
	 add the other source to the destination.

	 Carefully select which source to copy to the destination; a
	 naive implementation will waste a byte when the source classes
	 are different and the destination is an address register.
	 Selecting the lowest cost register copy will optimize this
	 sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      else
	return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
mn10300_symbolic_operand (rtx op,
			  machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))

      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && CONST_INT_P (XEXP (op, 1)));
/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	  regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	  regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	  regx1 = force_reg (Pmode,
			     gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
					     regy2));
	  return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	}
    }
  return x;
}
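/* Illustrative transformation (not part of the original sources): for an
   address such as (plus (reg X) (const (plus (symbol_ref sym)
   (const_int -100000)))), the code above forces X and -100000 into
   registers, combines them first, and only then adds sym, so the
   problematic "sym-100000" value never becomes the base of an indexed
   address.  */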
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */
1860 mn10300_legitimize_pic_address (rtx orig
, rtx reg
)
1864 if (GET_CODE (orig
) == LABEL_REF
1865 || (GET_CODE (orig
) == SYMBOL_REF
1866 && (CONSTANT_POOL_ADDRESS_P (orig
)
1867 || ! MN10300_GLOBAL_P (orig
))))
1870 reg
= gen_reg_rtx (Pmode
);
1872 x
= gen_rtx_UNSPEC (SImode
, gen_rtvec (1, orig
), UNSPEC_GOTOFF
);
1873 x
= gen_rtx_CONST (SImode
, x
);
1874 emit_move_insn (reg
, x
);
1876 x
= emit_insn (gen_addsi3 (reg
, reg
, pic_offset_table_rtx
));
1878 else if (GET_CODE (orig
) == SYMBOL_REF
)
1881 reg
= gen_reg_rtx (Pmode
);
1883 x
= gen_rtx_UNSPEC (SImode
, gen_rtvec (1, orig
), UNSPEC_GOT
);
1884 x
= gen_rtx_CONST (SImode
, x
);
1885 x
= gen_rtx_PLUS (SImode
, pic_offset_table_rtx
, x
);
1886 x
= gen_const_mem (SImode
, x
);
1888 x
= emit_move_insn (reg
, x
);
1893 set_unique_reg_note (x
, REG_EQUAL
, orig
);
/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  */
1901 mn10300_legitimate_pic_operand_p (rtx x
)
1906 if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
)
1909 if (GET_CODE (x
) == UNSPEC
1910 && (XINT (x
, 1) == UNSPEC_PIC
1911 || XINT (x
, 1) == UNSPEC_GOT
1912 || XINT (x
, 1) == UNSPEC_GOTOFF
1913 || XINT (x
, 1) == UNSPEC_PLT
1914 || XINT (x
, 1) == UNSPEC_GOTSYM_OFF
))
1917 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
1918 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
1924 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1925 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x
, i
, j
)))
1928 else if (fmt
[i
] == 'e'
1929 && ! mn10300_legitimate_pic_operand_p (XEXP (x
, i
)))
/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.c before the
   function record_unscaled_index_insn_codes.  */
1951 mn10300_legitimate_address_p (machine_mode mode
, rtx x
, bool strict
)
1955 if (CONSTANT_ADDRESS_P (x
))
1956 return !flag_pic
|| mn10300_legitimate_pic_operand_p (x
);
1958 if (RTX_OK_FOR_BASE_P (x
, strict
))
1961 if (TARGET_AM33
&& (mode
== SImode
|| mode
== SFmode
|| mode
== HImode
))
1963 if (GET_CODE (x
) == POST_INC
)
1964 return RTX_OK_FOR_BASE_P (XEXP (x
, 0), strict
);
1965 if (GET_CODE (x
) == POST_MODIFY
)
1966 return (RTX_OK_FOR_BASE_P (XEXP (x
, 0), strict
)
1967 && CONSTANT_ADDRESS_P (XEXP (x
, 1)));
1970 if (GET_CODE (x
) != PLUS
)
1974 index
= XEXP (x
, 1);
1980 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1981 addressing is hard to satisfy. */
1985 return (REGNO_GENERAL_P (REGNO (base
), strict
)
1986 && REGNO_GENERAL_P (REGNO (index
), strict
));
1989 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base
), strict
))
1992 if (CONST_INT_P (index
))
1993 return IN_RANGE (INTVAL (index
), -1 - 0x7fffffff, 0x7fffffff);
1995 if (CONSTANT_ADDRESS_P (index
))
1996 return !flag_pic
|| mn10300_legitimate_pic_operand_p (index
);
2002 mn10300_regno_in_class_p (unsigned regno
, int rclass
, bool strict
)
2004 if (regno
>= FIRST_PSEUDO_REGISTER
)
2010 regno
= reg_renumber
[regno
];
2011 if (regno
== INVALID_REGNUM
)
2014 return TEST_HARD_REG_BIT (reg_class_contents
[rclass
], regno
);
2018 mn10300_legitimize_reload_address (rtx x
,
2019 machine_mode mode ATTRIBUTE_UNUSED
,
2020 int opnum
, int type
,
2021 int ind_levels ATTRIBUTE_UNUSED
)
2023 bool any_change
= false;
2025 /* See above re disabling reg+reg addressing for MN103. */
2029 if (GET_CODE (x
) != PLUS
)
2032 if (XEXP (x
, 0) == stack_pointer_rtx
)
2034 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
2035 GENERAL_REGS
, GET_MODE (x
), VOIDmode
, 0, 0,
2036 opnum
, (enum reload_type
) type
);
2039 if (XEXP (x
, 1) == stack_pointer_rtx
)
2041 push_reload (XEXP (x
, 1), NULL_RTX
, &XEXP (x
, 1), NULL
,
2042 GENERAL_REGS
, GET_MODE (x
), VOIDmode
, 0, 0,
2043 opnum
, (enum reload_type
) type
);
2047 return any_change
? x
: NULL_RTX
;
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  Returns TRUE if X is a valid
   constant.  Note that some "constants" aren't valid, such as TLS
   symbols and unconverted GOT-based references, so we eliminate
   those here.  */
2056 mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
2058 switch (GET_CODE (x
))
2063 if (GET_CODE (x
) == PLUS
)
2065 if (! CONST_INT_P (XEXP (x
, 1)))
2070 /* Only some unspecs are valid as "constants". */
2071 if (GET_CODE (x
) == UNSPEC
)
2073 switch (XINT (x
, 1))
2085 /* We must have drilled down to a symbol. */
2086 if (! mn10300_symbolic_operand (x
, Pmode
))
/* Undo pic address legitimization for the benefit of debug info.  */
2100 mn10300_delegitimize_address (rtx orig_x
)
2102 rtx x
= orig_x
, ret
, addend
= NULL
;
2107 if (GET_CODE (x
) != PLUS
|| GET_MODE (x
) != Pmode
)
2110 if (XEXP (x
, 0) == pic_offset_table_rtx
)
  /* With the REG+REG addressing of AM33, var-tracking can re-assemble
     some odd-looking "addresses" that were never valid in the first place.
     We need to look harder to avoid warnings being emitted.  */
2115 else if (GET_CODE (XEXP (x
, 0)) == PLUS
)
2117 rtx x0
= XEXP (x
, 0);
2118 rtx x00
= XEXP (x0
, 0);
2119 rtx x01
= XEXP (x0
, 1);
2121 if (x00
== pic_offset_table_rtx
)
2123 else if (x01
== pic_offset_table_rtx
)
2133 if (GET_CODE (x
) != CONST
)
2136 if (GET_CODE (x
) != UNSPEC
)
2139 ret
= XVECEXP (x
, 0, 0);
2140 if (XINT (x
, 1) == UNSPEC_GOTOFF
)
2142 else if (XINT (x
, 1) == UNSPEC_GOT
)
2147 gcc_assert (GET_CODE (ret
) == SYMBOL_REF
);
2148 if (need_mem
!= MEM_P (orig_x
))
2150 if (need_mem
&& addend
)
2153 ret
= gen_rtx_PLUS (Pmode
, addend
, ret
);
/* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
   the 3-byte fully general instruction; for MN103 this is the 2-byte form
   with an address register.  */
2162 mn10300_address_cost (rtx x
, machine_mode mode ATTRIBUTE_UNUSED
,
2163 addr_space_t as ATTRIBUTE_UNUSED
, bool speed
)
2168 switch (GET_CODE (x
))
2173 /* We assume all of these require a 32-bit constant, even though
2174 some symbol and label references can be relaxed. */
2175 return speed
? 1 : 4;
2183 /* Assume any symbolic offset is a 32-bit constant. */
2184 i
= (CONST_INT_P (XEXP (x
, 1)) ? INTVAL (XEXP (x
, 1)) : 0x12345678);
2185 if (IN_RANGE (i
, -128, 127))
2186 return speed
? 0 : 1;
2189 if (IN_RANGE (i
, -0x800000, 0x7fffff))
2195 index
= XEXP (x
, 1);
2196 if (register_operand (index
, SImode
))
2198 /* Attempt to minimize the number of registers in the address.
2199 This is similar to what other ports do. */
2200 if (register_operand (base
, SImode
))
2204 index
= XEXP (x
, 0);
2207 /* Assume any symbolic offset is a 32-bit constant. */
2208 i
= (CONST_INT_P (XEXP (x
, 1)) ? INTVAL (XEXP (x
, 1)) : 0x12345678);
2209 if (IN_RANGE (i
, -128, 127))
2210 return speed
? 0 : 1;
2211 if (IN_RANGE (i
, -32768, 32767))
2212 return speed
? 0 : 2;
2213 return speed
? 2 : 6;
2216 return rtx_cost (x
, Pmode
, MEM
, 0, speed
);
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */
2227 mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
2228 reg_class_t ifrom
, reg_class_t ito
)
2230 enum reg_class from
= (enum reg_class
) ifrom
;
2231 enum reg_class to
= (enum reg_class
) ito
;
2232 enum reg_class scratch
, test
;
2234 /* Simplify the following code by unifying the fp register classes. */
2235 if (to
== FP_ACC_REGS
)
2237 if (from
== FP_ACC_REGS
)
2240 /* Diagnose invalid moves by costing them as two moves. */
2245 scratch
= (TARGET_AM33
? GENERAL_REGS
: ADDRESS_REGS
);
2246 else if (to
== MDR_REGS
)
2247 scratch
= DATA_REGS
;
2248 else if (to
== FP_REGS
&& to
!= from
)
2249 scratch
= GENERAL_REGS
;
2253 if (from
== SP_REGS
)
2254 scratch
= (TARGET_AM33
? GENERAL_REGS
: ADDRESS_REGS
);
2255 else if (from
== MDR_REGS
)
2256 scratch
= DATA_REGS
;
2257 else if (from
== FP_REGS
&& to
!= from
)
2258 scratch
= GENERAL_REGS
;
2260 if (scratch
!= NO_REGS
&& !reg_class_subset_p (test
, scratch
))
2261 return (mn10300_register_move_cost (VOIDmode
, from
, scratch
)
2262 + mn10300_register_move_cost (VOIDmode
, scratch
, to
));
2264 /* From here on, all we need consider are legal combinations. */
2268 /* The scale here is bytes * 2. */
2270 if (from
== to
&& (to
== ADDRESS_REGS
|| to
== DATA_REGS
))
2273 if (from
== SP_REGS
)
2274 return (to
== ADDRESS_REGS
? 2 : 6);
2276 /* For MN103, all remaining legal moves are two bytes. */
2281 return (from
== ADDRESS_REGS
? 4 : 6);
2283 if ((from
== ADDRESS_REGS
|| from
== DATA_REGS
)
2284 && (to
== ADDRESS_REGS
|| to
== DATA_REGS
))
2287 if (to
== EXTENDED_REGS
)
2288 return (to
== from
? 6 : 4);
2290 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2295 /* The scale here is cycles * 2. */
2299 if (from
== FP_REGS
)
2302 /* All legal moves between integral registers are single cycle. */
/* Implement the TARGET_MEMORY_MOVE_COST hook.

   Given lack of the form of the address, this must be speed-relative,
   though we should never be less expensive than a size-relative register
   move cost above.  This is not a problem.  */
2314 mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
2315 reg_class_t iclass
, bool in ATTRIBUTE_UNUSED
)
2317 enum reg_class rclass
= (enum reg_class
) iclass
;
2319 if (rclass
== FP_REGS
)
/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.  */
2330 mn10300_rtx_costs (rtx x
, machine_mode mode
, int outer_code
,
2331 int opno ATTRIBUTE_UNUSED
, int *ptotal
, bool speed
)
2333 /* This value is used for SYMBOL_REF etc where we want to pretend
2334 we have a full 32-bit constant. */
2335 HOST_WIDE_INT i
= 0x12345678;
2337 int code
= GET_CODE (x
);
2346 if (outer_code
== SET
)
2348 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2349 if (IN_RANGE (i
, -32768, 32767))
2350 total
= COSTS_N_INSNS (1);
2352 total
= COSTS_N_INSNS (2);
2356 /* 16-bit integer operands don't affect latency;
2357 24-bit and 32-bit operands add a cycle. */
2358 if (IN_RANGE (i
, -32768, 32767))
2361 total
= COSTS_N_INSNS (1);
2366 if (outer_code
== SET
)
2370 else if (IN_RANGE (i
, -128, 127))
2372 else if (IN_RANGE (i
, -32768, 32767))
2379 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2380 if (IN_RANGE (i
, -128, 127))
2382 else if (IN_RANGE (i
, -32768, 32767))
2384 else if (TARGET_AM33
&& IN_RANGE (i
, -0x01000000, 0x00ffffff))
2396 /* We assume all of these require a 32-bit constant, even though
2397 some symbol and label references can be relaxed. */
2401 switch (XINT (x
, 1))
2407 case UNSPEC_GOTSYM_OFF
:
2408 /* The PIC unspecs also resolve to a 32-bit constant. */
2412 /* Assume any non-listed unspec is some sort of arithmetic. */
2413 goto do_arith_costs
;
2417 /* Notice the size difference of INC and INC4. */
2418 if (!speed
&& outer_code
== SET
&& CONST_INT_P (XEXP (x
, 1)))
2420 i
= INTVAL (XEXP (x
, 1));
2421 if (i
== 1 || i
== 4)
2423 total
= 1 + rtx_cost (XEXP (x
, 0), mode
, PLUS
, 0, speed
);
2427 goto do_arith_costs
;
2441 total
= (speed
? COSTS_N_INSNS (1) : 2);
2445 /* Notice the size difference of ASL2 and variants. */
2446 if (!speed
&& CONST_INT_P (XEXP (x
, 1)))
2447 switch (INTVAL (XEXP (x
, 1)))
2462 total
= (speed
? COSTS_N_INSNS (1) : 3);
2466 total
= (speed
? COSTS_N_INSNS (3) : 2);
2473 total
= (speed
? COSTS_N_INSNS (39)
2474 /* Include space to load+retrieve MDR. */
2475 : code
== MOD
|| code
== UMOD
? 6 : 4);
2479 total
= mn10300_address_cost (XEXP (x
, 0), mode
,
2480 MEM_ADDR_SPACE (x
), speed
);
2482 total
= COSTS_N_INSNS (2 + total
);
2486 /* Probably not implemented. Assume external call. */
2487 total
= (speed
? COSTS_N_INSNS (10) : 7);
/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
   may access it using GOTOFF instead of GOT.  */
2503 mn10300_encode_section_info (tree decl
, rtx rtl
, int first
)
2507 default_encode_section_info (decl
, rtl
, first
);
2512 symbol
= XEXP (rtl
, 0);
2513 if (GET_CODE (symbol
) != SYMBOL_REF
)
2517 SYMBOL_REF_FLAG (symbol
) = (*targetm
.binds_local_p
) (decl
);
/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
/* Worker function for TARGET_TRAMPOLINE_INIT.  */

static void
mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* This is a strict alignment target, which means that we play
     some games to make sure that the locations at which we need
     to store <chain> and <disp> wind up at aligned addresses.

	0xfc 0xdd			mov chain,a1
	0xf8 0xed 0x00			btst 0,d1

     Note that the two extra insns are effectively nops; they
     clobber the flags but do not affect the contents of D0 or D1.  */

  disp = expand_binop (SImode, sub_optab, fnaddr,
		       plus_constant (Pmode, XEXP (m_tramp, 0), 11),
		       NULL_RTX, 1, OPTAB_DIRECT);

  mem = adjust_address (m_tramp, SImode, 0);
  emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
  mem = adjust_address (m_tramp, SImode, 4);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
  mem = adjust_address (m_tramp, SImode, 12);
  emit_move_insn (mem, disp);
}
/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE * file,
			     tree thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta,
			     HOST_WIDE_INT vcall_offset,
			     tree function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
}
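
/* Illustration only: for a thunk with DELTA == 4 and VCALL_OFFSET == 0 the
   code above would emit something along the lines of

	# Thunk Entry Point:
	add 4, d0
	jmp <target>

   where d0 stands for reg_names[FIRST_ARGUMENT_REGNUM] and "#" for
   ASM_COMMENT_START; both names are assumptions made for this example,
   not something guaranteed by the function above.  */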
/* Return true if mn10300_asm_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			     const_tree function ATTRIBUTE_UNUSED)
{
  return true;
}
bool
mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (REGNO_REG_CLASS (regno) == FP_REGS
      || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
    /* Do not store integer values in FP registers.  */
    return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);

  if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return false;

  if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
    {
      if (REGNO_REG_CLASS (regno) == DATA_REGS
	  || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
	  || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
	return GET_MODE_SIZE (mode) <= 4;
    }

  return false;
}
bool
mn10300_modes_tieable (machine_mode mode1, machine_mode mode2)
{
  if (GET_MODE_CLASS (mode1) == MODE_FLOAT
      && GET_MODE_CLASS (mode2) != MODE_FLOAT)
    return false;

  if (GET_MODE_CLASS (mode2) == MODE_FLOAT
      && GET_MODE_CLASS (mode1) != MODE_FLOAT)
    return false;

      || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
    return true;

  return false;
}
static int
cc_flags_for_mode (machine_mode mode)
{
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;

      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;

      return CC_FLAG_Z | CC_FLAG_N;
}
static int
cc_flags_for_code (enum rtx_code code)
{
    case GT:	/* ~(Z|(N^V)) */
    case LE:	/* Z|(N^V) */
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;

    case GTU:	/* ~(C | Z) */
    case LEU:	/* C | Z */
      return CC_FLAG_Z | CC_FLAG_C;
machine_mode
mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  int req;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    return CC_FLOATmode;

  req = cc_flags_for_code (code);

  if (req & CC_FLAG_V)
    return CCmode;
  if (req & CC_FLAG_C)
    return CCZNCmode;

  return CCZNmode;
}
static bool
set_is_load_p (rtx set)
{
  return MEM_P (SET_SRC (set));
}

static bool
set_is_store_p (rtx set)
{
  return MEM_P (SET_DEST (set));
}
/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx_insn *insn, int dep_type, rtx_insn *dep,
			   int cost, unsigned int)
{
  rtx insn_set;
  rtx dep_set;
  int timings;

  /* We are only interested in pairs of SET.  */
  insn_set = single_set (insn);

  dep_set = single_set (dep);

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && set_is_load_p (dep_set)
      && set_is_store_p (insn_set))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
	   && ! set_is_store_p (insn_set)
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
    cost += 1;

  /* Resolve the conflict described in section 1-7-4 of
     Chapter 3 of the MN103E Series Instruction Manual:

     "When the preceding instruction is a CPU load or
      store instruction, a following FPU instruction
      cannot be executed until the CPU completes the
      latency period even though there are no register
      or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */

  /* Check that the instruction about to be scheduled is an FPU instruction.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true ?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  timings = get_attr_timings (insn);
  return timings < 100 ? (timings % 10) : (timings % 100);
}
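
/* A note on the decoding above (descriptive only): the value of the
   "timings" attribute carries the latency in its low digits, so a value
   below 100 yields its final digit while a three-digit value yields its
   final two digits.  For example 23 decodes to a latency of 3 and 417
   decodes to a latency of 17.  */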
static void
mn10300_conditional_register_usage (void)
{
  int i;

  for (i = FIRST_EXTENDED_REGNUM;
       i <= LAST_EXTENDED_REGNUM; i++)
    fixed_regs[i] = call_used_regs[i] = 1;

  for (i = FIRST_FP_REGNUM;
       i <= LAST_FP_REGNUM; i++)
    fixed_regs[i] = call_used_regs[i] = 1;

  fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
    call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
/* Worker function for TARGET_MD_ASM_ADJUST.
   We do this in the mn10300 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static rtx_insn *
mn10300_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
		       vec<const char *> &/*constraints*/,
		       vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
{
  clobbers.safe_push (gen_rtx_REG (CCmode, CC_REG));
  SET_HARD_REG_BIT (clobbered_regs, CC_REG);
  return NULL;
}
/* A helper function for splitting cbranch patterns after reload.  */

void
mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
{
  rtx flags, x;

  flags = gen_rtx_REG (cmp_mode, CC_REG);
  x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
  x = gen_rtx_SET (flags, x);
  emit_insn (x);

  x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
  x = gen_rtx_SET (pc_rtx, x);
  emit_jump_insn (x);
}
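
/* In outline (a restatement of the code above, not extra behaviour): the
   combined compare-and-branch operand is split into two emitted patterns,
   roughly

	(set (reg CC_REG) (compare op0 op1))
	(set (pc) (if_then_else (<cond> (reg CC_REG) (const_int 0))
				(label_ref ...) (pc)))

   where <cond> is the comparison code taken from CMP_OP and the flags
   register uses CMP_MODE.  */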
/* A helper function for matching parallels that set the flags.  */

bool
mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
{
  rtx op1, flags;
  machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 1);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}
/* This function is used to help split:

     (set (reg) (and (reg) (int)))

   into:

     (set (reg) (shift (reg) (int))
     (set (reg) (shift (reg) (int))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   value means that the low bits are to be cleared (and hence the
   shifts should be right followed by left) whereas a negative value
   means that the high bits are to be cleared (left followed by right).
   Zero is returned when it would not be economical to split the AND.  */

int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);

      /* This is only a size win if we can use the asl2 insn.  Otherwise we
	 would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);

      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return -count;
    }
}
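
/* A worked example of the decision above (illustrative only): for the
   AND mask 0xfffffffc (-4) the high bit is set and exact_log2 (4) is 2,
   so the function returns 2 and the low two bits can be cleared with a
   shift right by 2 followed by a shift left by 2.  For the mask 0x0f the
   high bit is clear, exact_log2 (15 + 1) is 4 and the function returns
   -4, meaning it is the high bits that should be cleared, using a left
   shift followed by a right shift.  */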
/* The LIW slot, operation and operands of a bundling candidate insn.  */
struct liw_data
{
  enum attr_liw slot;
  enum attr_liw_op op;
  rtx dest;
  rtx src;
};

/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */
static bool
extract_bundle (rtx_insn *insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);

      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);

      /* The AND, OR and XOR long instruction words only accept register
	 arguments.  */
      allow_consts = false;

      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);

  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  return allow_consts && satisfies_constraint_O (pdata->src);
}
/* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC generated
   the instructions with the assumption that LIW1 would be executed before LIW2
   so we must check for overlaps between their sources and destinations.  */

static bool
check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
{
  /* Check for slot conflicts.  */
  if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
    return false;

  /* If either operation is a compare, then "dest" is really an input; the real
     destination is CC_REG.  So these instructions need different checks.  */

  /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
     check its values prior to any changes made by OP.  */
  if (pliw1->op == LIW_OP_CMP)
    {
      /* Two sequential comparisons means dead code, which ought to
	 have been eliminated given that bundling only happens with
	 optimization.  We cannot bundle them in any case.  */
      gcc_assert (pliw1->op != pliw2->op);
      return true;
    }

  /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
     is the destination of OP, as the CMP will look at the old value, not the new
     one.  */
  if (pliw2->op == LIW_OP_CMP)
    {
      if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
	return false;

      if (REG_P (pliw2->src))
	return REGNO (pliw2->src) != REGNO (pliw1->dest);

      return true;
    }

  /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
     same destination register.  */
  if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
    return false;

  /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
     of OP1 is the source of OP2.  The exception is when OP1 is a MOVE instruction when
     we can replace the source in OP2 with the source of OP1.  */
  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
    {
      if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
	{
	  if (! REG_P (pliw1->src)
	      && (pliw2->op == LIW_OP_AND
		  || pliw2->op == LIW_OP_OR
		  || pliw2->op == LIW_OP_XOR))
	    return false;

	  pliw2->src = pliw1->src;
	  return true;
	}

      return false;
    }

  /* Everything else is OK.  */
  return true;
}
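
/* An illustrative pairing (not additional functionality): two independent
   register operations such as

	mov d0, a0
	add d1, d2

   touch disjoint registers, so as long as their LIW slots do not conflict
   the checks above allow them to be combined into one long instruction
   word, whereas

	add d1, d0
	cmp d0, d3

   cannot be combined because the CMP would then see the value of d0 from
   before the ADD.  The mnemonics are only meant to show the dependence
   patterns being tested.  */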
/* Combine pairs of insns into LIW bundles.  */

static void
mn10300_bundle_liw (void)
{
  rtx_insn *r;

  for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
    {
      rtx_insn *insn1, *insn2;
      struct liw_data liw1, liw2;

      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
	continue;

      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
	continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
	continue;

      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
	{
	  struct liw_data temp;

	  temp = liw1;
	  liw1 = liw2;
	  liw2 = temp;
	}

      delete_insn (insn2);

      rtx insn2_pat;
      if (liw1.op == LIW_OP_CMP)
	insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
				 GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
	insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
				 GEN_INT (liw1.op));
      else
	insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
			     GEN_INT (liw1.op), GEN_INT (liw2.op));

      insn2 = emit_insn_after (insn2_pat, insn1);
      delete_insn (insn1);
    }
}
#define DUMP(reason, insn)				\
  do							\
    {							\
      if (dump_file)					\
	{						\
	  fprintf (dump_file, reason "\n");		\
	  if (insn != NULL_RTX)				\
	    print_rtl_single (dump_file, insn);		\
	  fprintf (dump_file, "\n");			\
	}						\
    }							\
  while (0)
/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  */

static void
mn10300_insert_setlb_lcc (rtx label, rtx branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx_insn *insn;

      /* This label is used both as an entry point to the loop
	 and as a loop-back point for the loop.  We need to separate
	 these two functions so that the SETLB happens upon entry,
	 but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* If the comparison has not already been split out of the branch
     then do so now.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  rtx_insn *jump = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (lcc, 0, 0), jump, 0);
  JUMP_LABEL (jump) = label;
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", jump);
  delete_insn (branch);
}
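
/* Sketch of the transformation performed above (descriptive only).  Before
   it runs, a candidate loop looks roughly like

	.L1:			; loop header, also the latch
	    ...
	    <cond branch> .L1	; conditional branch back to the header

   and afterwards the SETLB insn sits just in front of the (possibly newly
   created) loop-back label while the branch has been replaced by an Lcc
   (or FLcc) insn:

	    setlb
	.L1:
	    ...
	    Lcc .L1

   so that the loop-back branch can use the MN10300's SETLB/Lcc hardware
   loop mechanism.  The mnemonics are illustrative; the real output comes
   from the setlb, Lcc and FLcc patterns in mn10300.md.  */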
static bool
mn10300_block_contains_call (basic_block block)
{
  rtx_insn *insn;

  FOR_BB_INSNS (block, insn)
    if (CALL_P (insn))
      return true;

  return false;
}
static bool
mn10300_loop_contains_call_insn (loop_p loop)
{
  basic_block * bbs;
  bool result = false;
  unsigned int i;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    if (mn10300_block_contains_call (bbs[i]))
      {
	result = true;
	break;
      }

  free (bbs);
  return result;
}
static void
mn10300_scan_for_setlb_lcc (void)
{
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  compute_bb_for_insn ();

  /* Find the loops.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
	 then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
	reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
	/* FIXME: We could handle loops that span multiple blocks,
	   but this requires a lot more work tracking down the branches
	   that need altering, so for now keep things simple.  */
	reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
	reason = "it contains CALL insns";
      else
	{
	  rtx_insn *branch = BB_END (loop->latch);

	  gcc_assert (JUMP_P (branch));
	  if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
	    /* We cannot optimize tablejumps and the like.  */
	    /* FIXME: We could handle unconditional jumps.  */
	    reason = "it is not a simple loop";
	  else
	    {
	      rtx label;

	      if (dump_file)
		flow_loop_dump (loop, dump_file, NULL, 0);

	      label = BB_HEAD (loop->header);
	      gcc_assert (LABEL_P (label));

	      mn10300_insert_setlb_lcc (label, branch);
	    }
	}

      if (dump_file && reason != NULL)
	fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
		 INSN_UID (BB_HEAD (loop->header)),
		 reason);
    }

  loop_optimizer_finalize ();

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}
static void
mn10300_reorg (void)
{
  /* These are optimizations, so only run them if optimizing.  */
  if (TARGET_AM33 && (optimize > 0 || optimize_size))
    {
      if (TARGET_ALLOW_SETLB)
	mn10300_scan_for_setlb_lcc ();

      if (TARGET_ALLOW_LIW)
	mn10300_bundle_liw ();
    }
}
/* Initialize the GCC target structure.  */

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

#define TARGET_LRA_P hook_bool_void_false

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST mn10300_md_asm_adjust

#undef TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

struct gcc_target targetm = TARGET_INITIALIZER;