/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996-2015 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "stor-layout.h"
#include "insn-attr.h"
#include "tm-constrs.h"

/* This file should be included last.  */
#include "target-def.h"
/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;
/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

static int cc_flags_for_mode (machine_mode);
static int cc_flags_for_code (enum rtx_code);
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
	 not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
	 to run out of registers.  Indeed, this works so well that register
	 allocation problems are now more common *without* optimization,
	 when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
	mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
	mn10300_tune_cpu = PROCESSOR_AM34;
      else
	error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}
static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}
/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
      unsigned int liw_op = UINTVAL (x);

      gcc_assert (TARGET_ALLOW_LIW);
      gcc_assert (liw_op < LIW_OP_MAX);
      fputs (liw_op_names[liw_op], file);
      enum rtx_code cmp = GET_CODE (x);
      machine_mode mode = GET_MODE (XEXP (x, 0));

      cmp = reverse_condition (cmp);
      have_flags = cc_flags_for_mode (mode);

      /* bge is smaller than bnc.  */
      str = (have_flags & CC_FLAG_V ? "ge" : "nc");

      str = (have_flags & CC_FLAG_V ? "lt" : "ns");

      gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      mn10300_print_operand (file, x, 0);

      mn10300_print_operand (file, x, 0);
      switch (GET_CODE (x))
	{
	  output_address (XEXP (x, 0));

	  fprintf (file, "fd%d", REGNO (x) - 18);
      /* These are the least significant word in a 64bit value.  */
      switch (GET_CODE (x))
	{
	  output_address (XEXP (x, 0));

	  fprintf (file, "%s", reg_names[REGNO (x)]);

	  fprintf (file, "%s", reg_names[subreg_regno (x)]);

	  switch (GET_MODE (x))
	    {
	      REAL_VALUE_TO_TARGET_DOUBLE
		(*CONST_DOUBLE_REAL_VALUE (x), val);
	      fprintf (file, "0x%lx", val[0]);

	      REAL_VALUE_TO_TARGET_SINGLE
		(*CONST_DOUBLE_REAL_VALUE (x), val[0]);
	      fprintf (file, "0x%lx", val[0]);

	      mn10300_print_operand_address (file,
					     GEN_INT (CONST_DOUBLE_LOW (x)));

	  split_double (x, &low, &high);
	  fprintf (file, "%ld", (long)INTVAL (low));
      /* Similarly, but for the most significant word.  */
      switch (GET_CODE (x))
	{
	  x = adjust_address (x, SImode, 4);
	  output_address (XEXP (x, 0));

	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);

	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);

	  switch (GET_MODE (x))
	    {
	      REAL_VALUE_TO_TARGET_DOUBLE
		(*CONST_DOUBLE_REAL_VALUE (x), val);
	      fprintf (file, "0x%lx", val[1]);

	      mn10300_print_operand_address (file,
					     GEN_INT (CONST_DOUBLE_HIGH (x)));

	  split_double (x, &low, &high);
	  fprintf (file, "%ld", (long)INTVAL (high));
      if (REG_P (XEXP (x, 0)))
	output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
      else
	output_address (XEXP (x, 0));

      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));

      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));

      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out of range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
      fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
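      /* For illustration with a hypothetical operand (not from the
	 original sources): a shift count of (const_int 36) prints as
	 "4", since 36 & 0x1f == 4, which matches what the hardware
	 itself would do with the out-of-range count.  */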
      switch (GET_CODE (x))
	{
	  output_address (XEXP (x, 0));

	  fprintf (file, "%s", reg_names[REGNO (x)]);

	  fprintf (file, "%s", reg_names[subreg_regno (x)]);

	  /* This will only be single precision....  */
	  REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), val);
	  fprintf (file, "0x%lx", val);

	  mn10300_print_operand_address (file, x);
/* Output assembly language for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
      mn10300_print_operand (file, XEXP (addr, 0), 0);

      mn10300_print_operand (file, XEXP (addr, 0), 0);
      mn10300_print_operand (file, XEXP (addr, 1), 0);

      mn10300_print_operand (file, addr, 0);

      {
	rtx base = XEXP (addr, 0);
	rtx index = XEXP (addr, 1);

	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
	  {
	    rtx x = base;
	    base = index;
	    index = x;

	    gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
	  }
	gcc_assert (REG_OK_FOR_BASE_P (base));

	mn10300_print_operand (file, index, 0);
	fputc (',', file);
	mn10300_print_operand (file, base, 0);
      }

      output_addr_const (file, addr);

      output_addr_const (file, addr);
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));

	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);

	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);

	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);

	case UNSPEC_GOTSYM_OFF:
	  assemble_name (file, GOT_SYMBOL_NAME);
	  output_addr_const (file, XVECEXP (x, 0, 0));
/* Count the number of FP registers that have to be saved.  */

static int
fp_regs_to_save (void)
{
  int i, n = 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}
/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */
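/* A worked example of the mask encoding (illustrative register numbers,
   inferred from the bit-per-regno layout used below): saving d2 (bit 2)
   and a3 (bit 7) gives MASK == 0x84, while using any callee-saved
   extended register forces all of bits 14-17 on, so MASK & 0x3c000 is
   always either 0 or 0x3c000.  */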
static void
mn10300_print_reg_list (FILE *file, int mask)
{
  int i;

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
	fputs (reg_names[i], file);
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      fputs ("exreg1", file);
    }
}
/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}
bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}
/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.
   Also returns the number of bytes in the registers in the mask if
   BYTES_SAVED is not NULL.  */

unsigned int
mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
{
  int mask = 0;
  int i;
  unsigned int count = 0;

  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      {
	mask |= (1 << i);
	++ count;
      }

  if ((mask & 0x3c000) != 0)
    {
      for (i = 0x04000; i < 0x40000; i <<= 1)
	if ((mask & i) == 0)
	  ++ count;

      mask |= 0x3c000;
    }

  if (bytes_saved)
    * bytes_saved = count * UNITS_PER_WORD;

  return mask;
}

static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}
/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
	 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -1*4)))
	      (reg:SI R1))
	 ...
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -N*4)))
	      (reg:SI RN)))  */
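/* A concrete (illustrative) instance of the form above: if MASK covers
   just d2 and d3, then N == 2 and the PARALLEL holds the SP decrement
   by 8 plus two stores, d2 at (mem:SI (plus:SI (reg:SI 9)
   (const_int -4))) and d3 at (const_int -8), following the store order
   defined below.  */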
static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      ++count;
      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (x, gen_rtx_REG (SImode, regno));
      elts[count] = F(x);

      /* Remove the register from the mask so that... */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (stack_pointer_rtx, x);
  elts[0] = F(x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}
static inline unsigned int
popcount (unsigned int mask)
{
  unsigned int count = 0;

  while (mask)
    {
      ++ count;
      mask &= ~ (mask & - mask);
    }
  return count;
}
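/* A quick check of the loop above (worked numbers, not from the
   original sources): MASK & -MASK isolates the lowest set bit, so each
   iteration clears exactly one bit; popcount (0x84) therefore loops
   twice and returns 2.  */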
void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int mask;

  mask = mn10300_get_live_callee_saved_regs (NULL);
  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mask);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size + popcount (mask) * 4;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      enum { save_sp_merge,
	     save_sp_no_merge,
	     save_sp_partial_merge,
	     save_a0_merge,
	     save_a0_no_merge } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;

      if (flag_stack_usage_info)
	current_function_static_stack_size += num_regs_to_save * 4;
      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate
	 smallest code.  Ties are broken in favor of shorter sequences
	 (in terms of number of instructions).  */

#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
   SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
		    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
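/* Sanity-checking these size estimates with worked numbers (not from
   the original sources): SIZE_ADD_SP (-32) is 3 bytes since -32 fits
   in an imm8, SIZE_ADD_SP (-200) is 4 bytes (imm16), and
   SIZE_ADD_SP (-40000) is 6 bytes (imm32).  SIZE_ADD_AX differs only
   in the final arm, costing 2 bytes for the imm8 form.  */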
      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}

      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  if (size)
	    {
	      /* Insn: add 128-size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (128 - size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs[FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs[FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}
      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  addr = gen_rtx_PLUS (SImode,
				       stack_pointer_rtx,
				       GEN_INT (xsize));
		else
		  addr = stack_pointer_rtx;

		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}
void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int reg_save_bytes;

  mn10300_get_live_callee_saved_regs (& reg_save_bytes);
  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;

      /* We have several options to restore FP registers.  We could
	 load them from SP offsets, but, if there are enough FP
	 registers to restore, we win if we use a post-increment
	 addressing mode.  */

      /* If we have a frame pointer, it's the best option, because we
	 already know it has the value we want.  */
      if (frame_pointer_needed)
	reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
	 it's never used for return values.  But only do so if it's
	 smaller than using SP offsets.  */
      else
	{
	  enum { restore_sp_post_adjust,
		 restore_sp_pre_adjust,
		 restore_sp_partial_adjust,
		 restore_a1 } strategy;
	  unsigned int this_strategy_size, strategy_size = (unsigned)-1;
	  /* Consider using sp offsets before adjusting sp.  */
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
	  /* If size is too large, we'll have to adjust SP with an
	     add.  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save, sp.  */
	      this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
	    }
	  /* If we don't have to restore any non-FP registers,
	     we'll be able to save one byte by using rets.  */
	  if (! reg_save_bytes)
	    this_strategy_size--;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_post_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after adjusting sp.  */
	  /* Insn: add size, sp.  */
	  this_strategy_size = SIZE_ADD_SP (size);
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
	  /* We're going to use ret to release the FP registers
	     save area, so, no savings.  */

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_pre_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after partially adjusting sp.
	     When size is close to 32Kb, we may be able to adjust SP
	     with an imm16 add instruction while still using fmov
	     (d8,sp).  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save
			+ reg_save_bytes - 252,sp.  */
	      this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
						+ (int) reg_save_bytes - 252);
	      /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	      this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
						  - 4 * num_regs_to_save,
						  num_regs_to_save);
	      /* We're going to use ret to release the FP registers
		 save area, so, no savings.  */

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_sp_partial_adjust;
		  strategy_size = this_strategy_size;
		}
	    }

	  /* Consider using a1 in post-increment mode, as long as the
	     user hasn't changed the calling conventions of a1.  */
	  if (call_really_used_regs[FIRST_ADDRESS_REGNUM + 1]
	      && ! fixed_regs[FIRST_ADDRESS_REGNUM + 1])
	    {
	      /* Insn: mov sp,a1.  */
	      this_strategy_size = 1;
	      if (size)
		{
		  /* Insn: add size,a1.  */
		  this_strategy_size += SIZE_ADD_AX (size);
		}
	      /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
	      this_strategy_size += 3 * num_regs_to_save;
	      /* If size is large enough, we may be able to save a
		 byte.  */
	      if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
		{
		  /* Insn: mov a1,sp.  */
		  this_strategy_size += 2;
		}
	      /* If we don't have to restore any non-FP registers,
		 we'll be able to save one byte by using rets.  */
	      if (! reg_save_bytes)
		this_strategy_size--;

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_a1;
		  strategy_size = this_strategy_size;
		}
	    }
	  switch (strategy)
	    {
	    case restore_sp_post_adjust:
	      break;

	    case restore_sp_pre_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size)));
	      size = 0;
	      break;

	    case restore_sp_partial_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size + 4 * num_regs_to_save
					      + reg_save_bytes - 252)));
	      size = 252 - reg_save_bytes - 4 * num_regs_to_save;
	      break;

	    case restore_a1:
	      reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
	      emit_insn (gen_movsi (reg, stack_pointer_rtx));
	      if (size)
		emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}
      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
	reg = gen_rtx_POST_INC (SImode, reg);

      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else if (size)
	      {
		/* If we aren't using a post-increment register, use an
		   SP offset.  */
		addr = gen_rtx_PLUS (SImode,
				     stack_pointer_rtx,
				     GEN_INT (size));
	      }
	    else
	      addr = stack_pointer_rtx;

	    size += 4;

	    emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
				  gen_rtx_MEM (SFmode, addr)));
	  }
      /* If we were using the restore_a1 strategy and the number of
	 bytes to be released won't fit in the `ret' byte, copy `a1'
	 to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
	{
	  emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
	  size = 0;
	}
    }

  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
}
/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */
unsigned int
mn10300_store_multiple_regs (rtx op)
{
  int count;
  int mask = 0;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);

  /* Check that first instruction has the form (set (sp) (plus A B)) */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
	 Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
	  || (! MEM_P (SET_DEST (elt)))
	  || (! REG_P (SET_SRC (elt))))
	return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4)) */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
	  || (! REG_P (XEXP (elt, 0)))
	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
	  || (! CONST_INT_P (XEXP (elt, 1)))
	  || INTVAL (XEXP (elt, 1)) != -i * 4)
	return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}
/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (MEM_P (x)
	   || (REG_P (x)
	       && !HARD_REGISTER_P (x))
	   || (GET_CODE (x) == SUBREG
	       && REG_P (SUBREG_REG (x))
	       && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);

  return rclass;
}
/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}
/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    xregno = REGNO (x);
  if (xregno >= FIRST_PSEUDO_REGISTER)
    xregno = true_regnum (x);
  if (xregno != INVALID_REGNUM)
    xclass = REGNO_REG_CLASS (xregno);

  /* Memory load/stores less than a full word wide can't have an
     address or stack pointer destination.  They must use a data
     register as an intermediate register.  */
  if (rclass != DATA_REGS
      && (mode == QImode || mode == HImode)
      && xclass == NO_REGS)
    return DATA_REGS;

  /* We can only move SP to/from an address register.  */
  if (in_p
      && rclass == SP_REGS
      && xclass != ADDRESS_REGS)
    return ADDRESS_REGS;
  if (! in_p
      && xclass == SP_REGS
      && rclass != ADDRESS_REGS
      && rclass != SP_OR_ADDRESS_REGS)
    return ADDRESS_REGS;

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem (xregno);
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}
static int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}
int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      unsigned int reg_save_bytes;

      mn10300_get_live_callee_saved_regs (& reg_save_bytes);
      diff += reg_save_bytes;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
	  || int_size_in_bytes (type) == 0
	  || TYPE_MODE (type) == BLKmode);
}
/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */

static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
		     plus_constant (Pmode,
				    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}
/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			   machine_mode mode, const_tree type,
			   bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}
/* Return an RTX to represent where a value with mode MODE will be returned
   from a function.  If the result is NULL_RTX, the argument is pushed.  */

static rtx
mn10300_function_arg (cumulative_args_t cum_v, machine_mode mode,
		      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  rtx result = NULL_RTX;
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
      break;
    default:
      break;
    }

  return result;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  cum->nbytes += (mode != BLKmode
		  ? (GET_MODE_SIZE (mode) + 3) & ~3
		  : (int_size_in_bytes (type) + 3) & ~3);
}
/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
			   tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
			bool outgoing)
{
  rtx rv;
  machine_mode mode = TYPE_MODE (valtype);

  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  else if (! TARGET_PTR_A0D0 || ! outgoing
	   || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
			 GEN_INT (0));

  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
			 GEN_INT (0));
  return rv;
}
/* Implements TARGET_LIBCALL_VALUE.  */

static rtx
mn10300_libcall_value (machine_mode mode,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
}
/* Implements FUNCTION_VALUE_REGNO_P.  */

bool
mn10300_function_value_regno_p (const unsigned int regno)
{
  return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
}
/* Output an addition operation.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      if (src2 == const1_rtx && !need_flags)
	return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
	return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
	 add the other source to the destination.

	 Carefully select which source to copy to the destination; a
	 naive implementation will waste a byte when the source classes
	 are different and the destination is an address register.
	 Selecting the lowest cost register copy will optimize this
	 sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      else
	return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}
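/* Worked examples of the templates above (illustrative operand choices,
   not from the original sources): on MN103 with dest a0, src1 d0 and
   src2 d1, no extended register is involved and src1's class differs
   from dest's, so "mov %2,%0\n\tadd %1,%0" emits "mov d1,a0" then
   "add d0,a0"; with src2 == const1_rtx and the flags dead, the whole
   addition collapses to a single "inc" of the destination.  */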
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
mn10300_symbolic_operand (rtx op,
			  machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && CONST_INT_P (XEXP (op, 1)));
    default:
      return 0;
    }
}
/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	  regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	  regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	  regx1 = force_reg (Pmode,
			     gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
					     regy2));
	  return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	}
    }
  return x;
}
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
	  && (CONSTANT_POOL_ADDRESS_P (orig)
	      || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      x = emit_move_insn (reg, x);
    }
  else
    return orig;

  set_unique_reg_note (x, REG_EQUAL, orig);
  return reg;
}
/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  */

int
mn10300_legitimate_pic_operand_p (rtx x)
{
  const char *fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    return 0;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
	  || XINT (x, 1) == UNSPEC_GOT
	  || XINT (x, 1) == UNSPEC_GOTOFF
	  || XINT (x, 1) == UNSPEC_PLT
	  || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
	      return 0;
	}
      else if (fmt[i] == 'e'
	       && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
	return 0;
    }

  return 1;
}
/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.c before the
   function record_unscaled_index_insn_codes.  */

static bool
mn10300_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  rtx base, index;

  if (CONSTANT_ADDRESS_P (x))
    return !flag_pic || mn10300_legitimate_pic_operand_p (x);

  if (RTX_OK_FOR_BASE_P (x, strict))
    return true;

  if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
    {
      if (GET_CODE (x) == POST_INC)
	return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
      if (GET_CODE (x) == POST_MODIFY)
	return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
		&& CONSTANT_ADDRESS_P (XEXP (x, 1)));
    }

  if (GET_CODE (x) != PLUS)
    return false;

  base = XEXP (x, 0);
  index = XEXP (x, 1);

  if (REG_P (base) && REG_P (index))
    {
      /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
	 addressing is hard to satisfy.  */
      if (!TARGET_AM33)
	return false;

      return (REGNO_GENERAL_P (REGNO (base), strict)
	      && REGNO_GENERAL_P (REGNO (index), strict));
    }

  if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
    return false;

  if (CONST_INT_P (index))
    return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);

  if (CONSTANT_ADDRESS_P (index))
    return !flag_pic || mn10300_legitimate_pic_operand_p (index);

  return false;
}
static bool
mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!strict)
	return true;
      if (!reg_renumber)
	return false;
      regno = reg_renumber[regno];
      if (regno == INVALID_REGNUM)
	return false;
    }
  return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
}
rtx
mn10300_legitimize_reload_address (rtx x,
				   machine_mode mode ATTRIBUTE_UNUSED,
				   int opnum, int type,
				   int ind_levels ATTRIBUTE_UNUSED)
{
  bool any_change = false;

  /* See above re disabling reg+reg addressing for MN103.  */
  if (!TARGET_AM33)
    return NULL_RTX;

  if (GET_CODE (x) != PLUS)
    return NULL_RTX;

  if (XEXP (x, 0) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }
  if (XEXP (x, 1) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }

  return any_change ? x : NULL_RTX;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  Returns TRUE if X is a valid
   constant.  Note that some "constants" aren't valid, such as TLS
   symbols and unconverted GOT-based references, so we eliminate
   those here.  */

static bool
mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  if (! CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_PIC:
	    case UNSPEC_GOT:
	    case UNSPEC_GOTOFF:
	    case UNSPEC_PLT:
	      return true;
	    default:
	      return false;
	    }
	}

      /* We must have drilled down to a symbol.  */
      if (! mn10300_symbolic_operand (x, Pmode))
	return false;
      break;

    default:
      break;
    }

  return true;
}
/* Undo pic address legitimization for the benefit of debug info.  */

static rtx
mn10300_delegitimize_address (rtx orig_x)
{
  rtx x = orig_x, ret, addend = NULL;
  bool need_mem;

  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  if (XEXP (x, 0) == pic_offset_table_rtx)
    ;
  /* With the REG+REG addressing of AM33, var-tracking can re-assemble
     some odd-looking "addresses" that were never valid in the first place.
     We need to look harder to avoid warnings being emitted.  */
  else if (GET_CODE (XEXP (x, 0)) == PLUS)
    {
      rtx x0 = XEXP (x, 0);
      rtx x00 = XEXP (x0, 0);
      rtx x01 = XEXP (x0, 1);

      if (x00 == pic_offset_table_rtx)
	addend = x01;
      else if (x01 == pic_offset_table_rtx)
	addend = x00;
      else
	return orig_x;
    }
  else
    return orig_x;

  x = XEXP (x, 1);
  if (GET_CODE (x) != CONST)
    return orig_x;
  x = XEXP (x, 0);
  if (GET_CODE (x) != UNSPEC)
    return orig_x;

  ret = XVECEXP (x, 0, 0);
  if (XINT (x, 1) == UNSPEC_GOTOFF)
    need_mem = false;
  else if (XINT (x, 1) == UNSPEC_GOT)
    need_mem = true;
  else
    return orig_x;

  gcc_assert (GET_CODE (ret) == SYMBOL_REF);
  if (need_mem != MEM_P (orig_x))
    return orig_x;
  if (need_mem && addend)
    return orig_x;
  if (addend)
    ret = gen_rtx_PLUS (Pmode, addend, ret);
  return ret;
}
/* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
   the 3-byte fully general instruction; for MN103 this is the 2-byte form
   with an address register.  */

static int
mn10300_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
		      addr_space_t as ATTRIBUTE_UNUSED, bool speed)
{
  HOST_WIDE_INT i;
  rtx base, index;

  switch (GET_CODE (x))
    {
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      return speed ? 1 : 4;

      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (IN_RANGE (i, -0x800000, 0x7fffff))

      base = XEXP (x, 0);
      index = XEXP (x, 1);
      if (register_operand (index, SImode))
	{
	  /* Attempt to minimize the number of registers in the address.
	     This is similar to what other ports do.  */
	  if (register_operand (base, SImode))
	    return 1;

	  base = XEXP (x, 1);
	  index = XEXP (x, 0);
	}

      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (IN_RANGE (i, -32768, 32767))
	return speed ? 0 : 2;
      return speed ? 2 : 6;

    default:
      return rtx_cost (x, Pmode, MEM, 0, speed);
    }
}
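/* A worked cost example (illustrative, not from the original sources):
   for (plus (reg a0) (const_int 100)) the offset fits in 8 bits, so
   the address costs 0 when optimizing for speed and 1 when optimizing
   for size; raise the offset to 100000 and it falls through to the
   32-bit case, costing 2 or 6 respectively.  */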
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */

static int
mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t ifrom, reg_class_t ito)
{
  enum reg_class from = (enum reg_class) ifrom;
  enum reg_class to = (enum reg_class) ito;
  enum reg_class scratch, test;

  /* Simplify the following code by unifying the fp register classes.  */
  if (to == FP_ACC_REGS)
    to = FP_REGS;
  if (from == FP_ACC_REGS)
    from = FP_REGS;

  /* Diagnose invalid moves by costing them as two moves.  */
  scratch = NO_REGS;
  test = from;
  if (to == SP_REGS)
    scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (to == MDR_REGS)
    scratch = DATA_REGS;
  else if (to == FP_REGS && to != from)
    scratch = GENERAL_REGS;
  else
    {
      test = to;
      if (from == SP_REGS)
	scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
      else if (from == MDR_REGS)
	scratch = DATA_REGS;
      else if (from == FP_REGS && to != from)
	scratch = GENERAL_REGS;
    }
  if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
    return (mn10300_register_move_cost (VOIDmode, from, scratch)
	    + mn10300_register_move_cost (VOIDmode, scratch, to));

  /* From here on, all we need consider are legal combinations.  */

  if (optimize_size)
    {
      /* The scale here is bytes * 2.  */

      if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
	return 2;

      if (from == SP_REGS)
	return (to == ADDRESS_REGS ? 2 : 6);

      /* For MN103, all remaining legal moves are two bytes.  */
      if (! TARGET_AM33)
	return 4;

      if (to == SP_REGS)
	return (from == ADDRESS_REGS ? 4 : 6);

      if ((from == ADDRESS_REGS || from == DATA_REGS)
	  && (to == ADDRESS_REGS || to == DATA_REGS))
	return 4;

      if (to == EXTENDED_REGS)
	return (to == from ? 6 : 4);

      /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */
      return 6;
    }
  else
    {
      /* The scale here is cycles * 2.  */

      if (to == FP_REGS)
	return 8;
      if (from == FP_REGS)
	return 4;

      /* All legal moves between integral registers are single cycle.  */
      return 2;
    }
}
/* Implement the TARGET_MEMORY_MOVE_COST hook.

   Given lack of the form of the address, this must be speed-relative,
   though we should never be less expensive than a size-relative register
   move cost above.  This is not a problem.  */

static int
mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
{
  enum reg_class rclass = (enum reg_class) iclass;

  if (rclass == FP_REGS)
    return 8;
  return 6;
}
/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.  */

static bool
mn10300_rtx_costs (rtx x, machine_mode mode, int outer_code,
		   int opno ATTRIBUTE_UNUSED, int *ptotal, bool speed)
{
  /* This value is used for SYMBOL_REF etc where we want to pretend
     we have a full 32-bit constant.  */
  HOST_WIDE_INT i = 0x12345678;
  int total;
  int code = GET_CODE (x);

  switch (code)
    {
      if (speed)
	{
	  if (outer_code == SET)
	    {
	      /* 16-bit integer loads have latency 1, 32-bit loads 2.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = COSTS_N_INSNS (1);
	      else
		total = COSTS_N_INSNS (2);
	    }
	  else
	    {
	      /* 16-bit integer operands don't affect latency;
		 24-bit and 32-bit operands add a cycle.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = 0;
	      else
		total = COSTS_N_INSNS (1);
	    }
	}
      else
	{
	  if (outer_code == SET)
	    {
	      if (i == 0)
		total = 1;
	      else if (IN_RANGE (i, -128, 127))
		total = 2;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 3;
	      else
		total = 6;
	    }
	  else
	    {
	      /* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
	      if (IN_RANGE (i, -128, 127))
		total = 1;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 3;
	      else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
		total = 4;
	      else
		total = 6;
	    }
	}

      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */

      switch (XINT (x, 1))
	{
	case UNSPEC_GOTSYM_OFF:
	  /* The PIC unspecs also resolve to a 32-bit constant.  */
	  break;

	default:
	  /* Assume any non-listed unspec is some sort of arithmetic.  */
	  goto do_arith_costs;
	}

      /* Notice the size difference of INC and INC4.  */
      if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
	{
	  i = INTVAL (XEXP (x, 1));
	  if (i == 1 || i == 4)
	    {
	      total = 1 + rtx_cost (XEXP (x, 0), mode, PLUS, 0, speed);
	      goto alldone;
	    }
	}
      goto do_arith_costs;

    do_arith_costs:
      total = (speed ? COSTS_N_INSNS (1) : 2);
      break;

      /* Notice the size difference of ASL2 and variants.  */
      if (!speed && CONST_INT_P (XEXP (x, 1)))
	switch (INTVAL (XEXP (x, 1)))

      total = (speed ? COSTS_N_INSNS (1) : 3);
      break;

      total = (speed ? COSTS_N_INSNS (3) : 2);
      break;

      total = (speed ? COSTS_N_INSNS (39)
	       /* Include space to load+retrieve MDR.  */
	       : code == MOD || code == UMOD ? 6 : 4);
      break;

      total = mn10300_address_cost (XEXP (x, 0), mode,
				    MEM_ADDR_SPACE (x), speed);
      if (speed)
	total = COSTS_N_INSNS (2 + total);
      break;

    default:
      /* Probably not implemented.  Assume external call.  */
      total = (speed ? COSTS_N_INSNS (10) : 7);
      break;
    }

 alldone:
  *ptotal = total;
  return true;
}
/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
   may access it using GOTOFF instead of GOT.  */

static void
mn10300_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;

  default_encode_section_info (decl, rtl, first);

  if (! MEM_P (rtl))
    return;

  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  if (flag_pic)
    SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
}
/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  return 6;
}
/* Worker function for TARGET_TRAMPOLINE_INIT.  */

static void
mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* This is a strict alignment target, which means that we play
     some games to make sure that the locations at which we need
     to store <chain> and <disp> wind up at aligned addresses.

	0x28 0x00			add 0,d0
	0xfc 0xdd			mov chain,a1
	<chain>
	0xf8 0xed 0x00			btst 0,d1
	0xdc				jmp fnaddr
	<disp>

     Note that the two extra insns are effectively nops; they
     clobber the flags but do not affect the contents of D0 or D1.  */

  disp = expand_binop (SImode, sub_optab, fnaddr,
		       plus_constant (Pmode, XEXP (m_tramp, 0), 11),
		       NULL_RTX, 1, OPTAB_DIRECT);

  mem = adjust_address (m_tramp, SImode, 0);
  emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
  mem = adjust_address (m_tramp, SImode, 4);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
  mem = adjust_address (m_tramp, SImode, 12);
  emit_move_insn (mem, disp);
}
/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE *        file,
                             tree          thunk_fndecl ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT delta,
                             HOST_WIDE_INT vcall_offset,
                             tree          function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  fputs ("\n", file);
}
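/* For illustration, assuming the usual MN10300 register naming in which
   FIRST_ARGUMENT_REGNUM prints as "d0" and FIRST_ADDRESS_REGNUM + 1 prints
   as "a1", a thunk for function "foo" with DELTA == 4 and VCALL_OFFSET == 8
   would be emitted roughly as:

        add 4, d0
        mov d0, a1
        mov (a1), a1
        add 8, a1
        mov (a1), a1
        add a1, d0
        jmp foo

   The actual register names come from reg_names[] at run time; the ones
   shown here are only an example.  */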
/* Return true if mn10300_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree    thunk_fndecl ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT delta        ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
                             const_tree    function     ATTRIBUTE_UNUSED)
{
  return true;
}
bool
mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (REGNO_REG_CLASS (regno) == FP_REGS
      || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
    /* Do not store integer values in FP registers.  */
    return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);

  if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return false;

  if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
    {
      if (REGNO_REG_CLASS (regno) == DATA_REGS
          || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
          || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
        return GET_MODE_SIZE (mode) <= 4;
    }

  return false;
}
bool
mn10300_modes_tieable (machine_mode mode1, machine_mode mode2)
{
  if (GET_MODE_CLASS (mode1) == MODE_FLOAT
      && GET_MODE_CLASS (mode2) != MODE_FLOAT)
    return false;

  if (GET_MODE_CLASS (mode2) == MODE_FLOAT
      && GET_MODE_CLASS (mode1) != MODE_FLOAT)
    return false;

  if (TARGET_AM33
      || mode1 == mode2
      || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
    return true;

  return false;
}
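/* For example, SImode and HImode can be tied (neither is a float mode and
   both fit in 4 bytes), whereas SFmode and SImode cannot, because exactly
   one of them is a float mode.  (Illustrative reading of the checks above.)  */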
static int
cc_flags_for_mode (machine_mode mode)
{
  switch (mode)
    {
    case CCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
    case CCZNCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
    case CCZNmode:
      return CC_FLAG_Z | CC_FLAG_N;
    case CC_FLOATmode:
      return -1;
    default:
      gcc_unreachable ();
    }
}
static int
cc_flags_for_code (enum rtx_code code)
{
  switch (code)
    {
    case EQ:    /* Z */
    case NE:    /* ~Z */
      return CC_FLAG_Z;

    case LT:    /* N^V */
    case GE:    /* ~(N^V) */
      return CC_FLAG_N | CC_FLAG_V;

    case GT:    /* ~(Z|(N^V)) */
    case LE:    /* Z|(N^V) */
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;

    case GEU:   /* ~C */
    case LTU:   /* C */
      return CC_FLAG_C;

    case GTU:   /* ~(C | Z) */
    case LEU:   /* C | Z */
      return CC_FLAG_Z | CC_FLAG_C;

    default:
      gcc_unreachable ();
    }
}
machine_mode
mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  int req;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    return CC_FLOATmode;

  req = cc_flags_for_code (code);

  if (req & CC_FLAG_V)
    return CCmode;
  if (req & CC_FLAG_C)
    return CCZNCmode;
  return CCZNmode;
}
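/* Worked example of the selection above: EQ only needs the Z flag, so
   CCZNmode is sufficient; LTU needs the carry flag and therefore gets
   CCZNCmode; a signed LT needs N and V and so forces the full CCmode.
   (Derived from cc_flags_for_code; illustrative only.)  */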
static inline bool
set_is_load_p (rtx set)
{
  return MEM_P (SET_SRC (set));
}

static inline bool
set_is_store_p (rtx set)
{
  return MEM_P (SET_DEST (set));
}
/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
{
  rtx insn_set;
  rtx dep_set;
  int timings;

  /* We are only interested in pairs of SET.  */
  insn_set = single_set (insn);
  if (! insn_set)
    return cost;

  dep_set = single_set (dep);
  if (! dep_set)
    return cost;

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && set_is_load_p (dep_set)
      && set_is_store_p (insn_set))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
           && ! set_is_store_p (insn_set)
           && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
           && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
    cost += 1;

  /* Resolve the conflict described in section 1-7-4 of
     Chapter 3 of the MN103E Series Instruction Manual where it says:

       "When the preceding instruction is a CPU load or
        store instruction, a following FPU instruction
        cannot be executed until the CPU completes the
        latency period even though there are no register
        or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (REG_NOTE_KIND (link) == 0)
    return cost;

  /* Check that the instruction about to be scheduled is an FPU instruction.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true ?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  timings = get_attr_timings (insn);
  return timings < 100 ? (timings % 10) : (timings % 100);
}
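/* Illustrative note on the expression above: the latency is encoded in the
   low decimal digits of the timings attribute, so a timings value of 22
   yields a cost of 2 and a value of 4711 yields 11.  (The example values are
   hypothetical; the real ones come from the timings attribute in
   mn10300.md.)  */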
static void
mn10300_conditional_register_usage (void)
{
  unsigned int i;

  if (!TARGET_AM33)
    {
      for (i = FIRST_EXTENDED_REGNUM;
           i <= LAST_EXTENDED_REGNUM; i++)
        fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (!TARGET_AM33_2)
    {
      for (i = FIRST_FP_REGNUM;
           i <= LAST_FP_REGNUM; i++)
        fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
    call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
/* Worker function for TARGET_MD_ASM_ADJUST.
   We do this in the mn10300 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static rtx_insn *
mn10300_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
                       vec<const char *> &/*constraints*/,
                       vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
{
  clobbers.safe_push (gen_rtx_REG (CCmode, CC_REG));
  SET_HARD_REG_BIT (clobbered_regs, CC_REG);
  return NULL;
}
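/* The effect is that every inline asm statement is treated as if the user
   had named the flags register in its clobber list, so GCC never assumes
   that condition codes survive across an asm, matching the behaviour of the
   old cc0-based compiler.  */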
/* A helper function for splitting cbranch patterns after reload.  */

void
mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
{
  rtx flags, x;

  flags = gen_rtx_REG (cmp_mode, CC_REG);
  x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
  x = gen_rtx_SET (flags, x);
  emit_insn (x);

  x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
  x = gen_rtx_SET (pc_rtx, x);
  emit_jump_insn (x);
}
/* A helper function for matching parallels that set the flags.  */

bool
mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
{
  rtx op1, flags;
  machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 1);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}
/* This function is used to help split:

     (set (reg) (and (reg) (int)))

   into:

     (set (reg) (shift (reg) (int))
     (set (reg) (shift (reg) (int))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   value means that the low bits are to be cleared (and hence the
   shifts should be right followed by left) whereas a negative value
   means that the high bits are to be cleared (left followed by right).
   Zero is returned when it would not be economical to split the AND.  */

static int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);
      if (count < 0)
        return 0;
      /* This is only a size win if we can use the asl2 insn.  Otherwise we
         would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
        return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);
      if (count < 0)
        return 0;
      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
        return 0;
      return -count;
    }
}
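/* Worked examples (illustrative): for OP == (const_int -4), i.e. the mask
   0xfffffffc, -VAL is 4 so COUNT is 2 and the caller can clear the low two
   bits with a right shift followed by a left shift of 2.  For OP ==
   (const_int 7), COUNT is 3, so the function returns -3 when optimizing for
   size (clear the high bits with a left shift followed by a right shift) but
   0 when optimizing for speed, because 3 exceeds the speed threshold of 2.  */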
/* The per-insn information needed by the LIW bundling code: which LIW
   pipeline slot the insn can occupy, which LIW operation it performs,
   and its destination and source operands.  */

struct liw_data
{
  enum attr_liw slot;
  enum attr_liw_op op;
  rtx dest;
  rtx src;
};

/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */
static bool
extract_bundle (rtx_insn *insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL)
    return false;
  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL_RTX)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  switch (pdata->op)
    {
    case LIW_OP_MOV:
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;
    case LIW_OP_CMP:
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept register
         arguments.  */
      allow_consts = false;
      /* Fall through.  */
    default:
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  return allow_consts && satisfies_constraint_O (pdata->src);
}
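/* For example (illustrative RTL, not taken from a dump): a candidate such as
   (set (reg:SI d1) (plus:SI (reg:SI d1) (reg:SI d0))) yields op == LIW_OP_ADD
   with dest == d1 and src == d0, while a comparison
   (set (reg:CC CC_REG) (compare:CC (reg:SI d0) (reg:SI d1))) yields
   op == LIW_OP_CMP with "dest" == d0 and src == d1, since a compare has no
   real destination register.  */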
/* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC generated
   the instructions with the assumption that LIW1 would be executed before LIW2,
   so we must check for overlaps between their sources and destinations.  */

static bool
check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
{
  /* Check for slot conflicts.  */
  if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
    return false;

  /* If either operation is a compare, then "dest" is really an input; the real
     destination is CC_REG.  So these instructions need different checks.  */

  /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
     check its values prior to any changes made by OP.  */
  if (pliw1->op == LIW_OP_CMP)
    {
      /* Two sequential comparisons mean dead code, which ought to
         have been eliminated given that bundling only happens with
         optimization.  We cannot bundle them in any case.  */
      gcc_assert (pliw1->op != pliw2->op);
      return true;
    }

  /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
     is the destination of OP, as the CMP will look at the old value, not the new
     one.  */
  if (pliw2->op == LIW_OP_CMP)
    {
      if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
        return false;

      if (REG_P (pliw2->src))
        return REGNO (pliw2->src) != REGNO (pliw1->dest);

      return true;
    }

  /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
     same destination register.  */
  if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
    return false;

  /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
     of OP1 is the source of OP2.  The exception is when OP1 is a MOVE instruction when
     we can replace the source in OP2 with the source of OP1.  */
  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
    {
      if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
        {
          if (! REG_P (pliw1->src)
              && (pliw2->op == LIW_OP_AND
                  || pliw2->op == LIW_OP_OR
                  || pliw2->op == LIW_OP_XOR))
            return false;

          pliw2->src = pliw1->src;
          return true;
        }
      return false;
    }

  /* Everything else is OK.  */
  return true;
}
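/* Example of the MOV special case above (register names are illustrative):
   the pair "mov d1,d2 ; add d2,d3" can still be bundled even though OP2
   reads OP1's destination, because OP1 is a register-to-register MOV; the
   bundler rewrites OP2's source so that the pair executes as
   "mov d1,d2 | add d1,d3".  */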
/* Combine pairs of insns into LIW bundles.  */

static void
mn10300_bundle_liw (void)
{
  rtx_insn *r;

  for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
    {
      rtx_insn *insn1, *insn2;
      struct liw_data liw1, liw2;
      rtx insn2_pat;

      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
        continue;

      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
        continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
        continue;

      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
        {
          struct liw_data temp;

          temp = liw1;
          liw1 = liw2;
          liw2 = temp;
        }

      delete_insn (insn2);

      if (liw1.op == LIW_OP_CMP)
        insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
                                 GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
        insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
                                 GEN_INT (liw1.op));
      else
        insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
                             GEN_INT (liw1.op), GEN_INT (liw2.op));

      insn2 = emit_insn_after (insn2_pat, insn1);
      delete_insn (insn1);
      r = insn2;
    }
}
#define DUMP(reason, insn)                              \
  do                                                    \
    {                                                   \
      if (dump_file)                                    \
        {                                               \
          fprintf (dump_file, reason "\n");             \
          if (insn != NULL_RTX)                         \
            print_rtl_single (dump_file, insn);         \
          fprintf (dump_file, "\n");                    \
        }                                               \
    }                                                   \
  while (0)
/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  */

static void
mn10300_insert_setlb_lcc (rtx label, rtx branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx_insn *insn;

      /* This label is used both as an entry point to the loop
         and as a loop-back point for the loop.  We need to separate
         these two functions so that the SETLB happens upon entry,
         but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* If the comparison has not already been split out of the branch
     then do so now.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  rtx_insn *jump = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (lcc, 0, 0), jump, 0);
  JUMP_LABEL (jump) = label;
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", jump);
  delete_insn (branch);
}
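/* Schematically (an illustrative sketch, not taken from a compiler dump), a
   single-block counted loop such as

        .L4:    ...loop body...
                cmp d0,d1
                blt .L4

   becomes

                setlb
        .L4:    ...loop body...
                cmp d0,d1
                Llt .L4

   where SETLB records the loop start and the Lcc form of the branch re-uses
   it, which is intended to avoid the normal branch overhead on each
   loop-back iteration.  */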
static bool
mn10300_block_contains_call (basic_block block)
{
  rtx_insn *insn;

  FOR_BB_INSNS (block, insn)
    if (CALL_P (insn))
      return true;

  return false;
}

static bool
mn10300_loop_contains_call_insn (loop_p loop)
{
  basic_block * bbs;
  bool result = false;
  unsigned int i;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    if (mn10300_block_contains_call (bbs[i]))
      {
        result = true;
        break;
      }

  free (bbs);

  return result;
}
static void
mn10300_scan_for_setlb_lcc (void)
{
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  df_analyze ();
  compute_bb_for_insn ();

  /* Find the loops.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
         then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
        reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
        /* FIXME: We could handle loops that span multiple blocks,
           but this requires a lot more work tracking down the branches
           that need altering, so for now keep things simple.  */
        reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
        reason = "it contains CALL insns";
      else
        {
          rtx_insn *branch = BB_END (loop->latch);

          gcc_assert (JUMP_P (branch));
          if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
            /* We cannot optimize tablejumps and the like.  */
            /* FIXME: We could handle unconditional jumps.  */
            reason = "it is not a simple loop";
          else
            {
              rtx label;

              if (dump_file)
                flow_loop_dump (loop, dump_file, NULL, 0);

              label = BB_HEAD (loop->header);
              gcc_assert (LABEL_P (label));

              mn10300_insert_setlb_lcc (label, branch);
            }
        }

      if (dump_file && reason != NULL)
        fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
                 INSN_UID (BB_HEAD (loop->header)),
                 reason);
    }

  loop_optimizer_finalize ();

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}
static void
mn10300_reorg (void)
{
  /* These are optimizations, so only run them if optimizing.  */
  if (TARGET_AM33 && (optimize > 0 || optimize_size))
    {
      if (TARGET_ALLOW_SETLB)
        mn10300_scan_for_setlb_lcc ();

      if (TARGET_ALLOW_LIW)
        mn10300_bundle_liw ();
    }
}
/* Initialize the GCC target structure.  */

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef  TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef  TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef  TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef  TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef  TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef  TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

#undef  TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef  TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef  TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

#undef  TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef  TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

#undef  TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef  TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef  TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef  TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef  TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef  TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef  TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST mn10300_md_asm_adjust

#undef  TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

struct gcc_target targetm = TARGET_INITIALIZER;