/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
   2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "reload.h"
#include "expr.h"
#include "optabs.h"
#include "function.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "tm-constrs.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;
/* The size of the callee register save area.  Right now we save everything
   on entry since it costs us nothing in code size.  It does cost us from a
   speed standpoint, so we want to optimize this sooner or later.  */
#define REG_SAVE_BYTES (4 * df_regs_ever_live_p (2)	\
			+ 4 * df_regs_ever_live_p (3)	\
			+ 4 * df_regs_ever_live_p (6)	\
			+ 4 * df_regs_ever_live_p (7)	\
			+ 16 * (df_regs_ever_live_p (14)	\
				|| df_regs_ever_live_p (15)	\
				|| df_regs_ever_live_p (16)	\
				|| df_regs_ever_live_p (17)))
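
/* For example, a function that only touches d2 and d3 gets an 8 byte
   save area, while using any one of hard registers 14 through 17 (the
   callee-saved extended registers) costs the full 16 bytes at once,
   since the movm instruction used to save them handles that group only
   as a unit.  */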
static int cc_flags_for_mode (enum machine_mode);
static int cc_flags_for_code (enum rtx_code);
/* Implement TARGET_OPTION_OVERRIDE.  */

static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
	 not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
	 to run out of registers.  Indeed, this works so well that register
	 allocation problems are now more common *without* optimization,
	 when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
	mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
	mn10300_tune_cpu = PROCESSOR_AM34;
      else
	error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}
static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}
/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};
/* Print operand X using operand code CODE to assembly language output
   file FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
	unsigned int liw_op = UINTVAL (x);

	gcc_assert (TARGET_ALLOW_LIW);
	gcc_assert (liw_op < LIW_OP_MAX);
	fputs (liw_op_names[liw_op], file);
	break;
      }

    case 'b':
    case 'B':
      {
	enum rtx_code cmp = GET_CODE (x);
	enum machine_mode mode = GET_MODE (XEXP (x, 0));
	const char *str;
	int have_flags;

	if (code == 'b')
	  cmp = reverse_condition (cmp);
	have_flags = cc_flags_for_mode (mode);

	switch (cmp)
	  {
	  case NE:
	    str = "ne";
	    break;
	  case EQ:
	    str = "eq";
	    break;
	  case GE:
	    /* bge is smaller than bnc.  */
	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
	    break;
	  case LT:
	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
	    break;
	  case GT:
	    str = "gt";
	    break;
	  case LE:
	    str = "le";
	    break;
	  case GEU:
	    str = "cc";
	    break;
	  case GTU:
	    str = "hi";
	    break;
	  case LEU:
	    str = "ls";
	    break;
	  case LTU:
	    str = "cs";
	    break;
	  default:
	    gcc_unreachable ();
	  }

	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
	fputs (str, file);
      }
      break;
    case 'C':
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      if (REG_P (x))
	{
	  fputc ('(', file);
	  mn10300_print_operand (file, x, 0);
	  fputc (')', file);
	}
      else
	mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "fd%d", REGNO (x) - 18);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

      /* These are the least significant word in a 64bit value.  */
    case 'L':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[0]);
		break;

	      case SFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
		fprintf (file, "0x%lx", val[0]);
		break;

	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_LOW (x)));
		break;

	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;

	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long) INTVAL (low));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;
      /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  x = adjust_address (x, SImode, 4);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[1]);
		break;

	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_HIGH (x)));
		break;

	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;

	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long) INTVAL (high));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;
    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
	output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
      else
	output_address (XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int) ((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int) (INTVAL (x) & 0xff));
      break;

      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out of range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
    case 'S':
      if (CONST_INT_P (x))
	{
	  fprintf (file, "%d", (int) (INTVAL (x) & 0x1f));
	  break;
	}
      /* FALL THROUGH */

    default:
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case PLUS:
	  output_address (x);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	  /* This will only be single precision....  */
	case CONST_DOUBLE:
	  {
	    unsigned long val;
	    REAL_VALUE_TYPE rv;

	    REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
	    REAL_VALUE_TO_TARGET_SINGLE (rv, val);
	    fprintf (file, "0x%lx", val);
	    break;
	  }

	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case CODE_LABEL:
	case UNSPEC:
	  mn10300_print_operand_address (file, x);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;
    }
}
/* Output assembly language output for the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;

    case PLUS:
      {
	rtx base = XEXP (addr, 0);
	rtx index = XEXP (addr, 1);

	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
	  {
	    rtx x = base;
	    base = index;
	    index = x;

	    gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
	  }
	gcc_assert (REG_OK_FOR_BASE_P (base));

	mn10300_print_operand (file, index, 0);
	fputc (',', file);
	mn10300_print_operand (file, base, 0);
	break;
      }

    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;

    default:
      output_addr_const (file, addr);
      break;
    }
}
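
/* As a concrete illustration of the above: a post-increment of a1 is
   printed as "a1+", and a base-plus-index address is printed with the
   index first, e.g. "d0,a0"; any surrounding parentheses are supplied
   by mn10300_print_operand when it prints the containing MEM.  */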
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;

	case UNSPEC_GOT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);
	  break;

	case UNSPEC_GOTOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);
	  break;

	case UNSPEC_PLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);
	  break;

	case UNSPEC_GOTSYM_OFF:
	  assemble_name (file, GOT_SYMBOL_NAME);
	  fputs ("-(", file);
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("-.)", file);
	  break;

	default:
	  return false;
	}
      return true;
    }
  else
    return false;
}
/* Count the number of FP registers that have to be saved.  */

static int
fp_regs_to_save (void)
{
  int i, n = 0;

  if (! TARGET_AM33_2)
    return 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}
/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

static void
mn10300_print_reg_list (FILE *file, int mask)
{
  int need_comma;
  int i;

  need_comma = 0;
  fputc ('[', file);

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
	if (need_comma)
	  fputc (',', file);
	fputs (reg_names[i], file);
	need_comma = 1;
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      if (need_comma)
	fputc (',', file);
      fputs ("exreg1", file);
      need_comma = 1;
    }

  fputc (']', file);
}
/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}

bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}
/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.  */

int
mn10300_get_live_callee_saved_regs (void)
{
  int mask;
  int i;

  mask = 0;
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      mask |= (1 << i);
  if ((mask & 0x3c000) != 0)
    mask |= 0x3c000;

  return mask;
}

static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}
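
/* For example, a function that uses d2 and a3 (hard registers 2 and 7)
   gets back a mask of 0x84, while touching any single extended register
   turns on all of bits 14-17 (0x3c000), mirroring the movm restriction
   described above.  */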
/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
	 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -1*4)))
	      (reg:SI RN))
	 ...
	 (set (mem:SI (plus:SI (reg:SI 9)
			       (const_int -N*4)))
	      (reg:SI R1)))  */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved. */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved. */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE (store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      ++count;
      x = plus_constant (stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
      elts[count] = F (x);

      /* Remove the register from the mask so that... */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
  elts[0] = F (x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}
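
/* As an illustration, a mask covering d2, d3, a2 and a3 produces one
   five-element PARALLEL: the SET that drops sp by 16 followed by four
   frame-related stores at sp-4 ... sp-16.  A single matching pattern
   in mn10300.md then emits the whole group as one movm-style push,
   roughly "movm [d2,d3,a2,a3],(sp)".  */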
void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();

  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mn10300_get_live_callee_saved_regs ());

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      HOST_WIDE_INT xsize;
      rtx reg;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;

      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate
	 smallest code.  Ties are broken in favor of shorter sequences
	 (in terms of number of instructions).  */

#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
   SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
		    (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
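
      /* A couple of sample values, for orientation: SIZE_ADD_SP (-64)
	 is 3 bytes (imm8 form), SIZE_ADD_SP (-256) is 4 (imm16) and
	 SIZE_ADD_SP (-40000) is 6 (imm32); SIZE_ADD_AX is one byte
	 cheaper in the imm8 case.  The SIZE_FMOV_SP estimates grow
	 from 4 bytes per fmov for small offsets to 6 and then 7 bytes
	 as the displacement widens.  */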
      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}

      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  if (size)
	    {
	      /* Insn: add 128-size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (128 - size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs[FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs[FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  addr = gen_rtx_PLUS (SImode,
				       stack_pointer_rtx,
				       GEN_INT (xsize));
		else
		  addr = stack_pointer_rtx;

		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}
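
/* Putting the prologue pieces together: a non-leaf AM33-2 function
   with a 32 byte frame, no frame pointer and two callee-saved FP
   registers live would pick save_sp_merge and emit, roughly,

	movm [d2,d3],(sp)
	add -40,sp
	fmov fsN,(32,sp)
	fmov fsM,(36,sp)

   (the movm only if d2/d3 are live; exact registers and offsets depend
   on what the function actually uses).  */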
void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  int reg_save_bytes = REG_SAVE_BYTES;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;

      /* We have several options to restore FP registers.  We could
	 load them from SP offsets, but, if there are enough FP
	 registers to restore, we win if we use a post-increment
	 addressing mode.  */

      /* If we have a frame pointer, it's the best option, because we
	 already know it has the value we want.  */
      if (frame_pointer_needed)
	reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
	 it's never used for return values.  But only do so if it's
	 smaller than using SP offsets.  */
      else
	{
	  enum { restore_sp_post_adjust,
		 restore_sp_pre_adjust,
		 restore_sp_partial_adjust,
		 restore_a1 } strategy;
	  unsigned int this_strategy_size, strategy_size = (unsigned)-1;

	  /* Consider using sp offsets before adjusting sp.  */
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
	  /* If size is too large, we'll have to adjust SP with an
	     add.  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save, sp.  */
	      this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
	    }
	  /* If we don't have to restore any non-FP registers,
	     we'll be able to save one byte by using rets.  */
	  if (! reg_save_bytes)
	    this_strategy_size--;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_post_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after adjusting sp.  */
	  /* Insn: add size, sp.  */
	  this_strategy_size = SIZE_ADD_SP (size);
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
	  /* We're going to use ret to release the FP registers
	     save area, so, no savings.  */

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_pre_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after partially adjusting sp.
	     When size is close to 32Kb, we may be able to adjust SP
	     with an imm16 add instruction while still using fmov
	     (d8,sp).  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save
			    + reg_save_bytes - 252,sp.  */
	      this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
						+ reg_save_bytes - 252);
	      /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	      this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
						  - 4 * num_regs_to_save,
						  num_regs_to_save);
	      /* We're going to use ret to release the FP registers
		 save area, so, no savings.  */

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_sp_partial_adjust;
		  strategy_size = this_strategy_size;
		}
	    }

	  /* Consider using a1 in post-increment mode, as long as the
	     user hasn't changed the calling conventions of a1.  */
	  if (call_really_used_regs[FIRST_ADDRESS_REGNUM + 1]
	      && ! fixed_regs[FIRST_ADDRESS_REGNUM + 1])
	    {
	      /* Insn: mov sp,a1.  */
	      this_strategy_size = 1;
	      if (size)
		{
		  /* Insn: add size,a1.  */
		  this_strategy_size += SIZE_ADD_AX (size);
		}
	      /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
	      this_strategy_size += 3 * num_regs_to_save;
	      /* If size is large enough, we may be able to save a
		 couple of bytes.  */
	      if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
		{
		  /* Insn: mov a1,sp.  */
		  this_strategy_size += 2;
		}
	      /* If we don't have to restore any non-FP registers,
		 we'll be able to save one byte by using rets.  */
	      if (! reg_save_bytes)
		this_strategy_size--;

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_a1;
		  strategy_size = this_strategy_size;
		}
	    }

	  switch (strategy)
	    {
	    case restore_sp_post_adjust:
	      break;

	    case restore_sp_pre_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size)));
	      size = 0;
	      break;

	    case restore_sp_partial_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size + 4 * num_regs_to_save
					      + reg_save_bytes - 252)));
	      size = 252 - reg_save_bytes - 4 * num_regs_to_save;
	      break;

	    case restore_a1:
	      reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
	      emit_insn (gen_movsi (reg, stack_pointer_rtx));
	      if (size)
		emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
	reg = gen_rtx_POST_INC (SImode, reg);

      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else if (size)
	      {
		/* If we aren't using a post-increment register, use an
		   SP offset.  */
		addr = gen_rtx_PLUS (SImode,
				     stack_pointer_rtx,
				     GEN_INT (size));
	      }
	    else
	      addr = stack_pointer_rtx;

	    size += 4;

	    emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
				  gen_rtx_MEM (SFmode, addr)));
	  }

      /* If we were using the restore_a1 strategy and the number of
	 bytes to be released won't fit in the `ret' byte, copy `a1'
	 to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
	{
	  emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
	  size = 0;
	}
    }

  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + REG_SAVE_BYTES)));
}
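
/* The epilogue makes the mirror-image choice: for small frames the FP
   registers come back via SP offsets and the whole frame, register
   save area included, is released by the final "ret" (or "rets" when
   nothing was saved), while frames too large for the 8-bit ret operand
   get an explicit add or go through a1 first.  */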
/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */

int
mn10300_store_multiple_operation (rtx op,
				  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that first instruction has the form (set (sp) (plus A B)) */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
	 Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
	  || (! MEM_P (SET_DEST (elt)))
	  || (! REG_P (SET_SRC (elt))))
	return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4)) */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
	  || (! REG_P (XEXP (elt, 0)))
	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
	  || (! CONST_INT_P (XEXP (elt, 1)))
	  || INTVAL (XEXP (elt, 1)) != -i * 4)
	return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}
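
/* Concretely, the push of d2 and d3 generated above is essentially

	(parallel [(set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -8)))
		   (set (mem:SI (plus:SI (reg:SI 9) (const_int -4)))
			(reg:SI 2))
		   (set (mem:SI (plus:SI (reg:SI 9) (const_int -8)))
			(reg:SI 3))])

   and this predicate recovers the mask 0xc (bits 2 and 3) from it.  */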
/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (MEM_P (x)
	   || (REG_P (x)
	       && !HARD_REGISTER_P (x))
	   || (GET_CODE (x) == SUBREG
	       && REG_P (SUBREG_REG (x))
	       && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
  else
    return rclass;
}

/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}
/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  enum machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
	xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
	xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
	 address or stack pointer destination.  They must use a data
	 register as an intermediate register.  */
      if (rclass != DATA_REGS
	  && (mode == QImode || mode == HImode)
	  && xclass == NO_REGS)
	return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
	  && rclass == SP_REGS
	  && xclass != ADDRESS_REGS)
	return ADDRESS_REGS;
      if (!in_p
	  && xclass == SP_REGS
	  && rclass != ADDRESS_REGS
	  && rclass != SP_OR_ADDRESS_REGS)
	return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem (xregno);
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}
int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}
int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      diff += REG_SAVE_BYTES;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}
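
/* For instance, with a 16 byte frame, no FP saves and d2/d3 live,
   eliminating the argument pointer to the stack pointer yields
   16 + 8 = 24, while frame pointer to stack pointer is just the 16
   byte frame.  */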
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
	  || int_size_in_bytes (type) == 0
	  || TYPE_MODE (type) == BLKmode);
}
/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */

static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
		     plus_constant (crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}
static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}

/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
			   enum machine_mode mode, const_tree type,
			   bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}
/* Return an RTX to represent where an argument with mode MODE and type
   TYPE will be passed to a function.  If the result is NULL_RTX, the
   argument is pushed.  */

static rtx
mn10300_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
		      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  rtx result = NULL_RTX;
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
      break;
    default:
      break;
    }

  return result;
}
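
/* So for a call f (int a, int b, int c), A is passed in d0, B in d1
   and C on the stack; aggregates larger than eight bytes never get
   this far, because mn10300_pass_by_reference above makes the caller
   hand over their address instead.  */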
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  cum->nbytes += (mode != BLKmode
		  ? (GET_MODE_SIZE (mode) + 3) & ~3
		  : (int_size_in_bytes (type) + 3) & ~3);
}

/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			   tree type, bool named ATTRIBUTE_UNUSED)
{
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
			bool outgoing)
{
  rtx rv;
  enum machine_mode mode = TYPE_MODE (valtype);

  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  else if (! TARGET_PTR_A0D0 || ! outgoing
	   || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
			 GEN_INT (0));

  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
			 GEN_INT (0));
  return rv;
}
1642 mn10300_libcall_value (enum machine_mode mode
,
1643 const_rtx fun ATTRIBUTE_UNUSED
)
1645 return gen_rtx_REG (mode
, FIRST_DATA_REGNUM
);
1648 /* Implements FUNCTION_VALUE_REGNO_P. */
1651 mn10300_function_value_regno_p (const unsigned int regno
)
1653 return (regno
== FIRST_DATA_REGNUM
|| regno
== FIRST_ADDRESS_REGNUM
);
/* Output an addition operation.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      if (src2 == const1_rtx && !need_flags)
	return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
	return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
	 add the other source to the destination.

	 Carefully select which source to copy to the destination; a
	 naive implementation will waste a byte when the source classes
	 are different and the destination is an address register.
	 Selecting the lowest cost register copy will optimize this
	 sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  return "mov %2,%0\n\tadd %1,%0";
}
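
/* A few concrete outcomes of the above: "d0 += 1" becomes "inc d0"
   when the flags result is not needed, AM33 code can use the
   three-operand "add Rm,Rn,Rd" form when an extended register is
   involved, and plain MN103 falls back to copying one source into the
   destination with "mov" followed by a two-operand "add".  */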
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
mn10300_symbolic_operand (rtx op,
			  enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && CONST_INT_P (XEXP (op, 1)));
    default:
      return 0;
    }
}
/* Try machine dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   Normally it is always safe for this macro to do nothing.  It exists to
   recognize opportunities to optimize the output.

   But on a few ports with segmented architectures and indexed addressing
   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */

static rtx
mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			    enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);

  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.  */
  if (GET_CODE (x) == PLUS
      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	  regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	  regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	  regx1 = force_reg (Pmode,
			     gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
					     regy2));
	  return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	}
    }
  return x;
}
/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
	  && (CONSTANT_POOL_ADDRESS_P (orig)
	      || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL)
	reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      x = emit_move_insn (reg, x);
    }
  else
    return orig;

  set_unique_reg_note (x, REG_EQUAL, orig);
  return reg;
}
/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  */

int
mn10300_legitimate_pic_operand_p (rtx x)
{
  const char *fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    return 0;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
	  || XINT (x, 1) == UNSPEC_GOT
	  || XINT (x, 1) == UNSPEC_GOTOFF
	  || XINT (x, 1) == UNSPEC_PLT
	  || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
	      return 0;
	}
      else if (fmt[i] == 'e'
	       && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
	return 0;
    }

  return 1;
}
/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.c before the
   function record_unscaled_index_insn_codes.  */

static bool
mn10300_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
{
  rtx base, index;

  if (CONSTANT_ADDRESS_P (x))
    return !flag_pic || mn10300_legitimate_pic_operand_p (x);

  if (RTX_OK_FOR_BASE_P (x, strict))
    return true;

  if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
    {
      if (GET_CODE (x) == POST_INC)
	return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
      if (GET_CODE (x) == POST_MODIFY)
	return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
		&& CONSTANT_ADDRESS_P (XEXP (x, 1)));
    }

  if (GET_CODE (x) != PLUS)
    return false;

  base = XEXP (x, 0);
  index = XEXP (x, 1);

  if (!REG_P (base))
    return false;
  if (REG_P (index))
    {
      /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
	 addressing is hard to satisfy.  */
      if (!TARGET_AM33)
	return false;

      return (REGNO_GENERAL_P (REGNO (base), strict)
	      && REGNO_GENERAL_P (REGNO (index), strict));
    }

  if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
    return false;

  if (CONST_INT_P (index))
    return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);

  if (CONSTANT_ADDRESS_P (index))
    return !flag_pic || mn10300_legitimate_pic_operand_p (index);

  return false;
}
bool
mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!strict)
	return true;
      if (!reg_renumber)
	return false;
      regno = reg_renumber[regno];
      if (regno == INVALID_REGNUM)
	return false;
    }
  return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
}
rtx
mn10300_legitimize_reload_address (rtx x,
				   enum machine_mode mode ATTRIBUTE_UNUSED,
				   int opnum, int type,
				   int ind_levels ATTRIBUTE_UNUSED)
{
  bool any_change = false;

  /* See above re disabling reg+reg addressing for MN103.  */
  if (!TARGET_AM33)
    return NULL_RTX;

  if (GET_CODE (x) != PLUS)
    return NULL_RTX;

  if (XEXP (x, 0) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }
  if (XEXP (x, 1) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
		   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      any_change = true;
    }

  return any_change ? x : NULL_RTX;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.  Returns TRUE if X is a valid
   constant.  Note that some "constants" aren't valid, such as TLS
   symbols and unconverted GOT-based references, so we eliminate
   those here.  */

static bool
mn10300_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
	{
	  if (! CONST_INT_P (XEXP (x, 1)))
	    return false;
	  x = XEXP (x, 0);
	}

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
	{
	  switch (XINT (x, 1))
	    {
	    case UNSPEC_PIC:
	    case UNSPEC_GOT:
	    case UNSPEC_GOTOFF:
	    case UNSPEC_PLT:
	    case UNSPEC_GOTSYM_OFF:
	      return true;
	    default:
	      return false;
	    }
	}

      /* We must have drilled down to a symbol.  */
      if (! mn10300_symbolic_operand (x, Pmode))
	return false;
      break;

    default:
      break;
    }

  return true;
}
/* Undo pic address legitimization for the benefit of debug info.  */

static rtx
mn10300_delegitimize_address (rtx orig_x)
{
  rtx x = orig_x, ret, addend = NULL;
  bool need_mem;

  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  if (XEXP (x, 0) == pic_offset_table_rtx)
    ;
  /* With the REG+REG addressing of AM33, var-tracking can re-assemble
     some odd-looking "addresses" that were never valid in the first place.
     We need to look harder to avoid warnings being emitted.  */
  else if (GET_CODE (XEXP (x, 0)) == PLUS)
    {
      rtx x0 = XEXP (x, 0);
      rtx x00 = XEXP (x0, 0);
      rtx x01 = XEXP (x0, 1);

      if (x00 == pic_offset_table_rtx)
	addend = x01;
      else if (x01 == pic_offset_table_rtx)
	addend = x00;
      else
	return orig_x;
    }
  else
    return orig_x;
  x = XEXP (x, 1);

  if (GET_CODE (x) != CONST)
    return orig_x;
  x = XEXP (x, 0);
  if (GET_CODE (x) != UNSPEC)
    return orig_x;

  ret = XVECEXP (x, 0, 0);
  if (XINT (x, 1) == UNSPEC_GOTOFF)
    need_mem = false;
  else if (XINT (x, 1) == UNSPEC_GOT)
    need_mem = true;
  else
    return orig_x;

  gcc_assert (GET_CODE (ret) == SYMBOL_REF);
  if (need_mem != MEM_P (orig_x))
    return orig_x;
  if (need_mem && addend)
    return orig_x;
  if (addend)
    ret = gen_rtx_PLUS (Pmode, addend, ret);
  return ret;
}
/* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
   the 3-byte fully general instruction; for MN103 this is the 2-byte form
   with an address register.  */

static int
mn10300_address_cost (rtx x, bool speed)
{
  HOST_WIDE_INT i;
  rtx base, index;

  switch (GET_CODE (x))
    {
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      return speed ? 1 : 4;

    case REG:
    case SUBREG:
    case POST_INC:
      return 0;

    case POST_MODIFY:
      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (speed)
	return 1;
      if (IN_RANGE (i, -0x800000, 0x7fffff))
	return 3;
      return 4;

    case PLUS:
      base = XEXP (x, 0);
      index = XEXP (x, 1);
      if (register_operand (index, SImode))
	{
	  /* Attempt to minimize the number of registers in the address.
	     This is similar to what other ports do.  */
	  if (register_operand (base, SImode))
	    return 1;

	  base = XEXP (x, 1);
	  index = XEXP (x, 0);
	}

      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (IN_RANGE (i, -32768, 32767))
	return speed ? 0 : 2;
      return speed ? 2 : 6;

    default:
      return rtx_cost (x, MEM, speed);
    }
}
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */

static int
mn10300_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t ifrom, reg_class_t ito)
{
  enum reg_class from = (enum reg_class) ifrom;
  enum reg_class to = (enum reg_class) ito;
  enum reg_class scratch, test;

  /* Simplify the following code by unifying the fp register classes.  */
  if (to == FP_ACC_REGS)
    to = FP_REGS;
  if (from == FP_ACC_REGS)
    from = FP_REGS;

  /* Diagnose invalid moves by costing them as two moves.  */
  scratch = NO_REGS;
  test = from;
  if (to == SP_REGS)
    scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (to == MDR_REGS)
    scratch = DATA_REGS;
  else if (to == FP_REGS && to != from)
    scratch = GENERAL_REGS;
  if (scratch == NO_REGS)
    {
      test = to;
      if (from == SP_REGS)
	scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
      else if (from == MDR_REGS)
	scratch = DATA_REGS;
      else if (from == FP_REGS && to != from)
	scratch = GENERAL_REGS;
    }
  if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
    return (mn10300_register_move_cost (VOIDmode, from, scratch)
	    + mn10300_register_move_cost (VOIDmode, scratch, to));

  /* From here on, all we need consider are legal combinations.  */

  if (optimize_size)
    {
      /* The scale here is bytes * 2.  */

      if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
	return 2;

      if (from == SP_REGS)
	return (to == ADDRESS_REGS ? 2 : 6);

      /* For MN103, all remaining legal moves are two bytes.  */
      if (! TARGET_AM33)
	return 4;

      if (to == SP_REGS)
	return (from == ADDRESS_REGS ? 4 : 6);

      if ((from == ADDRESS_REGS || from == DATA_REGS)
	  && (to == ADDRESS_REGS || to == DATA_REGS))
	return 4;

      if (to == EXTENDED_REGS)
	return (to == from ? 6 : 4);

      /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */
      return 6;
    }
  else
    {
      /* The scale here is cycles * 2.  */

      if (to == FP_REGS)
	return 8;
      if (from == FP_REGS)
	return 4;

      /* All legal moves between integral registers are single cycle.  */
      return 2;
    }
}
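
/* For example, an address-to-address or data-to-data copy costs 2 (one
   byte, doubled scale), moving the stack pointer into a data register
   costs 6, and a move with no direct form -- say SP to MDR -- is
   priced as the sum of the two legal moves through the scratch
   class.  */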
/* Implement the TARGET_MEMORY_MOVE_COST hook.

   Given lack of the form of the address, this must be speed-relative,
   though we should never be less expensive than a size-relative register
   move cost above.  This is not a problem.  */

static int
mn10300_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
{
  enum reg_class rclass = (enum reg_class) iclass;

  if (rclass == FP_REGS)
    return 8;
  return 6;
}
/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.  */

static bool
mn10300_rtx_costs (rtx x, int code, int outer_code, int *ptotal, bool speed)
{
  /* This value is used for SYMBOL_REF etc where we want to pretend
     we have a full 32-bit constant.  */
  HOST_WIDE_INT i = 0x12345678;
  int total;

  switch (code)
    {
    case CONST_INT:
      i = INTVAL (x);
    do_int_costs:
      if (speed)
	{
	  if (outer_code == SET)
	    {
	      /* 16-bit integer loads have latency 1, 32-bit loads 2.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = COSTS_N_INSNS (1);
	      else
		total = COSTS_N_INSNS (2);
	    }
	  else
	    {
	      /* 16-bit integer operands don't affect latency;
		 24-bit and 32-bit operands add a cycle.  */
	      if (IN_RANGE (i, -32768, 32767))
		total = 0;
	      else
		total = COSTS_N_INSNS (1);
	    }
	}
      else
	{
	  if (outer_code == SET)
	    {
	      if (i == 0)
		total = 1;
	      else if (IN_RANGE (i, -128, 127))
		total = 2;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 3;
	      else
		total = 6;
	    }
	  else
	    {
	      /* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
	      if (IN_RANGE (i, -128, 127))
		total = 1;
	      else if (IN_RANGE (i, -32768, 32767))
		total = 2;
	      else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
		total = 3;
	      else
		total = 4;
	    }
	}
      goto alldone;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      goto do_int_costs;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	case UNSPEC_GOT:
	case UNSPEC_GOTOFF:
	case UNSPEC_PLT:
	case UNSPEC_GOTSYM_OFF:
	  /* The PIC unspecs also resolve to a 32-bit constant.  */
	  goto do_int_costs;

	default:
	  /* Assume any non-listed unspec is some sort of arithmetic.  */
	  goto do_arith_costs;
	}

    case PLUS:
      /* Notice the size difference of INC and INC4.  */
      if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
	{
	  i = INTVAL (XEXP (x, 1));
	  if (i == 1 || i == 4)
	    {
	      total = 1 + rtx_cost (XEXP (x, 0), PLUS, speed);
	      goto alldone;
	    }
	}
      goto do_arith_costs;

    case MINUS:
    case AND:
    case IOR:
    case XOR:
    case NOT:
    case NEG:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
    case COMPARE:
    do_arith_costs:
      total = (speed ? COSTS_N_INSNS (1) : 2);
      break;

    case ASHIFT:
      /* Notice the size difference of ASL2 and variants.  */
      if (!speed && CONST_INT_P (XEXP (x, 1)))
	switch (INTVAL (XEXP (x, 1)))
	  {
	  case 1:
	  case 2:
	    total = 1;
	    goto alldone;
	  case 3:
	  case 4:
	    total = 2;
	    goto alldone;
	  default:
	    break;
	  }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      total = (speed ? COSTS_N_INSNS (1) : 3);
      goto alldone;

    case MULT:
      total = (speed ? COSTS_N_INSNS (3) : 2);
      break;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = (speed ? COSTS_N_INSNS (39)
	       /* Include space to load+retrieve MDR.  */
	       : code == MOD || code == UMOD ? 6 : 4);
      break;

    case MEM:
      total = mn10300_address_cost (XEXP (x, 0), speed);
      if (speed)
	total = COSTS_N_INSNS (2 + total);
      goto alldone;

    default:
      /* Probably not implemented.  Assume external call.  */
      total = (speed ? COSTS_N_INSNS (10) : 7);
      break;
    }

 alldone:
  *ptotal = total;
  return true;
}
/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
   may access it using GOTOFF instead of GOT.  */

static void
mn10300_encode_section_info (tree decl, rtx rtl, int first ATTRIBUTE_UNUSED)
{
  rtx symbol;

  default_encode_section_info (decl, rtl, first);

  if (! MEM_P (rtl))
    return;

  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  if (flag_pic)
    SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
}

/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  return 6;
}
/* Worker function for TARGET_TRAMPOLINE_INIT.  */

static void
mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* This is a strict alignment target, which means that we play
     some games to make sure that the locations at which we need
     to store <chain> and <disp> wind up at aligned addresses.

	0x28 0x00			add 0,d0
		  0xfc 0xdd		mov chain,a1
	<chain>
	0xf8 0xed 0x00			btst 0,d1
		       0xdc		jmp fnaddr
	<disp>

     Note that the two extra insns are effectively nops; they
     clobber the flags but do not affect the contents of D0 or D1.  */

  disp = expand_binop (SImode, sub_optab, fnaddr,
		       plus_constant (XEXP (m_tramp, 0), 11),
		       NULL_RTX, 1, OPTAB_DIRECT);

  mem = adjust_address (m_tramp, SImode, 0);
  emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
  mem = adjust_address (m_tramp, SImode, 4);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
  mem = adjust_address (m_tramp, SImode, 12);
  emit_move_insn (mem, disp);
}
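
/* The 16 byte trampoline is thus laid out as four SImode words: the
   constant 0xddfc0028, the static chain value, the constant
   0xdc00edf8, and the jump displacement computed above (FNADDR minus
   the address 11 bytes into the trampoline).  */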
/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE *        file,
			     tree          thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta,
			     HOST_WIDE_INT vcall_offset,
			     tree          function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  fputc ('\n', file);
}
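/* As a rough illustration, with DELTA == 4, VCALL_OFFSET == 8, the THIS
   pointer arriving in d0 and a1 used as the scratch register, the code
   above emits something along the lines of:

	# Thunk Entry Point:
	add 4, d0
	mov d0, a1
	mov (a1), a1
	add 8, a1
	mov (a1), a1
	add a1, d0
	jmp <function>

   (the exact register names follow FIRST_ARGUMENT_REGNUM and
   FIRST_ADDRESS_REGNUM).  */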
/* Return true if mn10300_asm_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree    thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta        ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			     const_tree    function     ATTRIBUTE_UNUSED)
{
  return true;
}
bool
mn10300_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
{
  if (REGNO_REG_CLASS (regno) == FP_REGS
      || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
    /* Do not store integer values in FP registers.  */
    return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);

  if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
    {
      if (REGNO_REG_CLASS (regno) == DATA_REGS
	  || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
	  || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
	return GET_MODE_SIZE (mode) <= 4;
    }

  return false;
}
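/* To summarize the rules above: FP and FP accumulator registers may only
   hold floating-point modes, and only at even register numbers; everything
   else must go in data, extended or (on the AM33) address registers, must
   be at most four bytes wide, and must start at an even register unless the
   mode is exactly four bytes.  */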
bool
mn10300_modes_tieable (enum machine_mode mode1, enum machine_mode mode2)
{
  if (GET_MODE_CLASS (mode1) == MODE_FLOAT
      && GET_MODE_CLASS (mode2) != MODE_FLOAT)
    return false;

  if (GET_MODE_CLASS (mode2) == MODE_FLOAT
      && GET_MODE_CLASS (mode1) != MODE_FLOAT)
    return false;

  return (mode1 == mode2
	  || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4));
}
static int
cc_flags_for_mode (enum machine_mode mode)
{
  switch (mode)
    {
    case CCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;

    case CCZNCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;

    case CCZNmode:
      return CC_FLAG_Z | CC_FLAG_N;
static int
cc_flags_for_code (enum rtx_code code)
{
  switch (code)
    {
    case GT:	/* ~(Z|(N^V)) */
    case LE:	/*   Z|(N^V)  */
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;

    case GTU:	/* ~(C | Z) */
    case LEU:	/*   C | Z  */
      return CC_FLAG_Z | CC_FLAG_C;
enum machine_mode
mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  int req;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    return CC_FLOATmode;

  req = cc_flags_for_code (code);

  if (req & CC_FLAG_V)
    return CCmode;
  if (req & CC_FLAG_C)
    return CCZNCmode;

  return CCZNmode;
}
static bool
is_load_insn (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  return MEM_P (SET_SRC (PATTERN (insn)));
}

static bool
is_store_insn (rtx insn)
{
  if (GET_CODE (PATTERN (insn)) != SET)
    return false;

  return MEM_P (SET_DEST (PATTERN (insn)));
}
/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
{
  int timings = get_attr_timings (insn);

  if (GET_CODE (insn) == PARALLEL)
    insn = XVECEXP (insn, 0, 0);

  if (GET_CODE (dep) == PARALLEL)
    dep = XVECEXP (dep, 0, 0);

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && is_load_insn (dep)
      && is_store_insn (insn))
    return cost + 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
	   && ! is_store_insn (insn)
	   && GET_CODE (PATTERN (dep)) == SET
	   && GET_CODE (PATTERN (insn)) == SET
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) == MODE_FLOAT
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)
    return cost + 1;

  /* Resolve the conflict described in section 1-7-4 of
     Chapter 3 of the MN103E Series Instruction Manual:

     "When the preceding instruction is a CPU load or
      store instruction, a following FPU instruction
      cannot be executed until the CPU completes the
      latency period even though there are no register
      or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (REG_NOTE_KIND (link) == 0)
    return cost;

  /* Check that the instruction about to be scheduled is an FPU
     instruction.  */
  if (GET_CODE (PATTERN (dep)) != SET)
    return cost;

  if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! is_load_insn (insn) && ! is_store_insn (insn))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  return timings < 100 ? (timings % 10) : (timings % 100);
}
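/* Note on the encoding used by the "timings" attribute: two-digit values
   carry the insn's latency in their last decimal digit, while three-digit
   values carry it in their last two digits, which is what the
   (timings % 10) / (timings % 100) extraction above relies on.  */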
static void
mn10300_conditional_register_usage (void)
{
  unsigned int i;

  if (!TARGET_AM33)
    {
      for (i = FIRST_EXTENDED_REGNUM;
	   i <= LAST_EXTENDED_REGNUM; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }

  if (!TARGET_AM33_2)
    {
      for (i = FIRST_FP_REGNUM;
	   i <= LAST_FP_REGNUM; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }

  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
/* Worker function for TARGET_MD_ASM_CLOBBERS.
   We do this in the mn10300 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static tree
mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
			 tree inputs ATTRIBUTE_UNUSED,
			 tree clobbers)
{
  clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
			clobbers);
  return clobbers;
}
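/* In effect every inline asm is treated as clobbering the EPSW flags
   register, just as asm statements implicitly clobbered the condition
   codes under the old cc0 scheme.  */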
/* A helper function for splitting cbranch patterns after reload.  */

void
mn10300_split_cbranch (enum machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
{
  rtx flags, x;

  flags = gen_rtx_REG (cmp_mode, CC_REG);
  x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
  x = gen_rtx_SET (VOIDmode, flags, x);
  emit_insn (x);

  x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
  x = gen_rtx_SET (VOIDmode, pc_rtx, x);
  emit_jump_insn (x);
}
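/* That is, a combined compare-and-branch is rewritten as two insns:

	(set (reg CC_REG) (compare op0 op1))
	(set (pc) (if_then_else (<code> (reg CC_REG) (const_int 0))
				(label_ref ...) (pc)))

   where <code> is the comparison operator taken from the original
   cbranch pattern.  */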
/* A helper function for matching parallels that set the flags.  */

bool
mn10300_match_ccmode (rtx insn, enum machine_mode cc_mode)
{
  rtx op1, flags;
  enum machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 1);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}
int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);

      /* This is only a size win if we can use the asl2 insn.  Otherwise we
	 would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);

      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return -count;
    }
}
/* The data recorded for each insn when deciding whether it can be paired
   with another insn to form a LIW bundle.  */

struct liw_data
{
  enum attr_liw slot;
  enum attr_liw_op op;
  rtx dest;
  rtx src;
};
/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */

static bool
extract_bundle (rtx insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL_RTX)
    return false;

  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL_RTX)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  switch (pdata->op)
    {
    case LIW_OP_MOV:
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;

    case LIW_OP_CMP:
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;

    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept register
	 arguments.  */
      allow_consts = false;
      /* Fall through.  */

    default:
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  return allow_consts && satisfies_constraint_O (pdata->src);
}
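/* As a schematic example, two adjacent, independent insns such as

	mov d0, d1
	add d2, d3

   can be combined by mn10300_bundle_liw below into a single LIW insn
   (via gen_liw) that issues both operations together, one in each of the
   two LIW slots.  */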
/* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC
   generated the instructions with the assumption that LIW1 would be
   executed before LIW2, so we must check for overlaps between their
   sources and destinations.  */

static bool
check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
{
  /* Check for slot conflicts.  */
  if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
    return false;

  /* If either operation is a compare, then "dest" is really an input;
     the real destination is CC_REG.  So these instructions need
     different checks.  */

  /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
     check its values prior to any changes made by OP.  */
  if (pliw1->op == LIW_OP_CMP)
    {
      /* Two sequential comparisons means dead code, which ought to
	 have been eliminated given that bundling only happens with
	 optimization.  We cannot bundle them in any case.  */
      gcc_assert (pliw1->op != pliw2->op);
      return true;
    }

  /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being
     compared is the destination of OP, as the CMP will look at the old
     value, not the new one.  */
  if (pliw2->op == LIW_OP_CMP)
    {
      if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
	return false;

      if (REG_P (pliw2->src))
	return REGNO (pliw2->src) != REGNO (pliw1->dest);

      return true;
    }

  /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write
     to the same destination register.  */
  if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
    return false;

  /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the
     destination of OP1 is the source of OP2.  The exception is when OP1 is
     a MOVE instruction, when we can replace the source in OP2 with the
     source of OP1.  */
  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
    {
      if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
	{
	  if (! REG_P (pliw1->src)
	      && (pliw2->op == LIW_OP_AND
		  || pliw2->op == LIW_OP_OR
		  || pliw2->op == LIW_OP_XOR))
	    return false;

	  pliw2->src = pliw1->src;
	  return true;
	}

      return false;
    }

  /* Everything else is OK.  */
  return true;
}
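/* For example, "mov d0, d1" followed by "add d1, d2" can still be bundled:
   rewriting the ADD's source to d0 gives it the value that the MOV would
   have placed in d1, even though the two operations now issue together.  */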
/* Combine pairs of insns into LIW bundles.  */

static void
mn10300_bundle_liw (void)
{
  rtx r;

  for (r = get_insns (); r != NULL_RTX; r = next_nonnote_nondebug_insn (r))
    {
      rtx insn1, insn2;
      struct liw_data liw1, liw2;

      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
	continue;

      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
	continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
	continue;

      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
	{
	  struct liw_data temp;

	  temp = liw1;
	  liw1 = liw2;
	  liw2 = temp;
	}

      delete_insn (insn2);

      if (liw1.op == LIW_OP_CMP)
	insn2 = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
			     GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
	insn2 = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
			     GEN_INT (liw1.op));
      else
	insn2 = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
			 GEN_INT (liw1.op), GEN_INT (liw2.op));

      insn2 = emit_insn_after (insn2, insn1);
      delete_insn (insn1);
      r = insn2;
    }
}
#define DUMP(reason, insn)				\
  do							\
    {							\
      if (dump_file)					\
	{						\
	  fprintf (dump_file, reason "\n");		\
	  if (insn != NULL_RTX)				\
	    print_rtl_single (dump_file, insn);		\
	  fprintf (dump_file, "\n");			\
	}						\
    }							\
  while (0)
/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  */

static void
mn10300_insert_setlb_lcc (rtx label, rtx branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx insn;

      /* This label is used both as an entry point to the loop
	 and as a loop-back point for the loop.  We need to separate
	 these two functions so that the SETLB happens upon entry,
	 but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* The comparison must already have been split out of the branch,
     i.e. it must be testing the flags register.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  lcc = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (PATTERN (lcc), 0, 0), lcc, 0);
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", lcc);
  delete_insn (branch);
}
static bool
mn10300_block_contains_call (struct basic_block_def * block)
{
  rtx insn;

  FOR_BB_INSNS (block, insn)
    if (CALL_P (insn))
      return true;

  return false;
}
static bool
mn10300_loop_contains_call_insn (loop_p loop)
{
  basic_block * bbs;
  bool result = false;
  unsigned int i;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    if (mn10300_block_contains_call (bbs[i]))
      {
	result = true;
	break;
      }

  free (bbs);
  return result;
}
static void
mn10300_scan_for_setlb_lcc (void)
{
  struct loops loops;
  loop_iterator liter;
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  compute_bb_for_insn ();

  /* Find the loops.  */
  if (flow_loops_find (& loops) < 1)
    DUMP ("No loops found", NULL_RTX);
  current_loops = & loops;

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (liter, loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
	 then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
	reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
	/* FIXME: We could handle loops that span multiple blocks,
	   but this requires a lot more work tracking down the branches
	   that need altering, so for now keep things simple.  */
	reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
	reason = "it contains CALL insns";
      else
	{
	  rtx branch = BB_END (loop->latch);

	  gcc_assert (JUMP_P (branch));
	  if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
	    /* We cannot optimize tablejumps and the like.  */
	    /* FIXME: We could handle unconditional jumps.  */
	    reason = "it is not a simple loop";
	  else
	    {
	      rtx label;

	      if (dump_file)
		flow_loop_dump (loop, dump_file, NULL, 0);

	      label = BB_HEAD (loop->header);
	      gcc_assert (LABEL_P (label));

	      mn10300_insert_setlb_lcc (label, branch);
	    }
	}

      if (dump_file && reason != NULL)
	fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
		 INSN_UID (BB_HEAD (loop->header)),
		 reason);
    }

#if 0 /* FIXME: We should free the storage we allocated, but
	 for some unknown reason this leads to seg-faults.  */
  FOR_EACH_LOOP (liter, loop, 0)
    free_simple_loop_desc (loop);

  flow_loops_free (current_loops);
#endif

  current_loops = NULL;

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}
static void
mn10300_reorg (void)
{
  /* These are optimizations, so only run them if optimizing.  */
  if (TARGET_AM33 && (optimize > 0 || optimize_size))
    {
      if (TARGET_ALLOW_SETLB)
	mn10300_scan_for_setlb_lcc ();

      if (TARGET_ALLOW_LIW)
	mn10300_bundle_liw ();
    }
}
/* Initialize the GCC target structure.  */

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers

#undef TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

struct gcc_target targetm = TARGET_INITIALIZER;