+2002-08-27 Nick Clifton <nickc@redhat.com>
+ Catherine Moore <clm@redhat.com>
+ Jim Wilson <wilson@cygnus.com>
+
+ * config.gcc: Add v850e-*-* target.
+ Add --with-cpu= support for v850.
+ * config/v850/lib1funcs.asm: Add v850e callt functions.
+ * config/v850/v850.h: Add support for v850e target.
+ * config/v850/v850.c: Add functions to support v850e target.
+ * config/v850/v850-protos.h: Add prototypes for new functions in v850.c.
+ * config/v850/v850.md: Add patterns for v850e instructions.
+ * doc/invoke.texi: Document new v850e command line switches.
+
Tue Aug 27 18:30:47 2002 J"orn Rennecke <joern.rennecke@superh.com>
Aldy Hernandez <aldyh at redhat dot com>
c_target_objs="v850-c.o"
cxx_target_objs="v850-c.o"
;;
+v850e-*-*)
+ target_cpu_default="TARGET_CPU_v850e"
+ tm_file="dbxelf.h elfos.h svr4.h v850/v850.h"
+ tm_p_file=v850/v850-protos.h
+ tmake_file=v850/t-v850
+ md_file=v850/v850.md
+ out_file=v850/v850.c
+ if test x$stabs = xyes
+ then
+ tm_file="${tm_file} dbx.h"
+ fi
+ use_collect2=no
+ c_target_objs="v850-c.o"
+ cxx_target_objs="v850-c.o"
+ ;;
v850-*-*)
target_cpu_default="TARGET_CPU_generic"
tm_file="dbxelf.h elfos.h svr4.h ${tm_file}"
;;
esac
;;
+v850*-*-*)
+ case "x$with_cpu" in
+ x)
+ ;;
+ v850e)
+ target_cpu_default2="TARGET_CPU_$with_cpu"
+ ;;
+ *)
+ if test x$pass2done = xyes
+ then
+ echo "Unknown cpu used with --with-cpu=$with_cpu" 1>&2
+ exit 1
+ fi
+ ;;
+ esac
+ ;;
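+# An illustrative configure invocation exercising the new --with-cpu
+# handling (the target triple here is an example, not part of this patch):
+#
+#   .../configure --target=v850-elf --with-cpu=v850e
+#
+# Any --with-cpu value other than v850e is rejected by the check above.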
esac
if test "$target_cpu_default2" != ""
/* libgcc routines for NEC V850.
- Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 2002 Free Software Foundation, Inc.
This file is part of GNU CC.
.type __save_r31,@function
/* Allocate space and save register 31 on the stack */
/* Also allocate space for the argument save area */
- /* Called via: jalr __save_r29_r31,r10 */
+ /* Called via: jalr __save_r31,r10 */
__save_r31:
addi -20,sp,sp
st.w r31,16[sp]
ld.w 16[sp],r31
addi 20,sp,sp
jmp [r31]
- .size __return_r29_r31,.-__return_r29_r31
+ .size __return_r31,.-__return_r31
#endif /* L_save_31c */
#ifdef L_save_varargs
jmp [r10]
.size __restore_all_interrupt,.-__restore_all_interrupt
#endif /* L_save_all_interrupt */
+
+
+#if defined __v850e__
+#ifdef L_callt_save_r2_r29
+ /* Put these functions into the call table area. */
+ .call_table_text
+
+ /* Allocate space and save registers 2, 20 .. 29 on the stack. */
+ /* Called via: callt ctoff(__callt_save_r2_r29). */
+ .align 2
+.L_save_r2_r29:
+ add -4, sp
+ st.w r2, 0[sp]
+ prepare {r20 - r29}, 0
+ ctret
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: callt ctoff(__callt_return_r2_r29). */
+ .align 2
+.L_return_r2_r29:
+	dispose 0, {r20 - r29}
+ ld.w 0[sp], r2
+ add 4, sp
+ jmp [r31]
+
+ /* Place the offsets of the start of these routines into the call table. */
+ .call_table_data
+
+ .global __callt_save_r2_r29
+ .type __callt_save_r2_r29,@function
+__callt_save_r2_r29: .short ctoff(.L_save_r2_r29)
+
+ .global __callt_return_r2_r29
+ .type __callt_return_r2_r29,@function
+__callt_return_r2_r29: .short ctoff(.L_return_r2_r29)
+
+#endif /* L_callt_save_r2_r29 */
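+
+/* An illustrative use (not part of this file): a function that saves
+ r2 and r20 .. r29 would reference these stubs from its prologue and
+ epilogue roughly as follows:
+
+ prologue:  callt ctoff(__callt_save_r2_r29)
+ ...
+ epilogue:  callt ctoff(__callt_return_r2_r29)
+
+ Each callt is only 2 bytes, replacing the longer inline prepare and
+ dispose sequences. */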
+
+#ifdef L_callt_save_r2_r31
+ /* Put these functions into the call table area. */
+ .call_table_text
+
+ /* Allocate space and save registers 2 and 20 .. 29, 31 on the stack. */
+ /* Also allocate space for the argument save area. */
+ /* Called via: callt ctoff(__callt_save_r2_r31). */
+ .align 2
+.L_save_r2_r31:
+ add -4, sp
+ st.w r2, 0[sp]
+ prepare {r20 - r29, r31}, 4
+ ctret
+
+ /* Restore saved registers, deallocate stack and return to the user. */
+ /* Called via: callt ctoff(__callt_return_r2_r31). */
+ .align 2
+.L_return_r2_r31:
+ dispose 4, {r20 - r29, r31}
+ ld.w 0[sp], r2
+ addi 4, sp, sp
+ jmp [r31]
+
+ /* Place the offsets of the start of these routines into the call table. */
+ .call_table_data
+
+ .global __callt_save_r2_r31
+ .type __callt_save_r2_r31,@function
+__callt_save_r2_r31: .short ctoff(.L_save_r2_r31)
+
+ .global __callt_return_r2_r31
+ .type __callt_return_r2_r31,@function
+__callt_return_r2_r31: .short ctoff(.L_return_r2_r31)
+
+#endif /* L_callt_save_r2_r31 */
+
+
+#ifdef L_callt_save_r6_r9
+ /* Put these functions into the call table area. */
+ .call_table_text
+
+ /* Save registers r6 - r9 onto the stack in the space reserved for them.
+	Used by variable argument functions.
+ Called via: callt ctoff(__callt_save_r6_r9). */
+ .align 2
+.L_save_r6_r9:
+ mov ep,r1
+ mov sp,ep
+ sst.w r6,0[ep]
+ sst.w r7,4[ep]
+ sst.w r8,8[ep]
+ sst.w r9,12[ep]
+ mov r1,ep
+ ctret
+
+	/* Place the offset of the start of this routine into the call table. */
+ .call_table_data
+
+ .global __callt_save_r6_r9
+ .type __callt_save_r6_r9,@function
+__callt_save_r6_r9: .short ctoff(.L_save_r6_r9)
+#endif /* L_callt_save_r6_r9 */
+
+
+#ifdef L_callt_save_interrupt
+	/* Put these functions into the call table area. */
+ .call_table_text
+
+ /* Save registers r1, ep, gp, r10 on stack and load up with expected values. */
+ /* Called via: callt ctoff(__callt_save_interrupt). */
+ .align 2
+.L_save_interrupt:
+	/* SP has already been moved before callt ctoff(__callt_save_interrupt). */
+ /* addi -24, sp, sp */
+ st.w ep, 0[sp]
+ st.w gp, 4[sp]
+ st.w r1, 8[sp]
+	/* R10 has already been saved before callt ctoff(__callt_save_interrupt). */
+ /* st.w r10, 12[sp] */
+ mov hilo(__ep),ep
+ mov hilo(__gp),gp
+ ctret
+
+	/* Place the offset of the start of the routine into the call table. */
+ .call_table_data
+ .global __callt_save_interrupt
+ .type __callt_save_interrupt,@function
+__callt_save_interrupt: .short ctoff(.L_save_interrupt)
+
+ .call_table_text
+
+ /* Restore saved registers, deallocate stack and return from the interrupt. */
+	/* Called via: callt ctoff(__callt_return_interrupt). */
+ .text
+ .align 2
+ .globl __return_interrupt
+ .type __return_interrupt,@function
+.L_return_interrupt:
+ ld.w 20[sp], r1
+ ldsr r1, ctpsw
+ ld.w 16[sp], r1
+ ldsr r1, ctpc
+ ld.w 12[sp], r10
+ ld.w 8[sp], r1
+ ld.w 4[sp], gp
+ ld.w 0[sp], ep
+ addi 24, sp, sp
+ reti
+
+	/* Place the offset of the start of the routine into the call table. */
+ .call_table_data
+
+ .global __callt_return_interrupt
+ .type __callt_return_interrupt,@function
+__callt_return_interrupt: .short ctoff(.L_return_interrupt)
+
+#endif /* L_callt_save_interrupt */
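+
+/* For reference, the matching compiler-side sequence (see the
+ callt_save_interrupt pattern in v850.md) adjusts the stack and saves
+ r10, CTPC and CTPSW before the callt:
+
+	addi -24, sp, sp
+	st.w r10, 12[sp]
+	stsr ctpc, r10
+	st.w r10, 16[sp]
+	stsr ctpsw, r10
+	st.w r10, 20[sp]
+	callt ctoff(__callt_save_interrupt)
+
+ which is why .L_save_interrupt itself only stores ep, gp and r1. */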
+
+#ifdef L_callt_save_all_interrupt
+	/* Put these functions into the call table area. */
+ .call_table_text
+
+ /* Save all registers except for those saved in __save_interrupt. */
+ /* Allocate enough stack for all of the registers & 16 bytes of space. */
+ /* Called via: callt ctoff(__callt_save_all_interrupt). */
+ .align 2
+.L_save_all_interrupt:
+ addi -60, sp, sp
+ mov ep, r1
+ mov sp, ep
+ sst.w r2, 56[ep]
+ sst.w r5, 52[ep]
+ sst.w r6, 48[ep]
+ sst.w r7, 44[ep]
+ sst.w r8, 40[ep]
+ sst.w r9, 36[ep]
+ sst.w r11, 32[ep]
+ sst.w r12, 28[ep]
+ sst.w r13, 24[ep]
+ sst.w r14, 20[ep]
+ sst.w r15, 16[ep]
+ sst.w r16, 12[ep]
+ sst.w r17, 8[ep]
+ sst.w r18, 4[ep]
+ sst.w r19, 0[ep]
+ mov r1, ep
+
+ prepare {r20 - r29, r31}, 4
+ ctret
+
+	/* Restore all registers saved in __save_all_interrupt */
+	/* and deallocate the stack space. */
+ /* Called via: callt ctoff(__callt_restore_all_interrupt). */
+ .align 2
+.L_restore_all_interrupt:
+ dispose 4, {r20 - r29, r31}
+
+ mov ep, r1
+ mov sp, ep
+ sld.w 0 [ep], r19
+ sld.w 4 [ep], r18
+ sld.w 8 [ep], r17
+ sld.w 12[ep], r16
+ sld.w 16[ep], r15
+ sld.w 20[ep], r14
+ sld.w 24[ep], r13
+ sld.w 28[ep], r12
+ sld.w 32[ep], r11
+ sld.w 36[ep], r9
+ sld.w 40[ep], r8
+ sld.w 44[ep], r7
+ sld.w 48[ep], r6
+ sld.w 52[ep], r5
+ sld.w 56[ep], r2
+ mov r1, ep
+ addi 60, sp, sp
+ ctret
+
+ /* Place the offsets of the start of these routines into the call table. */
+ .call_table_data
+
+ .global __callt_save_all_interrupt
+ .type __callt_save_all_interrupt,@function
+__callt_save_all_interrupt: .short ctoff(.L_save_all_interrupt)
+
+ .global __callt_restore_all_interrupt
+ .type __callt_restore_all_interrupt,@function
+__callt_restore_all_interrupt: .short ctoff(.L_restore_all_interrupt)
+
+#endif /* L_callt_save_all_interrupt */
+
+
+#define MAKE_CALLT_FUNCS( START ) \
+ .call_table_text ;\
+ .align 2 ;\
+ /* Allocate space and save registers START .. r29 on the stack. */ ;\
+ /* Called via: callt ctoff(__callt_save_START_r29). */ ;\
+.L_save_##START##_r29: ;\
+ prepare { START - r29 }, 0 ;\
+ ctret ;\
+ ;\
+ /* Restore saved registers, deallocate stack and return. */ ;\
+	/* Called via: callt ctoff(__callt_return_START_r29). */	;\
+ .align 2 ;\
+.L_return_##START##_r29: ;\
+ dispose 0, { START - r29 }, r31 ;\
+ ;\
+ /* Place the offsets of the start of these funcs into the call table. */;\
+ .call_table_data ;\
+ ;\
+ .global __callt_save_##START##_r29 ;\
+ .type __callt_save_##START##_r29,@function ;\
+__callt_save_##START##_r29: .short ctoff(.L_save_##START##_r29 ) ;\
+ ;\
+ .global __callt_return_##START##_r29 ;\
+ .type __callt_return_##START##_r29,@function ;\
+__callt_return_##START##_r29: .short ctoff(.L_return_##START##_r29 )
+
+
+#define MAKE_CALLT_CFUNCS( START ) \
+ .call_table_text ;\
+ .align 2 ;\
+ /* Allocate space and save registers START .. r31 on the stack. */ ;\
+ /* Called via: callt ctoff(__callt_save_START_r31c). */ ;\
+.L_save_##START##_r31c: ;\
+ prepare { START - r29, r31}, 4 ;\
+ ctret ;\
+ ;\
+ /* Restore saved registers, deallocate stack and return. */ ;\
+	/* Called via: callt ctoff(__callt_return_START_r31c). */	;\
+ .align 2 ;\
+.L_return_##START##_r31c: ;\
+ dispose 4, { START - r29, r31}, r31 ;\
+ ;\
+ /* Place the offsets of the start of these funcs into the call table. */;\
+ .call_table_data ;\
+ ;\
+ .global __callt_save_##START##_r31c ;\
+ .type __callt_save_##START##_r31c,@function ;\
+__callt_save_##START##_r31c: .short ctoff(.L_save_##START##_r31c ) ;\
+ ;\
+ .global __callt_return_##START##_r31c ;\
+ .type __callt_return_##START##_r31c,@function ;\
+__callt_return_##START##_r31c: .short ctoff(.L_return_##START##_r31c )
+
+
+#ifdef L_callt_save_20
+ MAKE_CALLT_FUNCS (r20)
+#endif
+#ifdef L_callt_save_21
+ MAKE_CALLT_FUNCS (r21)
+#endif
+#ifdef L_callt_save_22
+ MAKE_CALLT_FUNCS (r22)
+#endif
+#ifdef L_callt_save_23
+ MAKE_CALLT_FUNCS (r23)
+#endif
+#ifdef L_callt_save_24
+ MAKE_CALLT_FUNCS (r24)
+#endif
+#ifdef L_callt_save_25
+ MAKE_CALLT_FUNCS (r25)
+#endif
+#ifdef L_callt_save_26
+ MAKE_CALLT_FUNCS (r26)
+#endif
+#ifdef L_callt_save_27
+ MAKE_CALLT_FUNCS (r27)
+#endif
+#ifdef L_callt_save_28
+ MAKE_CALLT_FUNCS (r28)
+#endif
+#ifdef L_callt_save_29
+ MAKE_CALLT_FUNCS (r29)
+#endif
+
+#ifdef L_callt_save_20c
+ MAKE_CALLT_CFUNCS (r20)
+#endif
+#ifdef L_callt_save_21c
+ MAKE_CALLT_CFUNCS (r21)
+#endif
+#ifdef L_callt_save_22c
+ MAKE_CALLT_CFUNCS (r22)
+#endif
+#ifdef L_callt_save_23c
+ MAKE_CALLT_CFUNCS (r23)
+#endif
+#ifdef L_callt_save_24c
+ MAKE_CALLT_CFUNCS (r24)
+#endif
+#ifdef L_callt_save_25c
+ MAKE_CALLT_CFUNCS (r25)
+#endif
+#ifdef L_callt_save_26c
+ MAKE_CALLT_CFUNCS (r26)
+#endif
+#ifdef L_callt_save_27c
+ MAKE_CALLT_CFUNCS (r27)
+#endif
+#ifdef L_callt_save_28c
+ MAKE_CALLT_CFUNCS (r28)
+#endif
+#ifdef L_callt_save_29c
+ MAKE_CALLT_CFUNCS (r29)
+#endif
+
+
+#ifdef L_callt_save_31c
+ .call_table_text
+ .align 2
+ /* Allocate space and save register r31 on the stack. */
+ /* Called via: callt ctoff(__callt_save_r31c). */
+.L_callt_save_r31c:
+ prepare {r31}, 4
+ ctret
+
+ /* Restore saved registers, deallocate stack and return. */
+	/* Called via: callt ctoff(__callt_return_r31c). */
+ .align 2
+.L_callt_return_r31c:
+ dispose 4, {r31}, r31
+
+ /* Place the offsets of the start of these funcs into the call table. */
+ .call_table_data
+
+ .global __callt_save_r31c
+ .type __callt_save_r31c,@function
+__callt_save_r31c: .short ctoff(.L_callt_save_r31c)
+
+ .global __callt_return_r31c
+ .type __callt_return_r31c,@function
+__callt_return_r31c: .short ctoff(.L_callt_return_r31c)
+#endif
+
+#endif /* __v850e__ */
_save_31c \
_save_varargs \
_save_interrupt \
- _save_all_interrupt
+ _save_all_interrupt \
+ _callt_save_20 \
+ _callt_save_21 \
+ _callt_save_22 \
+ _callt_save_23 \
+ _callt_save_24 \
+ _callt_save_25 \
+ _callt_save_26 \
+ _callt_save_27 \
+ _callt_save_28 \
+ _callt_save_29 \
+ _callt_save_20c \
+ _callt_save_21c \
+ _callt_save_22c \
+ _callt_save_23c \
+ _callt_save_24c \
+ _callt_save_25c \
+ _callt_save_26c \
+ _callt_save_27c \
+ _callt_save_28c \
+ _callt_save_29c \
+ _callt_save_31c \
+ _callt_save_varargs \
+ _callt_save_interrupt \
+ _callt_save_all_interrupt \
+ _callt_save_r2_r29 \
+ _callt_save_r2_r31 \
+ _callt_save_r6_r9
# We want fine grained libraries, so use the new code to build the
# floating point emulation libraries.
cat $(srcdir)/config/fp-bit.c >> fp-bit.c
TCFLAGS = -Wa,-mwarn-signed-overflow -Wa,-mwarn-unsigned-overflow
+# Create non-target specific versions of the libraries
+TCFLAGS += -mno-app-regs -msmall-sld -mv850 -D__v850e__ -Wa,-mv850any
v850-c.o: $(srcdir)/config/v850/v850-c.c $(RTL_H) $(TREE_H) $(CONFIG_H)
$(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
extern char * construct_save_jarl PARAMS ((rtx));
extern char * construct_restore_jr PARAMS ((rtx));
#ifdef HAVE_MACHINE_MODES
+extern char * construct_dispose_instruction PARAMS ((rtx));
+extern char * construct_prepare_instruction PARAMS ((rtx));
+extern int pattern_is_ok_for_prepare PARAMS ((rtx, Mmode));
+extern int pattern_is_ok_for_dispose PARAMS ((rtx, Mmode));
extern int ep_memory_operand PARAMS ((rtx, Mmode, int));
extern int reg_or_0_operand PARAMS ((rtx, Mmode));
extern int reg_or_int5_operand PARAMS ((rtx, Mmode));
}
}
}
+
+ /* Make sure that the US_BIT_SET mask has been correctly initialised. */
+ if ((target_flags & MASK_US_MASK_SET) == 0)
+ {
+ target_flags |= MASK_US_MASK_SET;
+ target_flags &= ~MASK_US_BIT_SET;
+ }
}
\f
else
size = GET_MODE_SIZE (mode);
+ if (size < 1)
+ return 0;
+
if (type)
align = TYPE_ALIGN (type) / BITS_PER_UNIT;
else
}
}
+/* When assemble_integer is used to emit the offsets for a switch
+ table it can encounter (TRUNCATE:HI (MINUS:SI (LABEL_REF:SI) (LABEL_REF:SI))).
+ output_addr_const will normally barf at this, but it is OK to omit
+ the truncate and just emit the difference of the two labels. The
+ .hword directive will automatically handle the truncation for us.
+
+ Returns 1 if rtx was handled, 0 otherwise. */
+
+int
+v850_output_addr_const_extra (file, x)
+ FILE * file;
+ rtx x;
+{
+ if (GET_CODE (x) != TRUNCATE)
+ return 0;
+
+ x = XEXP (x, 0);
+
+ /* We must also handle the case where the switch table was passed a
+ constant value and so has been collapsed. In this case the first
+ label will have been deleted. In such a case it is OK to emit
+ nothing, since the table will not be used.
+ (cf gcc.c-torture/compile/990801-1.c). */
+ if (GET_CODE (x) == MINUS
+ && GET_CODE (XEXP (x, 0)) == LABEL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == CODE_LABEL
+ && INSN_DELETED_P (XEXP (XEXP (x, 0), 0)))
+ return 1;
+
+ output_addr_const (file, x);
+ return 1;
+}
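+
+/* A sketch of the effect (the label numbers are illustrative): instead
+ of rejecting the TRUNCATE, the two byte switch table entry is emitted
+ as
+
+	.hword .L5-.L3
+
+ and the .hword directive performs the truncation. */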
\f
/* Return appropriate code to load up a 1, 2, or 4 integer/floating
point value. */
{
HOST_WIDE_INT value = INTVAL (src);
- if (CONST_OK_FOR_J (value)) /* signed 5 bit immediate */
+ if (CONST_OK_FOR_J (value)) /* Signed 5 bit immediate. */
return "mov %1,%0";
- else if (CONST_OK_FOR_K (value)) /* signed 16 bit immediate */
+ else if (CONST_OK_FOR_K (value)) /* Signed 16 bit immediate. */
return "movea lo(%1),%.,%0";
- else if (CONST_OK_FOR_L (value)) /* upper 16 bits were set */
+ else if (CONST_OK_FOR_L (value)) /* Upper 16 bits were set. */
return "movhi hi(%1),%.,%0";
- else /* random constant */
+ /* A random constant. */
+ else if (TARGET_V850E)
+ return "mov %1,%0";
+ else
return "movhi hi(%1),%.,%0\n\tmovea lo(%1),%0,%0";
}
HOST_WIDE_INT high, low;
const_double_split (src, &high, &low);
- if (CONST_OK_FOR_J (high)) /* signed 5 bit immediate */
+
+ if (CONST_OK_FOR_J (high)) /* Signed 5 bit immediate. */
return "mov %F1,%0";
- else if (CONST_OK_FOR_K (high)) /* signed 16 bit immediate */
+ else if (CONST_OK_FOR_K (high)) /* Signed 16 bit immediate. */
return "movea lo(%F1),%.,%0";
- else if (CONST_OK_FOR_L (high)) /* upper 16 bits were set */
+ else if (CONST_OK_FOR_L (high)) /* Upper 16 bits were set. */
return "movhi hi(%F1),%.,%0";
- else /* random constant */
+ /* A random constant. */
+ else if (TARGET_V850E)
+ return "mov %F1,%0";
+ else
return "movhi hi(%F1),%.,%0\n\tmovea lo(%F1),%0,%0";
}
|| GET_CODE (src) == SYMBOL_REF
|| GET_CODE (src) == CONST)
{
- return "movhi hi(%1),%.,%0\n\tmovea lo(%1),%0,%0";
+ if (TARGET_V850E)
+ return "mov hilo(%1),%0";
+ else
+ return "movhi hi(%1),%.,%0\n\tmovea lo(%1),%0,%0";
}
else if (GET_CODE (src) == HIGH)
switch (mode)
{
case QImode:
- max_offset = (1 << 7);
+ if (TARGET_SMALL_SLD)
+ max_offset = (1 << 4);
+ else if (TARGET_V850E
+ && ( ( unsignedp && ! TARGET_US_BIT_SET)
+ || (! unsignedp && TARGET_US_BIT_SET)))
+ max_offset = (1 << 4);
+ else
+ max_offset = (1 << 7);
break;
case HImode:
- max_offset = (1 << 8);
+ if (TARGET_SMALL_SLD)
+ max_offset = (1 << 5);
+ else if (TARGET_V850E
+ && ( ( unsignedp && ! TARGET_US_BIT_SET)
+ || (! unsignedp && TARGET_US_BIT_SET)))
+ max_offset = (1 << 5);
+ else
+ max_offset = (1 << 8);
break;
case SImode:
return register_operand (op, mode);
}
+/* Return true if OP is either a register or a signed nine bit integer. */
+
+int
+reg_or_int9_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ return CONST_OK_FOR_O (INTVAL (op));
+
+ return register_operand (op, mode);
+}
+
+/* Return true if OP is either a register or a const integer. */
+
+int
+reg_or_const_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == CONST_INT)
+ return TRUE;
+
+ return register_operand (op, mode);
+}
+
/* Return true if OP is a valid call operand. */
int
else if (GET_CODE (SET_SRC (pattern)) == MEM)
p_mem = &SET_SRC (pattern);
+ else if (GET_CODE (SET_SRC (pattern)) == SIGN_EXTEND
+ && GET_CODE (XEXP (SET_SRC (pattern), 0)) == MEM)
+ p_mem = &XEXP (SET_SRC (pattern), 0);
+
+ else if (GET_CODE (SET_SRC (pattern)) == ZERO_EXTEND
+ && GET_CODE (XEXP (SET_SRC (pattern), 0)) == MEM)
+ {
+ p_mem = &XEXP (SET_SRC (pattern), 0);
+ unsignedp = TRUE;
+ }
else
p_mem = (rtx *)0;
else if (GET_CODE (src) == MEM)
mem = src;
+ else if (GET_CODE (src) == SIGN_EXTEND
+ && GET_CODE (XEXP (src, 0)) == MEM)
+ mem = XEXP (src, 0);
+
+ else if (GET_CODE (src) == ZERO_EXTEND
+ && GET_CODE (XEXP (src, 0)) == MEM)
+ {
+ mem = XEXP (src, 0);
+ unsignedp = TRUE;
+ }
else
mem = NULL_RTX;
/* Save/setup global registers for interrupt functions right now. */
if (interrupt_handler)
{
+ if (TARGET_V850E && ! TARGET_DISABLE_CALLT)
+ emit_insn (gen_callt_save_interrupt ());
+ else
emit_insn (gen_save_interrupt ());
-
+
actual_fsize -= INTERRUPT_FIXED_SAVE_SIZE;
if (((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
{
if (TARGET_PROLOG_FUNCTION)
{
- emit_insn (gen_save_r6_r9 ());
+ if (TARGET_V850E && ! TARGET_DISABLE_CALLT)
+ emit_insn (gen_save_r6_r9_v850e ());
+ else
+ emit_insn (gen_save_r6_r9 ());
}
else
{
/* Special case interrupt functions that save all registers for a call. */
if (interrupt_handler && ((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
{
- emit_insn (gen_save_all_interrupt ());
+ if (TARGET_V850E && ! TARGET_DISABLE_CALLT)
+ emit_insn (gen_callt_save_all_interrupt ());
+ else
+ emit_insn (gen_save_all_interrupt ());
}
else
{
for a call. */
if (interrupt_handler && ((1L << LINK_POINTER_REGNUM) & reg_saved) != 0)
{
- emit_insn (gen_restore_all_interrupt ());
+ if (TARGET_V850E && ! TARGET_DISABLE_CALLT)
+ emit_insn (gen_callt_restore_all_interrupt ());
+ else
+ emit_insn (gen_restore_all_interrupt ());
}
else
{
/* And return or use reti for interrupt handlers. */
if (interrupt_handler)
- emit_jump_insn (gen_restore_interrupt ());
+ {
+ if (TARGET_V850E && ! TARGET_DISABLE_CALLT)
+ emit_insn (gen_callt_return_interrupt ());
+ else
+ emit_jump_insn (gen_return_interrupt ());
+ }
else if (actual_fsize)
emit_jump_insn (gen_return_internal ());
else
rtx op;
enum machine_mode ATTRIBUTE_UNUSED mode;
{
- /* The save/restore routines can only cope with registers 2, and 20 - 31 */
- return (GET_CODE (op) == REG)
- && (((REGNO (op) >= 20) && REGNO (op) <= 31)
- || REGNO (op) == 2);
+ /* The save/restore routines can only cope with registers 20 - 31. */
+ return ((GET_CODE (op) == REG)
+ && (((REGNO (op) >= 20) && REGNO (op) <= 31)));
}
/* Return non-zero if the given RTX is suitable for collapsing into
}
}
}
+
+/* Return non-zero if the given RTX is suitable
+ for collapsing into a DISPOSE instruction. */
+
+int
+pattern_is_ok_for_dispose (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ int count = XVECLEN (op, 0);
+ int i;
+
+ /* If there are no registers to restore then
+ the dispose instruction is not suitable. */
+ if (count <= 2)
+ return 0;
+
+ /* The pattern matching has already established that we are performing a
+ function epilogue and that we are popping at least one register. We must
+ now check the remaining entries in the vector to make sure that they are
+ also register pops. There is no good reason why there should ever be
+ anything else in this vector, but being paranoid always helps...
+
+ The test below performs the C equivalent of this machine description
+ pattern match:
+
+ (set (match_operand:SI n "register_is_ok_for_epilogue" "r")
+ (mem:SI (plus:SI (reg:SI 3)
+ (match_operand:SI n "immediate_operand" "i"))))
+ */
+
+ for (i = 3; i < count; i++)
+ {
+ rtx vector_element = XVECEXP (op, 0, i);
+ rtx dest;
+ rtx src;
+ rtx plus;
+
+ if (GET_CODE (vector_element) != SET)
+ return 0;
+
+ dest = SET_DEST (vector_element);
+ src = SET_SRC (vector_element);
+
+ if ( GET_CODE (dest) != REG
+ || GET_MODE (dest) != SImode
+ || ! register_is_ok_for_epilogue (dest, SImode)
+ || GET_CODE (src) != MEM
+ || GET_MODE (src) != SImode)
+ return 0;
+
+ plus = XEXP (src, 0);
+
+ if ( GET_CODE (plus) != PLUS
+ || GET_CODE (XEXP (plus, 0)) != REG
+ || GET_MODE (XEXP (plus, 0)) != SImode
+ || REGNO (XEXP (plus, 0)) != STACK_POINTER_REGNUM
+ || GET_CODE (XEXP (plus, 1)) != CONST_INT)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Construct a DISPOSE instruction that is the equivalent of
+ the given RTX. We have already verified that this should
+ be possible. */
+
+char *
+construct_dispose_instruction (op)
+ rtx op;
+{
+ int count = XVECLEN (op, 0);
+ int stack_bytes;
+ unsigned long int mask;
+ int i;
+ static char buff[ 100 ]; /* XXX */
+ int use_callt = 0;
+
+ if (count <= 2)
+ {
+ error ("Bogus DISPOSE construction: %d\n", count);
+ return NULL;
+ }
+
+ /* Work out how many bytes to pop off the
+ stack before retrieving registers. */
+ if (GET_CODE (XVECEXP (op, 0, 1)) != SET)
+ abort ();
+ if (GET_CODE (SET_SRC (XVECEXP (op, 0, 1))) != PLUS)
+ abort ();
+ if (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1)) != CONST_INT)
+ abort ();
+
+ stack_bytes = INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1));
+
+ /* Each pop will remove 4 bytes from the stack... */
+ stack_bytes -= (count - 2) * 4;
+
+ /* Make sure that the amount we are popping
+ will fit into the DISPOSE instruction. */
+ if (stack_bytes > 128)
+ {
+ error ("Too much stack space to dispose of: %d", stack_bytes);
+ return NULL;
+ }
+
+  /* Now compute the bit mask of registers to restore. */
+ mask = 0;
+
+ for (i = 2; i < count; i++)
+ {
+ rtx vector_element = XVECEXP (op, 0, i);
+
+ if (GET_CODE (vector_element) != SET)
+ abort ();
+ if (GET_CODE (SET_DEST (vector_element)) != REG)
+ abort ();
+ if (! register_is_ok_for_epilogue (SET_DEST (vector_element), SImode))
+ abort ();
+
+ if (REGNO (SET_DEST (vector_element)) == 2)
+ use_callt = 1;
+ else
+ mask |= 1 << REGNO (SET_DEST (vector_element));
+ }
+
+ if (! TARGET_DISABLE_CALLT
+ && (use_callt || stack_bytes == 0 || stack_bytes == 16))
+ {
+ if (use_callt)
+ {
+ sprintf (buff, "callt ctoff(__callt_return_r2_r%d)", (mask & (1 << 31)) ? 31 : 29);
+ return buff;
+ }
+ else
+ {
+ for (i = 20; i < 32; i++)
+ if (mask & (1 << i))
+ break;
+
+ if (i == 31)
+ sprintf (buff, "callt ctoff(__callt_return_r31c)");
+ else
+ sprintf (buff, "callt ctoff(__callt_return_r%d_r%d%s)",
+ i, (mask & (1 << 31)) ? 31 : 29, stack_bytes ? "c" : "");
+ }
+ }
+ else
+ {
+ static char regs [100]; /* XXX */
+ int done_one;
+
+ /* Generate the DISPOSE instruction. Note we could just issue the
+ bit mask as a number as the assembler can cope with this, but for
+ the sake of our readers we turn it into a textual description. */
+ regs[0] = 0;
+ done_one = 0;
+
+ for (i = 20; i < 32; i++)
+ {
+ if (mask & (1 << i))
+ {
+ int first;
+
+ if (done_one)
+ strcat (regs, ", ");
+ else
+ done_one = 1;
+
+ first = i;
+ strcat (regs, reg_names[ first ]);
+
+ for (i++; i < 32; i++)
+ if ((mask & (1 << i)) == 0)
+ break;
+
+ if (i > first + 1)
+ {
+ strcat (regs, " - ");
+ strcat (regs, reg_names[ i - 1 ] );
+ }
+ }
+ }
+
+ sprintf (buff, "dispose %d {%s}, r31", stack_bytes / 4, regs);
+ }
+
+ return buff;
+}
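+
+/* An illustrative expansion (values chosen for this example): for an
+ epilogue that pops r25 - r29 and r31 and releases a further 20 bytes
+ of stack, the code above emits
+
+	dispose 5 {r25 - r29, r31}, r31
+
+ while an epilogue that also pops r2 uses a call table stub such as
+ "callt ctoff(__callt_return_r2_r31)" instead. */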
+
+/* Return non-zero if the given RTX is suitable
+ for collapsing into a PREPARE instruction. */
+
+int
+pattern_is_ok_for_prepare (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ int count = XVECLEN (op, 0);
+ int i;
+
+  /* If there are no registers to save then the prepare instruction
+ is not suitable. */
+ if (count <= 1)
+ return 0;
+
+ /* The pattern matching has already established that we are adjusting the
+     stack and pushing at least one register.  We must now check the
+     remaining entries in the vector to make sure that they are also
+     register pushes.
+
+ The test below performs the C equivalent of this machine description
+ pattern match:
+
+ (set (mem:SI (plus:SI (reg:SI 3)
+ (match_operand:SI 2 "immediate_operand" "i")))
+ (match_operand:SI 3 "register_is_ok_for_epilogue" "r"))
+
+ */
+
+ for (i = 2; i < count; i++)
+ {
+ rtx vector_element = XVECEXP (op, 0, i);
+ rtx dest;
+ rtx src;
+ rtx plus;
+
+ if (GET_CODE (vector_element) != SET)
+ return 0;
+
+ dest = SET_DEST (vector_element);
+ src = SET_SRC (vector_element);
+
+ if ( GET_CODE (dest) != MEM
+ || GET_MODE (dest) != SImode
+ || GET_CODE (src) != REG
+ || GET_MODE (src) != SImode
+ || ! register_is_ok_for_epilogue (src, SImode)
+ )
+ return 0;
+
+ plus = XEXP (dest, 0);
+
+ if ( GET_CODE (plus) != PLUS
+ || GET_CODE (XEXP (plus, 0)) != REG
+ || GET_MODE (XEXP (plus, 0)) != SImode
+ || REGNO (XEXP (plus, 0)) != STACK_POINTER_REGNUM
+ || GET_CODE (XEXP (plus, 1)) != CONST_INT)
+ return 0;
+
+ /* If the register is being pushed somewhere other than the stack
+	 space just acquired by the first operand then abandon this quest.
+	 Note: the test is <= because both values are negative. */
+ if (INTVAL (XEXP (plus, 1))
+ <= INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1)))
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Construct a PREPARE instruction that is the equivalent of
+ the given RTL. We have already verified that this should
+ be possible. */
+
+char *
+construct_prepare_instruction (op)
+ rtx op;
+{
+ int count = XVECLEN (op, 0);
+ int stack_bytes;
+ unsigned long int mask;
+ int i;
+ static char buff[ 100 ]; /* XXX */
+ int use_callt = 0;
+
+ if (count <= 1)
+ {
+ error ("Bogus PREPEARE construction: %d\n", count);
+ return NULL;
+ }
+
+ /* Work out how many bytes to push onto
+ the stack after storing the registers. */
+ if (GET_CODE (XVECEXP (op, 0, 0)) != SET)
+ abort ();
+ if (GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != PLUS)
+ abort ();
+ if (GET_CODE (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1)) != CONST_INT)
+ abort ();
+
+ stack_bytes = INTVAL (XEXP (SET_SRC (XVECEXP (op, 0, 0)), 1));
+
+  /* Each push will put 4 bytes onto the stack. */
+ stack_bytes += (count - 1) * 4;
+
+  /* Make sure that the amount we are pushing
+     will fit into the PREPARE instruction. */
+ if (stack_bytes < -128)
+ {
+ error ("Too much stack space to prepare: %d", stack_bytes);
+ return NULL;
+ }
+
+ /* Now compute the bit mask of registers to push. */
+ mask = 0;
+ for (i = 1; i < count; i++)
+ {
+ rtx vector_element = XVECEXP (op, 0, i);
+
+ if (GET_CODE (vector_element) != SET)
+ abort ();
+ if (GET_CODE (SET_SRC (vector_element)) != REG)
+ abort ();
+ if (! register_is_ok_for_epilogue (SET_SRC (vector_element), SImode))
+ abort ();
+
+ if (REGNO (SET_SRC (vector_element)) == 2)
+ use_callt = 1;
+ else
+ mask |= 1 << REGNO (SET_SRC (vector_element));
+ }
+
+ if ((! TARGET_DISABLE_CALLT)
+ && (use_callt || stack_bytes == 0 || stack_bytes == -16))
+ {
+ if (use_callt)
+ {
+ sprintf (buff, "callt ctoff(__callt_save_r2_r%d)", (mask & (1 << 31)) ? 31 : 29 );
+ return buff;
+ }
+
+ for (i = 20; i < 32; i++)
+ if (mask & (1 << i))
+ break;
+
+ if (i == 31)
+ sprintf (buff, "callt ctoff(__callt_save_r31c)");
+ else
+ sprintf (buff, "callt ctoff(__callt_save_r%d_r%d%s)",
+ i, (mask & (1 << 31)) ? 31 : 29, stack_bytes ? "c" : "");
+ }
+ else
+ {
+ static char regs [100]; /* XXX */
+ int done_one;
+
+ /* Generate the PREPARE instruction. Note we could just issue the
+ bit mask as a number as the assembler can cope with this, but for
+ the sake of our readers we turn it into a textual description. */
+ regs[0] = 0;
+ done_one = 0;
+
+ for (i = 20; i < 32; i++)
+ {
+ if (mask & (1 << i))
+ {
+ int first;
+
+ if (done_one)
+ strcat (regs, ", ");
+ else
+ done_one = 1;
+
+ first = i;
+ strcat (regs, reg_names[ first ]);
+
+ for (i++; i < 32; i++)
+ if ((mask & (1 << i)) == 0)
+ break;
+
+ if (i > first + 1)
+ {
+ strcat (regs, " - ");
+ strcat (regs, reg_names[ i - 1 ] );
+ }
+ }
+ }
+
+ sprintf (buff, "prepare {%s}, %d", regs, (- stack_bytes) / 4);
+ }
+
+ return buff;
+}
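+
+/* Similarly, an illustrative PREPARE expansion: a prologue that pushes
+ r20 - r24 and allocates a further 12 bytes (stack_bytes == -12 above)
+ emits
+
+	prepare {r20 - r24}, 3
+
+ while a prologue that also pushes r2 falls back to a call table stub. */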
\f
/* Implement `va_arg'. */
#undef STARTFILE_SPEC
#undef ASM_SPEC
-
#define TARGET_CPU_generic 1
+#define TARGET_CPU_v850e 2
#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT TARGET_CPU_generic
#define SUBTARGET_CPP_SPEC "%{!mv*:-D__v850__}"
#define TARGET_VERSION fprintf (stderr, " (NEC V850)");
+/* Choose which processor will be the default.
+ We must pass a -mv850xx option to the assembler if no explicit -mv* option
+ is given, because the assembler's processor default may not be correct. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v850e
+#undef MASK_DEFAULT
+#define MASK_DEFAULT MASK_V850E
+#undef SUBTARGET_ASM_SPEC
+#define SUBTARGET_ASM_SPEC "%{!mv*:-mv850e}"
+#undef SUBTARGET_CPP_SPEC
+#define SUBTARGET_CPP_SPEC "%{!mv*:-D__v850e__}"
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (NEC V850E)");
+#endif
#define ASM_SPEC "%{mv*:-mv%*}"
-#define CPP_SPEC "%{mv850ea:-D__v850ea__} %{mv850e:-D__v850e__} %{mv850:-D__v850__} %(subtarget_cpp_spec)"
+#define CPP_SPEC "%{mv850e:-D__v850e__} %{mv850:-D__v850__} %(subtarget_cpp_spec)"
#define EXTRA_SPECS \
{ "subtarget_asm_spec", SUBTARGET_ASM_SPEC }, \
#define MASK_CPU 0x00000030
#define MASK_V850 0x00000010
+#define MASK_V850E 0x00000020
+#define MASK_SMALL_SLD 0x00000040
#define MASK_BIG_SWITCH 0x00000100
+#define MASK_NO_APP_REGS 0x00000200
+#define MASK_DISABLE_CALLT 0x00000400
+
+#define MASK_US_BIT_SET 0x00001000
+#define MASK_US_MASK_SET 0x00002000
/* Macros used in the machine description to test the flags. */
/* Whether to emit 2 byte per entry or 4 byte per entry switch tables. */
#define TARGET_BIG_SWITCH (target_flags & MASK_BIG_SWITCH)
-/* General debug flag */
-#define TARGET_DEBUG (target_flags & MASK_DEBUG)
+/* General debug flag. */
+#define TARGET_DEBUG (target_flags & MASK_DEBUG)
+#define TARGET_V850E ((target_flags & MASK_V850E) == MASK_V850E)
+
+#define TARGET_US_BIT_SET (target_flags & MASK_US_BIT_SET)
+
+/* Whether to assume that the SLD.B and SLD.H instructions only have small
+ displacement fields, thus allowing the generated code to run on any of
+ the V850 range of processors. */
+#define TARGET_SMALL_SLD (target_flags & MASK_SMALL_SLD)
+
+/* True if callt will not be used for function prologues & epilogues. */
+#define TARGET_DISABLE_CALLT (target_flags & MASK_DISABLE_CALLT)
+
+/* False if r2 and r5 can be used by the compiler. True if r2
+ and r5 are to be fixed registers (for compatibility with GHS). */
+#define TARGET_NO_APP_REGS (target_flags & MASK_NO_APP_REGS)
/* Macro to define tables used to set the flags.
This is a list in braces of pairs in braces,
{ "v850", MASK_V850, \
N_("Compile for the v850 processor") }, \
{ "v850", -(MASK_V850 ^ MASK_CPU), "" }, \
+ { "v850e", MASK_V850E, N_("Compile for v850e processor") }, \
+ { "v850e", -(MASK_V850E ^ MASK_CPU), "" }, /* Make sure that the other bits are cleared. */ \
+ { "small-sld", MASK_SMALL_SLD, N_("Enable the use of the short load instructions") }, \
+ { "no-small-sld", -MASK_SMALL_SLD, "" }, \
+ { "disable-callt", MASK_DISABLE_CALLT, \
+ N_("Do not use the callt instruction") }, \
+ { "no-disable-callt", -MASK_DISABLE_CALLT, "" }, \
+ { "US-bit-set", (MASK_US_BIT_SET | MASK_US_MASK_SET), "" }, \
+ { "no-US-bit-set", -MASK_US_BIT_SET, "" }, \
+ { "no-US-bit-set", MASK_US_MASK_SET, "" }, \
+ { "app-regs", -MASK_NO_APP_REGS, "" }, \
+ { "no-app-regs", MASK_NO_APP_REGS, \
+ N_("Do not use registers r2 and r5") }, \
{ "big-switch", MASK_BIG_SWITCH, \
N_("Use 4 byte entries in switch tables") },\
{ "", MASK_DEFAULT, ""}}
0, 1, 3, 4, 5, 30, 32, 33 /* fixed registers */ \
}
+/* If TARGET_NO_APP_REGS is not defined then add r2 and r5 to
+ the pool of fixed registers. See PR 14505. */
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ if (TARGET_NO_APP_REGS) \
+ { \
+ fixed_regs[2] = 1; call_used_regs[2] = 1; \
+ fixed_regs[5] = 1; call_used_regs[5] = 1; \
+ } \
+}
+
/* Return number of consecutive hard regs needed starting at reg REGNO
to hold something of mode MODE.
#define CONST_OK_FOR_M(VALUE) ((unsigned)(VALUE) < 0x10000)
/* 5 bit unsigned immediate in shift instructions */
#define CONST_OK_FOR_N(VALUE) ((unsigned) (VALUE) <= 31)
+/* 9 bit signed immediate for word multiply instruction. */
+#define CONST_OK_FOR_O(VALUE) ((unsigned) (VALUE) + 0x100 < 0x200)
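+/* The unsigned addition folds both range checks into one compare:
+   e.g. VALUE == -256 yields 0 and VALUE == 255 yields 0x1ff (both
+   accepted), while VALUE == 256 yields 0x200 and is rejected.  */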
-#define CONST_OK_FOR_O(VALUE) 0
#define CONST_OK_FOR_P(VALUE) 0
-
#define CONST_OK_FOR_LETTER_P(VALUE, C) \
((C) == 'I' ? CONST_OK_FOR_I (VALUE) : \
(C) == 'J' ? CONST_OK_FOR_J (VALUE) : \
((C) == 'Q' ? ep_memory_operand (OP, GET_MODE (OP), 0) \
: (C) == 'R' ? special_symbolref_operand (OP, VOIDmode) \
: (C) == 'S' ? (GET_CODE (OP) == SYMBOL_REF && ! ZDA_NAME_P (XSTR (OP, 0))) \
- : (C) == 'T' ? 0 \
+   : (C) == 'T' ? ep_memory_operand (OP, GET_MODE (OP), TRUE) \
: (C) == 'U' ? ((GET_CODE (OP) == SYMBOL_REF && ZDA_NAME_P (XSTR (OP, 0))) \
|| (GET_CODE (OP) == CONST \
&& GET_CODE (XEXP (OP, 0)) == PLUS \
#undef USER_LABEL_PREFIX
#define USER_LABEL_PREFIX "_"
-/* When assemble_integer is used to emit the offsets for a switch
- table it can encounter (TRUNCATE:HI (MINUS:SI (LABEL_REF:SI) (LABEL_REF:SI))).
- output_addr_const will normally barf at this, but it is OK to omit
- the truncate and just emit the difference of the two labels. The
- .hword directive will automatically handle the truncation for us. */
-
-#define OUTPUT_ADDR_CONST_EXTRA(FILE, X, FAIL) \
- if (GET_CODE (x) == TRUNCATE) \
- output_addr_const (FILE, XEXP (X, 0)); \
- else \
- goto FAIL;
+#define OUTPUT_ADDR_CONST_EXTRA(FILE, X, FAIL) \
+ if (! v850_output_addr_const_extra (FILE, X)) \
+ goto FAIL
/* This says how to output the assembler to define a global
uninitialized but not common symbol. */
/* This is how to output an element of a case-vector that is relative. */
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
- fprintf (FILE, "\t%s .L%d-.L%d\n", \
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, "\t%s %s.L%d-.L%d%s\n", \
(TARGET_BIG_SWITCH ? ".long" : ".short"), \
- VALUE, REL)
+ (! TARGET_BIG_SWITCH && TARGET_V850E ? "(" : ""), \
+ VALUE, REL, \
+ (! TARGET_BIG_SWITCH && TARGET_V850E ? ")>>1" : ""))
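+
+/* For example (labels illustrative), a two byte entry on the v850e is
+   emitted as
+
+	.short (.L5-.L3)>>1
+
+   since the v850e switch instruction shifts the fetched table entry
+   left by one before adding it to the PC.  */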
#define ASM_OUTPUT_ALIGN(FILE, LOG) \
if ((LOG) != 0) \
{ "pattern_is_ok_for_prologue", { PARALLEL }}, \
{ "pattern_is_ok_for_epilogue", { PARALLEL }}, \
{ "register_is_ok_for_epilogue",{ REG }}, \
+{ "pattern_is_ok_for_dispose", { PARALLEL }}, \
+{ "pattern_is_ok_for_prepare", { PARALLEL }}, \
+{ "register_is_ok_for_dispose", { REG }}, \
{ "not_power_of_two_operand", { CONST_INT }},
#endif /* ! GCC_V850_H */
;; GCC machine description for NEC V850
-;; Copyright (C) 1996, 1997, 1998, 1999 Free Software Foundation, Inc.
+;; Copyright (C) 1996, 1997, 1998, 1999, 2002 Free Software Foundation, Inc.
;; Contributed by Jeff Law (law@cygnus.com).
;; This file is part of GNU CC.
must be done with HIGH & LO_SUM patterns. */
if (CONSTANT_P (operands[1])
&& GET_CODE (operands[1]) != HIGH
+ && ! TARGET_V850E
&& !special_symbolref_operand (operands[1], VOIDmode)
&& !(GET_CODE (operands[1]) == CONST_INT
&& (CONST_OK_FOR_J (INTVAL (operands[1]))
}
}")
+;; This is the same as the following pattern, except that it includes
+;; support for arbitrary 32 bit immediates.
+
+;; ??? This always loads addresses using hilo. If the only use of this address
+;; was in a load/store, then we would get smaller code if we only loaded the
+;; upper part with hi, and then put the lower part in the load/store insn.
+
+(define_insn "*movsi_internal_v850e"
+ [(set (match_operand:SI 0 "general_operand" "=r,r,r,r,Q,r,r,m,m,r")
+ (match_operand:SI 1 "general_operand" "Jr,K,L,Q,Ir,m,R,r,I,i"))]
+ "TARGET_V850E
+ && (register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))"
+ "* return output_move_single (operands);"
+ [(set_attr "length" "2,4,4,2,2,4,4,4,4,6")
+ (set_attr "cc" "none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit,none_0hit")
+ (set_attr "type" "other,other,other,load,other,load,other,other,other,other")])
+
(define_insn "*movsi_internal"
[(set (match_operand:SI 0 "general_operand" "=r,r,r,r,Q,r,r,m,m")
(match_operand:SI 1 "movsi_source_operand" "Jr,K,L,Q,Ir,m,R,r,I"))]
(set_attr "cc" "none_0hit,none_0hit")
(set_attr "type" "mult")])
+;; ??? The scheduling info is probably wrong.
+
+;; ??? This instruction can also generate the 32 bit highpart, but using it
+;; may increase code size, counter to the desired result.
+
+;; ??? This instruction can also give a DImode result.
+
+;; ??? There is an unsigned version, but it matters only for the DImode/highpart
+;; results.
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0")
+ (match_operand:SI 2 "reg_or_int9_operand" "rO")))]
+ "TARGET_V850E"
+ "mul %2,%1,%."
+ [(set_attr "length" "4")
+ (set_attr "cc" "none_0hit")
+ (set_attr "type" "mult")])
+
+;; ----------------------------------------------------------------------
+;; DIVIDE INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; ??? These insns do set the Z/N condition codes, except that they are based
+;; on only one of the two results, so it doesn't seem to make sense to use
+;; them.
+
+;; ??? The scheduling info is probably wrong.
+
+(define_insn "divmodsi4"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))
+ (set (match_operand:SI 3 "register_operand" "=r")
+ (mod:SI (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_V850E"
+ "div %2,%0,%3"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "other")])
+
+(define_insn "udivmodsi4"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "register_operand" "r")))
+ (set (match_operand:SI 3 "register_operand" "=r")
+ (umod:SI (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_V850E"
+ "divu %2,%0,%3"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "other")])
+
+;; ??? There is a 2 byte instruction for generating only the quotient.
+;; However, it isn't clear how to compute the length field correctly.
+
+(define_insn "divmodhi4"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (div:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "register_operand" "r")))
+ (set (match_operand:HI 3 "register_operand" "=r")
+ (mod:HI (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_V850E"
+ "divh %2,%0,%3"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "other")])
+
+;; Half-words are sign-extended by default, so we must zero extend to a word
+;; here before doing the divide.
+
+(define_insn "udivmodhi4"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (udiv:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:HI 2 "register_operand" "r")))
+ (set (match_operand:HI 3 "register_operand" "=r")
+ (umod:HI (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_V850E"
+ "zxh %0 ; divhu %2,%0,%3"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")
+ (set_attr "type" "other")])
\f
;; ----------------------------------------------------------------------
;; AND INSTRUCTIONS
[(set_attr "length" "4")
(set_attr "cc" "none_0hit")])
+;; ----------------------------------------------------------------------
+;; CONDITIONAL MOVE INSTRUCTIONS
+;; ----------------------------------------------------------------------
+
+;; Instructions using cc0 aren't allowed to have input reloads, so we must
+;; hide the fact that this instruction uses cc0. We do so by including the
+;; compare instruction inside it.
+
+;; ??? This is very ugly. The right way to do this is to modify cmpsi so
+;; that it doesn't emit RTL, and then modify the bcc/scc patterns so that
+;; they emit RTL for the compare instruction. Unfortunately, this requires
+;; lots of changes that will be hard to sanitise. So for now, cmpsi still
+;; emits RTL, and I get the compare operands here from the previous insn.
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_dup 4) (match_dup 5)])
+ (match_operand:SI 2 "reg_or_const_operand" "rJ")
+ (match_operand:SI 3 "reg_or_const_operand" "rI")))]
+ "TARGET_V850E"
+ "
+{
+ rtx insn = get_last_insn_anywhere ();
+
+ if ( (GET_CODE (operands[2]) == CONST_INT
+ && GET_CODE (operands[3]) == CONST_INT))
+ {
+ int o2 = INTVAL (operands[2]);
+ int o3 = INTVAL (operands[3]);
+
+ if (o2 == 1 && o3 == 0)
+ FAIL; /* setf */
+ if (o3 == 1 && o2 == 0)
+ FAIL; /* setf */
+ if (o2 == 0 && (o3 < -16 || o3 > 15) && exact_log2 (o3) >= 0)
+ FAIL; /* setf + shift */
+      if (o3 == 0 && (o2 < -16 || o2 > 15) && exact_log2 (o2) >= 0)
+ FAIL; /* setf + shift */
+ if (o2 != 0)
+ operands[2] = copy_to_mode_reg (SImode, operands[2]);
+      if (o3 != 0)
+ operands[3] = copy_to_mode_reg (SImode, operands[3]);
+ }
+ else
+ {
+ if (GET_CODE (operands[2]) != REG)
+	operands[2] = copy_to_mode_reg (SImode, operands[2]);
+ if (GET_CODE (operands[3]) != REG)
+ operands[3] = copy_to_mode_reg (SImode, operands[3]);
+ }
+ if (GET_CODE (insn) == INSN
+ && GET_CODE (PATTERN (insn)) == SET
+ && SET_DEST (PATTERN (insn)) == cc0_rtx)
+ {
+ rtx src = SET_SRC (PATTERN (insn));
+
+ if (GET_CODE (src) == COMPARE)
+ {
+ operands[4] = XEXP (src, 0);
+ operands[5] = XEXP (src, 1);
+ }
+ else if (GET_CODE (src) == REG
+ || GET_CODE (src) == SUBREG)
+ {
+ operands[4] = src;
+ operands[5] = const0_rtx;
+ }
+ else
+ abort ();
+ }
+ else
+ abort ();
+}")
+
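+;; An illustrative expansion (the source fragment and register names are
+;; hypothetical): for "x = (a > b) ? c : 0" the movsicc_normal pattern
+;; below emits
+;;
+;;	cmp rB,rA ; cmov gt,rC,r0,rX
+;;
+;; with the comparison folded into the insn as described above.
+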
+;; ??? Clobbering the condition codes is overkill.
+
+;; ??? We sometimes emit an unnecessary compare instruction because the
+;; condition codes may have already been set by an earlier instruction,
+;; but we have no code here to avoid the compare if it is unnecessary.
+
+(define_insn "*movsicc_normal"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 4 "register_operand" "r")
+ (match_operand:SI 5 "reg_or_int5_operand" "rJ")])
+ (match_operand:SI 2 "reg_or_int5_operand" "rJ")
+ (match_operand:SI 3 "reg_or_0_operand" "rI")))]
+ "TARGET_V850E"
+ "cmp %5,%4 ; cmov %c1,%2,%z3,%0"
+ [(set_attr "length" "6")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*movsicc_reversed"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 4 "register_operand" "r")
+ (match_operand:SI 5 "reg_or_int5_operand" "rJ")])
+ (match_operand:SI 2 "reg_or_0_operand" "rI")
+ (match_operand:SI 3 "reg_or_int5_operand" "rJ")))]
+ "TARGET_V850E"
+ "cmp %5,%4 ; cmov %C1,%3,%z2,%0"
+ [(set_attr "length" "6")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*movsicc_tst1"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(zero_extract:SI
+ (match_operand:QI 2 "memory_operand" "m")
+ (const_int 1)
+ (match_operand 3 "const_int_operand" "n"))
+ (const_int 0)])
+ (match_operand:SI 4 "reg_or_int5_operand" "rJ")
+ (match_operand:SI 5 "reg_or_0_operand" "rI")))]
+ "TARGET_V850E"
+ "tst1 %3,%2 ; cmov %c1,%4,%z5,%0"
+ [(set_attr "length" "8")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*movsicc_tst1_reversed"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(zero_extract:SI
+ (match_operand:QI 2 "memory_operand" "m")
+ (const_int 1)
+ (match_operand 3 "const_int_operand" "n"))
+ (const_int 0)])
+ (match_operand:SI 4 "reg_or_0_operand" "rI")
+ (match_operand:SI 5 "reg_or_int5_operand" "rJ")))]
+ "TARGET_V850E"
+ "tst1 %3,%2 ; cmov %C1,%5,%z4,%0"
+ [(set_attr "length" "8")
+ (set_attr "cc" "clobber")])
+
+;; Matching for sasf requires combining 4 instructions, so we provide a
+;; dummy pattern to match the first 3, which will always be turned into the
+;; second pattern by subsequent combining. As above, we must include the
+;; comparison to avoid input reloads in an insn using cc0.
+
+(define_insn "*sasf_1"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (match_operator 1 "comparison_operator" [(cc0) (const_int 0)])
+ (ashift:SI (match_operand:SI 2 "register_operand" "")
+ (const_int 1))))]
+ "TARGET_V850E"
+ "* abort ();")
+
+(define_insn "*sasf_2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ior:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 3 "register_operand" "r")
+ (match_operand:SI 4 "reg_or_int5_operand" "rJ")])
+ (ashift:SI (match_operand:SI 2 "register_operand" "0")
+ (const_int 1))))]
+ "TARGET_V850E"
+ "cmp %4,%3 ; sasf %c1,%0"
+ [(set_attr "length" "6")
+ (set_attr "cc" "clobber")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:SI 4 "register_operand" "")
+ (match_operand:SI 5 "reg_or_int5_operand" "")])
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))]
+ "TARGET_V850E
+ && ((INTVAL (operands[2]) ^ INTVAL (operands[3])) == 1)
+ && ((INTVAL (operands[2]) + INTVAL (operands[3])) != 1)
+ && (GET_CODE (operands[5]) == CONST_INT
+ || REGNO (operands[0]) != REGNO (operands[5]))
+ && REGNO (operands[0]) != REGNO (operands[4])"
+ [(set (match_dup 0) (match_dup 6))
+ (set (match_dup 0)
+ (ior:SI (match_op_dup 7 [(match_dup 4) (match_dup 5)])
+ (ashift:SI (match_dup 0) (const_int 1))))]
+ "
+{
+ operands[6] = GEN_INT (INTVAL (operands[2]) >> 1);
+ if (INTVAL (operands[2]) & 0x1)
+ operands[7] = operands[1];
+ else
+ operands[7] = gen_rtx (reverse_condition (GET_CODE (operands[1])),
+ GET_MODE (operands[1]), XEXP (operands[1], 0),
+ XEXP (operands[1], 1));
+}")
+;; ---------------------------------------------------------------------
+;; BYTE SWAP INSTRUCTIONS
+;; ---------------------------------------------------------------------
+
+(define_expand "rotlhi3"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (rotate:HI (match_operand:HI 1 "register_operand" "")
+ (match_operand:HI 2 "const_int_operand" "")))]
+ "TARGET_V850E"
+ "
+{
+ if (INTVAL (operands[2]) != 8)
+ FAIL;
+}")
+
+(define_insn "*rotlhi3_8"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (rotate:HI (match_operand:HI 1 "register_operand" "r")
+ (const_int 8)))]
+ "TARGET_V850E"
+ "bsh %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+(define_expand "rotlsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (rotate:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "TARGET_V850E"
+ "
+{
+ if (INTVAL (operands[2]) != 16)
+ FAIL;
+}")
+
+(define_insn "*rotlsi3_16"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 16)))]
+ "TARGET_V850E"
+ "hsw %1,%0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
\f
;; ----------------------------------------------------------------------
;; JUMP INSTRUCTIONS
[(set_attr "length" "2")
(set_attr "cc" "none")])
+(define_insn "switch"
+ [(set (pc)
+ (plus:SI
+ (sign_extend:SI
+ (mem:HI
+ (plus:SI (ashift:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 1))
+ (label_ref (match_operand 1 "" "")))))
+ (label_ref (match_dup 1))))]
+ "TARGET_V850E"
+ "switch %0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
(define_expand "casesi"
[(match_operand:SI 0 "register_operand" "")
(match_operand:SI 1 "register_operand" "")
/* Branch to the default label if out of range of the table. */
emit_jump_insn (gen_bgtu (operands[4]));
+ if (! TARGET_BIG_SWITCH && TARGET_V850E)
+ {
+ emit_jump_insn (gen_switch (reg, operands[3]));
+ DONE;
+ }
+
/* Shift index for the table array access. */
emit_insn (gen_ashlsi3 (reg, reg, GEN_INT (TARGET_BIG_SWITCH ? 2 : 1)));
/* Load the table address into a pseudo. */
;; EXTEND INSTRUCTIONS
;; ----------------------------------------------------------------------
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (zero_extend:SI
+ (match_operand:HI 1 "nonimmediate_operand" "0,r,T,m")))]
+ "TARGET_V850E"
+ "@
+ zxh %0
+ andi 65535,%1,%0
+ sld.hu %1,%0
+ ld.hu %1,%0"
+ [(set_attr "length" "2,4,2,4")
+ (set_attr "cc" "none_0hit,set_znv,none_0hit,none_0hit")])
(define_insn "zero_extendhisi2"
[(set (match_operand:SI 0 "register_operand" "=r")
[(set_attr "length" "4")
(set_attr "cc" "set_znv")])
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (zero_extend:SI
+ (match_operand:QI 1 "nonimmediate_operand" "0,r,T,m")))]
+ "TARGET_V850E"
+ "@
+ zxb %0
+ andi 255,%1,%0
+ sld.bu %1,%0
+ ld.bu %1,%0"
+ [(set_attr "length" "2,4,2,4")
+ (set_attr "cc" "none_0hit,set_znv,none_0hit,none_0hit")])
(define_insn "zero_extendqisi2"
[(set (match_operand:SI 0 "register_operand" "=r")
;;- sign extension instructions
+;; ??? The extendhisi2 pattern should not emit shifts for v850e?
+
+(define_insn "*extendhisi_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,Q,m")))]
+ "TARGET_V850E"
+ "@
+ sxh %0
+ sld.h %1,%0
+ ld.h %1,%0"
+ [(set_attr "length" "2,2,4")
+ (set_attr "cc" "none_0hit,none_0hit,none_0hit")])
;; ??? This is missing a sign extend from memory pattern to match the ld.h
;; instruction.
operands[2] = gen_reg_rtx (SImode);
}")
+;; ??? The extendqisi2 pattern should not emit shifts for v850e?
+
+(define_insn "*extendqisi_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,Q,m")))]
+ "TARGET_V850E"
+ "@
+ sxb %0
+ sld.b %1,%0
+ ld.b %1,%0"
+ [(set_attr "length" "2,2,4")
+ (set_attr "cc" "none_0hit,none_0hit,none_0hit")])
;; ??? This is missing a sign extend from memory pattern to match the ld.b
;; instruction.
;; RTXs. These RTXs will then be turned into a suitable call to a worker
;; function.
+;;
+;; Actually, convert the RTXs into a PREPARE instruction.
+;;
+(define_insn ""
+ [(match_parallel 0 "pattern_is_ok_for_prepare"
+ [(set (reg:SI 3)
+ (plus:SI (reg:SI 3) (match_operand:SI 1 "immediate_operand" "i")))
+ (set (mem:SI (plus:SI (reg:SI 3)
+ (match_operand:SI 2 "immediate_operand" "i")))
+ (match_operand:SI 3 "register_is_ok_for_epilogue" "r"))])]
+ "TARGET_PROLOG_FUNCTION && TARGET_V850E"
+ "* return construct_prepare_instruction (operands[0]);
+ "
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")])
(define_insn ""
[(match_parallel 0 "pattern_is_ok_for_prologue"
(const_string "4")))
(set_attr "cc" "clobber")])
+;;
+;; Actually, turn the RTXs into a DISPOSE instruction.
+;;
+(define_insn ""
+ [(match_parallel 0 "pattern_is_ok_for_dispose"
+ [(return)
+ (set (reg:SI 3)
+ (plus:SI (reg:SI 3) (match_operand:SI 1 "immediate_operand" "i")))
+ (set (match_operand:SI 2 "register_is_ok_for_epilogue" "=r")
+ (mem:SI (plus:SI (reg:SI 3)
+ (match_operand:SI 3 "immediate_operand" "i"))))])]
+ "TARGET_PROLOG_FUNCTION && TARGET_V850E"
+ "* return construct_dispose_instruction (operands[0]);
+ "
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")])
+
;; This pattern will match a return RTX followed by any number of pop RTXs
;; and possible a stack adjustment as well. These RTXs will be turned into
;; a suitable call to a worker function.
(set_attr "cc" "clobber")])
;; Initialize an interrupt function. Do not depend on TARGET_PROLOG_FUNCTION.
+(define_insn "callt_save_interrupt"
+ [(unspec_volatile [(const_int 0)] 2)]
+ "TARGET_V850E && !TARGET_DISABLE_CALLT"
+  ;; The CALLT instruction stores the address of the instruction following
+  ;; the CALLT into the CTPC register without saving the register's previous
+  ;; value.  So if the interrupt handler or its caller could possibly execute
+  ;; a CALLT insn, save_interrupt MUST NOT be called via CALLT.
+ "*
+{
+ output_asm_insn (\"addi -24, sp, sp\", operands);
+ output_asm_insn (\"st.w r10, 12[sp]\", operands);
+ output_asm_insn (\"stsr ctpc, r10\", operands);
+ output_asm_insn (\"st.w r10, 16[sp]\", operands);
+ output_asm_insn (\"stsr ctpsw, r10\", operands);
+ output_asm_insn (\"st.w r10, 20[sp]\", operands);
+ output_asm_insn (\"callt ctoff(__callt_save_interrupt)\", operands);
+ return \"\";
+}"
+ [(set_attr "length" "26")
+ (set_attr "cc" "none")])
+
+(define_insn "callt_return_interrupt"
+ [(unspec_volatile [(const_int 0)] 3)]
+ "TARGET_V850E && !TARGET_DISABLE_CALLT"
+ "callt ctoff(__callt_return_interrupt)"
+ [(set_attr "length" "2")
+ (set_attr "cc" "clobber")])
+
(define_insn "save_interrupt"
[(set (reg:SI 3) (plus:SI (reg:SI 3) (const_int -16)))
- (set (mem:SI (reg:SI 3)) (reg:SI 30))
- (set (mem:SI (plus:SI (reg:SI 3) (const_int -4))) (reg:SI 10))
- (set (mem:SI (plus:SI (reg:SI 3) (const_int -8))) (reg:SI 4))
- (set (mem:SI (plus:SI (reg:SI 3) (const_int -12))) (reg:SI 1))]
- "TARGET_V850 && ! TARGET_LONG_CALLS"
- "add -16, sp ; st.w r10, 12[sp] ; jarl __save_interrupt, r10"
- [(set_attr "length" "12")
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int -16))) (reg:SI 30))
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int -12))) (reg:SI 4))
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int -8))) (reg:SI 1))
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int -4))) (reg:SI 10))]
+ ""
+ "*
+{
+ if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
+ return \"add -16,sp\;st.w r10,12[sp]\;jarl __save_interrupt,r10\";
+ else
+ {
+ output_asm_insn (\"add -16, sp\", operands);
+ output_asm_insn (\"st.w r10, 12[sp]\", operands);
+ output_asm_insn (\"st.w ep, 0[sp]\", operands);
+ output_asm_insn (\"st.w gp, 4[sp]\", operands);
+ output_asm_insn (\"st.w r1, 8[sp]\", operands);
+ output_asm_insn (\"movhi hi(__ep), r0, ep\", operands);
+ output_asm_insn (\"movea lo(__ep), ep, ep\", operands);
+ output_asm_insn (\"movhi hi(__gp), r0, gp\", operands);
+ output_asm_insn (\"movea lo(__gp), gp, gp\", operands);
+ return \"\";
+ }
+}"
+ [(set (attr "length")
+ (if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
+			(const_int 34)
+			(const_int 10)))
(set_attr "cc" "clobber")])
-
+
;; Restore r1, r4, r10, and return from the interrupt
-(define_insn "restore_interrupt"
+(define_insn "return_interrupt"
[(return)
- (set (reg:SI 3) (plus:SI (reg:SI 3) (const_int 16)))
- (set (reg:SI 30) (mem:SI (plus:SI (reg:SI 3) (const_int 12))))
- (set (reg:SI 10) (mem:SI (plus:SI (reg:SI 3) (const_int 8))))
- (set (reg:SI 4) (mem:SI (plus:SI (reg:SI 3) (const_int 4))))
- (set (reg:SI 1) (mem:SI (reg:SI 3)))]
+ (set (reg:SI 3) (plus:SI (reg:SI 3) (const_int 16)))
+ (set (reg:SI 10) (mem:SI (plus:SI (reg:SI 3) (const_int 12))))
+ (set (reg:SI 1) (mem:SI (plus:SI (reg:SI 3) (const_int 8))))
+ (set (reg:SI 4) (mem:SI (plus:SI (reg:SI 3) (const_int 4))))
+ (set (reg:SI 30) (mem:SI (reg:SI 3)))]
""
- "jr __return_interrupt"
- [(set_attr "length" "4")
+ "*
+{
+ if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
+ return \"jr __return_interrupt\";
+ else
+ {
+ output_asm_insn (\"ld.w 0[sp], ep\", operands);
+ output_asm_insn (\"ld.w 4[sp], gp\", operands);
+ output_asm_insn (\"ld.w 8[sp], r1\", operands);
+ output_asm_insn (\"ld.w 12[sp], r10\", operands);
+ output_asm_insn (\"addi 16, sp, sp\", operands);
+ output_asm_insn (\"reti\", operands);
+ return \"\";
+ }
+}"
+ [(set (attr "length")
+ (if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
+ (const_int 24)
+ (const_int 4)))
(set_attr "cc" "clobber")])
-
;; Save all registers except for the registers saved in save_interrupt when
;; an interrupt function makes a call.
;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
;; all of memory.  This blocks insns from being moved across this point.
;; This is needed because the rest of the compiler is not ready to handle
;; insns this complicated.
+(define_insn "callt_save_all_interrupt"
+ [(unspec_volatile [(const_int 0)] 0)]
+ "TARGET_V850E && !TARGET_DISABLE_CALLT"
+ "callt ctoff(__callt_save_all_interrupt)"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
(define_insn "save_all_interrupt"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ "*
+{
+ if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
+ return \"jarl __save_all_interrupt,r10\";
+
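+ /* Inline expansion: copy sp into ep so that the two-byte sst.w
+ short-format stores can be used for this long run of register
+ saves; r1 temporarily holds the caller's ep.  */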
+ output_asm_insn (\"addi -120, sp, sp\", operands);
+ output_asm_insn (\"mov ep, r1\", operands);
+ output_asm_insn (\"mov sp, ep\", operands);
+ output_asm_insn (\"sst.w r31, 116[ep]\", operands);
+ output_asm_insn (\"sst.w r2, 112[ep]\", operands);
+ output_asm_insn (\"sst.w gp, 108[ep]\", operands);
+ output_asm_insn (\"sst.w r6, 104[ep]\", operands);
+ output_asm_insn (\"sst.w r7, 100[ep]\", operands);
+ output_asm_insn (\"sst.w r8, 96[ep]\", operands);
+ output_asm_insn (\"sst.w r9, 92[ep]\", operands);
+ output_asm_insn (\"sst.w r11, 88[ep]\", operands);
+ output_asm_insn (\"sst.w r12, 84[ep]\", operands);
+ output_asm_insn (\"sst.w r13, 80[ep]\", operands);
+ output_asm_insn (\"sst.w r14, 76[ep]\", operands);
+ output_asm_insn (\"sst.w r15, 72[ep]\", operands);
+ output_asm_insn (\"sst.w r16, 68[ep]\", operands);
+ output_asm_insn (\"sst.w r17, 64[ep]\", operands);
+ output_asm_insn (\"sst.w r18, 60[ep]\", operands);
+ output_asm_insn (\"sst.w r19, 56[ep]\", operands);
+ output_asm_insn (\"sst.w r20, 52[ep]\", operands);
+ output_asm_insn (\"sst.w r21, 48[ep]\", operands);
+ output_asm_insn (\"sst.w r22, 44[ep]\", operands);
+ output_asm_insn (\"sst.w r23, 40[ep]\", operands);
+ output_asm_insn (\"sst.w r24, 36[ep]\", operands);
+ output_asm_insn (\"sst.w r25, 32[ep]\", operands);
+ output_asm_insn (\"sst.w r26, 28[ep]\", operands);
+ output_asm_insn (\"sst.w r27, 24[ep]\", operands);
+ output_asm_insn (\"sst.w r28, 20[ep]\", operands);
+ output_asm_insn (\"sst.w r29, 16[ep]\", operands);
+ output_asm_insn (\"mov r1, ep\", operands);
+ return \"\";
+}"
+ [(set (attr "length")
+ (if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
+ (const_int 62)
+ (const_int 4)))
+ (set_attr "cc" "clobber")])
+
+(define_insn "_save_all_interrupt"
[(unspec_volatile [(const_int 0)] 0)]
"TARGET_V850 && ! TARGET_LONG_CALLS"
"jarl __save_all_interrupt,r10"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
-
;; Restore all registers saved when an interrupt function makes a call.
;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
;; all of memory. This blocks insns from being moved across this point.
;; This is needed because the rest of the compiler is not ready to handle
;; insns this complicated.
+(define_insn "callt_restore_all_interrupt"
+ [(unspec_volatile [(const_int 0)] 1)]
+ "TARGET_V850E && !TARGET_DISABLE_CALLT"
+ "callt ctoff(__callt_restore_all_interrupt)"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
(define_insn "restore_all_interrupt"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "*
+{
+ if (TARGET_PROLOG_FUNCTION && !TARGET_LONG_CALLS)
+ return \"jarl __restore_all_interrupt,r10\";
+ else
+ {
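+ /* Mirror of save_all_interrupt: ep-relative sld.w short-format
+ loads, with the caller's ep kept in r1 until the end.  */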
+ output_asm_insn (\"mov ep, r1\", operands);
+ output_asm_insn (\"mov sp, ep\", operands);
+ output_asm_insn (\"sld.w 116[ep], r31\", operands);
+ output_asm_insn (\"sld.w 112[ep], r2\", operands);
+ output_asm_insn (\"sld.w 108[ep], gp\", operands);
+ output_asm_insn (\"sld.w 104[ep], r6\", operands);
+ output_asm_insn (\"sld.w 100[ep], r7\", operands);
+ output_asm_insn (\"sld.w 96[ep], r8\", operands);
+ output_asm_insn (\"sld.w 92[ep], r9\", operands);
+ output_asm_insn (\"sld.w 88[ep], r11\", operands);
+ output_asm_insn (\"sld.w 84[ep], r12\", operands);
+ output_asm_insn (\"sld.w 80[ep], r13\", operands);
+ output_asm_insn (\"sld.w 76[ep], r14\", operands);
+ output_asm_insn (\"sld.w 72[ep], r15\", operands);
+ output_asm_insn (\"sld.w 68[ep], r16\", operands);
+ output_asm_insn (\"sld.w 64[ep], r17\", operands);
+ output_asm_insn (\"sld.w 60[ep], r18\", operands);
+ output_asm_insn (\"sld.w 56[ep], r19\", operands);
+ output_asm_insn (\"sld.w 52[ep], r20\", operands);
+ output_asm_insn (\"sld.w 48[ep], r21\", operands);
+ output_asm_insn (\"sld.w 44[ep], r22\", operands);
+ output_asm_insn (\"sld.w 40[ep], r23\", operands);
+ output_asm_insn (\"sld.w 36[ep], r24\", operands);
+ output_asm_insn (\"sld.w 32[ep], r25\", operands);
+ output_asm_insn (\"sld.w 28[ep], r26\", operands);
+ output_asm_insn (\"sld.w 24[ep], r27\", operands);
+ output_asm_insn (\"sld.w 20[ep], r28\", operands);
+ output_asm_insn (\"sld.w 16[ep], r29\", operands);
+ output_asm_insn (\"mov r1, ep\", operands);
+ output_asm_insn (\"addi 120, sp, sp\", operands);
+ return \"\";
+ }
+}"
+ [(set (attr "length")
+ (if_then_else (ne (symbol_ref "TARGET_LONG_CALLS") (const_int 0))
+ (const_int 62)
+ (const_int 4)))
+ (set_attr "cc" "clobber")])
+
+(define_insn "_restore_all_interrupt"
[(unspec_volatile [(const_int 0)] 1)]
"TARGET_V850 && ! TARGET_LONG_CALLS"
"jarl __restore_all_interrupt,r10"
[(set_attr "length" "4")
(set_attr "cc" "clobber")])
;; Save r6-r9 for a variable argument function
+(define_insn "save_r6_r9_v850e"
+ [(set (mem:SI (reg:SI 3)) (reg:SI 6))
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int 4))) (reg:SI 7))
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int 8))) (reg:SI 8))
+ (set (mem:SI (plus:SI (reg:SI 3) (const_int 12))) (reg:SI 9))
+ ]
+ "TARGET_PROLOG_FUNCTION && TARGET_V850E && !TARGET_DISABLE_CALLT"
+ "callt ctoff(__callt_save_r6_r9)"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
(define_insn "save_r6_r9"
[(set (mem:SI (reg:SI 3)) (reg:SI 6))
(set (mem:SI (plus:SI (reg:SI 3) (const_int 4))) (reg:SI 7))
-mlong-calls -mno-long-calls -mep -mno-ep @gol
-mprolog-function -mno-prolog-function -mspace @gol
-mtda=@var{n} -msda=@var{n} -mzda=@var{n} @gol
+-mapp-regs -mno-app-regs @gol
+-mdisable-callt -mno-disable-callt @gol
+-mv850e @gol
-mv850 -mbig-switch}
@emph{NS32K Options}
Generate code suitable for big switch tables.  Use this option only if
the assembler or linker complains about out-of-range branches within a
switch table.
+
+@item -mapp-regs
+@opindex mapp-regs
+This option causes r2 and r5 to be used in the code generated by the
+compiler.  This setting is the default.
+
+@item -mno-app-regs
+@opindex mno-app-regs
+This option causes r2 and r5 to be treated as fixed registers.
+
+@item -mv850e
+@opindex mv850e
+Specify that the target processor is the V850E.  The preprocessor
+constant @samp{__v850e__} is defined if this option is used.
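+
+For example, a program can select V850E-specific code at compile time:
+
+@smallexample
+#ifdef __v850e__
+  /* Code compiled only when targeting the V850E.  */
+#endif
+@end smallexample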
+
+If neither @option{-mv850} nor @option{-mv850e} is specified, then a
+default target processor is chosen and the relevant @samp{__v850*__}
+preprocessor constant is defined.
+
+The preprocessor constants @samp{__v850} and @samp{__v851__} are always
+defined, regardless of which processor variant is the target.
+
+@item -mdisable-callt
+@opindex mdisable-callt
+This option suppresses generation of the CALLT instruction for the
+v850e flavors of the v850 architecture.  The default is
+@option{-mno-disable-callt}, which allows the CALLT instruction to be
+used.
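+
+For example, to compile for the V850E without using the call table
+(the file name is illustrative):
+
+@smallexample
+gcc -mv850e -mdisable-callt foo.c
+@end smallexample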
+
@end table
@node ARC Options