(define_insn_reservation "x_lr" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "lr"))
- "x-e1-st,x-wr-st")
+ "x-e1-st,x-wr-st")
-(define_insn_reservation "x_la" 1
+(define_insn_reservation "x_la" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "la"))
- "x-e1-st,x-wr-st")
+ "x-e1-st,x-wr-st")
-(define_insn_reservation "x_larl" 1
+(define_insn_reservation "x_larl" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "larl"))
- "x-e1-st,x-wr-st")
+ "x-e1-st,x-wr-st")
-(define_insn_reservation "x_load" 1
+(define_insn_reservation "x_load" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "load"))
- "x-e1-st+x-mem,x-wr-st")
+ "x-e1-st+x-mem,x-wr-st")
-(define_insn_reservation "x_store" 1
+(define_insn_reservation "x_store" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "store"))
- "x-e1-st+x_store_tok,x-wr-st")
+ "x-e1-st+x_store_tok,x-wr-st")
-(define_insn_reservation "x_branch" 1
+(define_insn_reservation "x_branch" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "branch"))
- "x_e1_r,x_wr_r")
+ "x_e1_r,x_wr_r")
-(define_insn_reservation "x_call" 5
+(define_insn_reservation "x_call" 5
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "jsr"))
"x-e1-np*5,x-wr-np")
-
+
(define_insn_reservation "x_mul_hi" 2
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "imulhi"))
(eq_attr "type" "idiv"))
"x-e1-np*10,x-wr-np")
-(define_insn_reservation "x_sem" 17
+(define_insn_reservation "x_sem" 17
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "sem"))
- "x-e1-np+x-mem,x-e1-np*16,x-wr-st")
+ "x-e1-np+x-mem,x-e1-np*16,x-wr-st")
;;
;; Multicycle insns
;;
-(define_insn_reservation "x_cs" 1
+(define_insn_reservation "x_cs" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "cs"))
- "x-e1-np,x-wr-np")
+ "x-e1-np,x-wr-np")
-(define_insn_reservation "x_vs" 1
+(define_insn_reservation "x_vs" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "vs"))
- "x-e1-np*10,x-wr-np")
+ "x-e1-np*10,x-wr-np")
-(define_insn_reservation "x_stm" 1
+(define_insn_reservation "x_stm" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "stm"))
- "(x-e1-np+x_store_tok)*10,x-wr-np")
+ "(x-e1-np+x_store_tok)*10,x-wr-np")
-(define_insn_reservation "x_lm" 1
+(define_insn_reservation "x_lm" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "lm"))
- "x-e1-np*10,x-wr-np")
+ "x-e1-np*10,x-wr-np")
-(define_insn_reservation "x_other" 1
+(define_insn_reservation "x_other" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "other"))
- "x-e1-np,x-wr-np")
+ "x-e1-np,x-wr-np")
;;
;; Floating point insns
;;
-(define_insn_reservation "x_fsimptf" 7
+(define_insn_reservation "x_fsimptf" 7
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "fsimptf"))
- "x_e1_t*2,x-wr-fp")
+ "x_e1_t*2,x-wr-fp")
-(define_insn_reservation "x_fsimpdf" 6
+(define_insn_reservation "x_fsimpdf" 6
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "fsimpdf,fmuldf"))
- "x_e1_t,x-wr-fp")
+ "x_e1_t,x-wr-fp")
-(define_insn_reservation "x_fsimpsf" 6
+(define_insn_reservation "x_fsimpsf" 6
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "fsimpsf,fmulsf"))
- "x_e1_t,x-wr-fp")
+ "x_e1_t,x-wr-fp")
(define_insn_reservation "x_fmultf" 33
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "fmultf"))
- "x_e1_t*27,x-wr-fp")
+ "x_e1_t*27,x-wr-fp")
(define_insn_reservation "x_fdivtf" 82
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "fdivtf,fsqrttf"))
- "x_e1_t*76,x-wr-fp")
+ "x_e1_t*76,x-wr-fp")
(define_insn_reservation "x_fdivdf" 36
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "fdivdf,fsqrtdf"))
- "x_e1_t*30,x-wr-fp")
+ "x_e1_t*30,x-wr-fp")
-(define_insn_reservation "x_fdivsf" 36
+(define_insn_reservation "x_fdivsf" 36
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "fdivsf,fsqrtsf"))
- "x_e1_t*30,x-wr-fp")
+ "x_e1_t*30,x-wr-fp")
-(define_insn_reservation "x_floadtf" 6
+(define_insn_reservation "x_floadtf" 6
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "floadtf"))
- "x_e1_t,x-wr-fp")
+ "x_e1_t,x-wr-fp")
-(define_insn_reservation "x_floaddf" 6
+(define_insn_reservation "x_floaddf" 6
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "floaddf"))
- "x_e1_t,x-wr-fp")
+ "x_e1_t,x-wr-fp")
-(define_insn_reservation "x_floadsf" 6
+(define_insn_reservation "x_floadsf" 6
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "floadsf"))
- "x_e1_t,x-wr-fp")
+ "x_e1_t,x-wr-fp")
-(define_insn_reservation "x_fstoredf" 1
+(define_insn_reservation "x_fstoredf" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "fstoredf"))
- "x_e1_t,x-wr-fp")
+ "x_e1_t,x-wr-fp")
-(define_insn_reservation "x_fstoresf" 1
+(define_insn_reservation "x_fstoresf" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "fstoresf"))
- "x_e1_t,x-wr-fp")
+ "x_e1_t,x-wr-fp")
(define_insn_reservation "x_ftrunctf" 16
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "ftrunctf"))
- "x_e1_t*10,x-wr-fp")
+ "x_e1_t*10,x-wr-fp")
(define_insn_reservation "x_ftruncdf" 11
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "ftruncdf"))
- "x_e1_t*5,x-wr-fp")
+ "x_e1_t*5,x-wr-fp")
-(define_insn_reservation "x_ftoi" 1
+(define_insn_reservation "x_ftoi" 1
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "ftoi"))
- "x_e1_t*3,x-wr-fp")
+ "x_e1_t*3,x-wr-fp")
-(define_insn_reservation "x_itof" 7
+(define_insn_reservation "x_itof" 7
(and (eq_attr "cpu" "z990,z9_109")
(eq_attr "type" "itoftf,itofdf,itofsf"))
- "x_e1_t*3,x-wr-fp")
+ "x_e1_t*3,x-wr-fp")
(define_bypass 1 "x_fsimpdf" "x_fstoredf")
(define_bypass 1 "x_fsimpsf" "x_fstoresf")
(define_bypass 1 "x_floaddf" "x_fsimpdf,x_fstoredf,x_floaddf")
-
+
(define_bypass 1 "x_floadsf" "x_fsimpsf,x_fstoresf,x_floadsf")
;;
-;; s390_agen_dep_p returns 1, if a register is set in the
+;; s390_agen_dep_p returns 1, if a register is set in the
;; first insn and used in the dependent insn to form a address.
;;
;;
;; If an instruction uses a register to address memory, it needs
;; to be set 5 cycles in advance.
-;;
+;;
-(define_bypass 5 "x_int,x_agen,x_lr"
+(define_bypass 5 "x_int,x_agen,x_lr"
"x_agen,x_la,x_branch,x_call,x_load,x_store,x_cs,x_stm,x_lm,x_other"
"s390_agen_dep_p")
-(define_bypass 9 "x_int,x_agen,x_lr"
+(define_bypass 9 "x_int,x_agen,x_lr"
"x_floadtf, x_floaddf, x_floadsf, x_fstoredf, x_fstoresf,\
x_fsimpdf, x_fsimpsf, x_fdivdf, x_fdivsf"
"s390_agen_dep_p")
;;
-;; A load type instruction uses a bypass to feed the result back
-;; to the address generation pipeline stage.
+;; A load type instruction uses a bypass to feed the result back
+;; to the address generation pipeline stage.
;;
-(define_bypass 4 "x_load"
+(define_bypass 4 "x_load"
"x_agen,x_la,x_branch,x_call,x_load,x_store,x_cs,x_stm,x_lm,x_other"
"s390_agen_dep_p")
"s390_agen_dep_p")
;;
-;; A load address type instruction uses a bypass to feed the
-;; result back to the address generation pipeline stage.
+;; A load address type instruction uses a bypass to feed the
+;; result back to the address generation pipeline stage.
;;
-(define_bypass 3 "x_larl,x_la"
+(define_bypass 3 "x_larl,x_la"
"x_agen,x_la,x_branch,x_call,x_load,x_store,x_cs,x_stm,x_lm,x_other"
"s390_agen_dep_p")
/* Define the specific costs for a given cpu. */
-struct processor_costs
+struct processor_costs
{
/* multiplication */
const int m; /* cost of an M instruction. */
const struct processor_costs *s390_cost;
static const
-struct processor_costs z900_cost =
+struct processor_costs z900_cost =
{
COSTS_N_INSNS (5), /* M */
COSTS_N_INSNS (10), /* MGHI */
};
static const
-struct processor_costs z990_cost =
+struct processor_costs z990_cost =
{
COSTS_N_INSNS (4), /* M */
COSTS_N_INSNS (2), /* MGHI */
};
static const
-struct processor_costs z9_109_cost =
+struct processor_costs z9_109_cost =
{
COSTS_N_INSNS (4), /* M */
COSTS_N_INSNS (2), /* MGHI */
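These per-cpu tables are selected according to the tuning target and consulted when rtx costs are computed. A standalone sketch of the mechanism, using only the two fields shown in the excerpt and made-up plumbing rather than the real GCC hooks:

#include <stdio.h>

struct costs { int m; int mghi; };            /* M, MGHI insn costs    */

static const struct costs z900 = { 5, 10 };   /* values from z900_cost */
static const struct costs z990 = { 4,  2 };   /* values from z990_cost */

static const struct costs *tune = &z990;      /* chosen by -mtune      */

int
main (void)
{
  /* A cost query for a 32-bit multiply just reads the active table.  */
  printf ("M: %d, MGHI: %d\n", tune->m, tune->mghi);
  return 0;
}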
HOST_WIDE_INT s390_stack_size = 0;
HOST_WIDE_INT s390_stack_guard = 0;
-/* The following structure is embedded in the machine
+/* The following structure is embedded in the machine
specific part of struct function. */
struct GTY (()) s390_frame_layout
int last_save_gpr;
int last_restore_gpr;
- /* Bits standing for floating point registers. Set, if the
- respective register has to be saved. Starting with reg 16 (f0)
+ /* Bits standing for floating point registers. Set, if the
+ respective register has to be saved. Starting with reg 16 (f0)
at the rightmost bit.
Bit 15 - 8 7 6 5 4 3 2 1 0
fpr 15 - 8 7 5 3 1 6 4 2 0
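Keeping f0 at the rightmost bit means the bitmap is indexed by save slot rather than by hard register number. A hypothetical helper showing how such a bitmap is tested; the name and signature are illustrative only (the real code uses a macro over cfun->machine->frame_layout):

/* Nonzero if the fpr in slot BIT (per the mapping in the comment
   above) has to be saved.  */
static inline int
fpr_slot_save_p (unsigned int fpr_bitmap, unsigned int bit)
{
  return (fpr_bitmap >> bit) & 1;
}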
case CCZ1mode:
if (m2 == CCZmode)
return m1;
-
+
return VOIDmode;
default:
if (INTVAL (op2) == 0)
return CCTmode;
- /* Selected bits all one: CC3.
+ /* Selected bits all one: CC3.
e.g.: int a; if ((a & (16 + 128)) == 16 + 128) */
if (INTVAL (op2) == INTVAL (op1))
return CCT3mode;
case GT:
/* The only overflow condition of NEG and ABS happens when
-INT_MAX is used as parameter, which stays negative. So
- we have an overflow from a positive value to a negative.
+ we have an overflow from a positive value to a negative.
Using CCAP mode the resulting cc can be used for comparisons. */
if ((GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
&& GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT)
/* If constants are involved in an add instruction it is possible to use
the resulting cc for comparisons with zero. Knowing the sign of the
constant the overflow behavior gets predictable. e.g.:
- int a, b; if ((b = a + c) > 0)
+ int a, b; if ((b = a + c) > 0)
with c as a constant value: c < 0 -> CCAN and c >= 0 -> CCAP */
if (GET_CODE (op0) == PLUS && GET_CODE (XEXP (op0, 1)) == CONST_INT
&& CONST_OK_FOR_K (INTVAL (XEXP (op0, 1))))
&& GET_CODE (*op1) == CONST_INT
&& INTVAL (*op1) == 0xffff
&& SCALAR_INT_MODE_P (GET_MODE (*op0))
- && (nonzero_bits (*op0, GET_MODE (*op0))
+ && (nonzero_bits (*op0, GET_MODE (*op0))
& ~(unsigned HOST_WIDE_INT) 0xffff) == 0)
{
*op0 = gen_lowpart (HImode, *op0);
emit_insn (gen_rtx_SET (VOIDmode, cc, gen_rtx_COMPARE (mode, op0, op1)));
}
- return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
+ return gen_rtx_fmt_ee (code, VOIDmode, cc, const0_rtx);
}
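A few lines up, a compare against 0xffff is narrowed to HImode once nonzero_bits proves that the operand's upper bits are zero. The source-level situation that benefits, written out as plain C for illustration only:

/* The masked value has no bits above the low halfword, so comparing it
   against 0xffff depends only on that halfword and can be carried out
   as a 16-bit (HImode) compare.  */
int
low_halfword_all_ones (unsigned int x)
{
  unsigned int masked = x & 0xffff;   /* upper bits provably zero */
  return masked == 0xffff;
}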
/* Emit a SImode compare and swap instruction setting MEM to NEW_RTX if OLD
/* This overlapping check is used by peepholes merging memory block operations.
Overlapping operations would otherwise be recognized by the S/390 hardware
- and would fall back to a slower implementation. Allowing overlapping
+ and would fall back to a slower implementation. Allowing overlapping
operations would lead to slow code but not to wrong code. Therefore we are
- somewhat optimistic if we cannot prove that the memory blocks are
+ somewhat optimistic if we cannot prove that the memory blocks are
overlapping.
That's why we return false here although this may accept operations on
overlapping memory areas. */
error ("stack size must not be greater than 64k");
}
else if (s390_stack_guard)
- error ("-mstack-guard implies use of -mstack-size");
+ error ("-mstack-guard implies use of -mstack-size");
#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
if (!(target_flags_explicit & MASK_LONG_DOUBLE_128))
{
case UNSPEC_LTREF:
if (!disp)
- disp = gen_rtx_UNSPEC (Pmode,
+ disp = gen_rtx_UNSPEC (Pmode,
gen_rtvec (1, XVECEXP (base, 0, 0)),
UNSPEC_LTREL_OFFSET);
else
return false;
}
- if (!REG_P (base)
- || (GET_MODE (base) != SImode
+ if (!REG_P (base)
+ || (GET_MODE (base) != SImode
&& GET_MODE (base) != Pmode))
return false;
{
case UNSPEC_LTREF:
if (!disp)
- disp = gen_rtx_UNSPEC (Pmode,
+ disp = gen_rtx_UNSPEC (Pmode,
gen_rtvec (1, XVECEXP (indx, 0, 0)),
UNSPEC_LTREL_OFFSET);
else
return false;
}
- if (!REG_P (indx)
+ if (!REG_P (indx)
|| (GET_MODE (indx) != SImode
&& GET_MODE (indx) != Pmode))
return false;
/* Validate displacement. */
if (!disp)
{
- /* If virtual registers are involved, the displacement will change later
- anyway as the virtual registers get eliminated. This could make a
- valid displacement invalid, but it is more likely to make an invalid
- displacement valid, because we sometimes access the register save area
+ /* If virtual registers are involved, the displacement will change later
+ anyway as the virtual registers get eliminated. This could make a
+ valid displacement invalid, but it is more likely to make an invalid
+ displacement valid, because we sometimes access the register save area
via negative offsets to one of those registers.
Thus we don't check the displacement for validity here. If after
elimination the displacement turns out to be invalid after all,
this is fixed up by reload in any case. */
- if (base != arg_pointer_rtx
- && indx != arg_pointer_rtx
- && base != return_address_pointer_rtx
+ if (base != arg_pointer_rtx
+ && indx != arg_pointer_rtx
+ && base != return_address_pointer_rtx
&& indx != return_address_pointer_rtx
- && base != frame_pointer_rtx
+ && base != frame_pointer_rtx
&& indx != frame_pointer_rtx
- && base != virtual_stack_vars_rtx
+ && base != virtual_stack_vars_rtx
&& indx != virtual_stack_vars_rtx)
if (!DISP_IN_RANGE (offset))
return false;
/* Compute a (partial) cost for rtx X. Return true if the complete
cost has been computed, and false if subexpressions should be
- scanned. In either case, *TOTAL contains the cost result.
- CODE contains GET_CODE (x), OUTER_CODE contains the code
+ scanned. In either case, *TOTAL contains the cost result.
+ CODE contains GET_CODE (x), OUTER_CODE contains the code
of the superexpression of x. */
static bool
*total = COSTS_N_INSNS (1);
return false;
- case MULT:
+ case MULT:
switch (GET_MODE (x))
{
case SImode:
}
else
{
- if (ad.base
+ if (ad.base
&& !(REGNO (ad.base) >= FIRST_PSEUDO_REGISTER
|| REGNO_REG_CLASS (REGNO (ad.base)) == ADDR_REGS))
return false;
-
+
if (ad.indx
&& !(REGNO (ad.indx) >= FIRST_PSEUDO_REGISTER
|| REGNO_REG_CLASS (REGNO (ad.indx)) == ADDR_REGS))
gcc_unreachable ();
}
}
- else
+ else
gcc_assert (GET_CODE (addr) == PLUS);
}
if (GET_CODE (addr) == PLUS)
return x;
}
else if (GET_CODE (x) == PLUS
- && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
+ && (TLS_SYMBOLIC_CONST (XEXP (x, 0))
|| TLS_SYMBOLIC_CONST (XEXP (x, 1))))
{
return x;
MODE is the mode of the enclosing MEM. OPNUM is the operand number
and TYPE is the reload type of the current reload. */
-rtx
+rtx
legitimize_reload_address (rtx ad, enum machine_mode mode ATTRIBUTE_UNUSED,
int opnum, int type)
{
new_rtx = gen_rtx_PLUS (Pmode, tem, GEN_INT (lower));
push_reload (XEXP (tem, 1), 0, &XEXP (tem, 1), 0,
- BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
opnum, (enum reload_type) type);
return new_rtx;
}
return;
gcc_assert (GET_CODE (val) == CONST_INT || GET_MODE (val) == QImode);
-
+
if (GET_CODE (len) == CONST_INT && INTVAL (len) > 0 && INTVAL (len) <= 257)
{
if (val == const0_rtx && INTVAL (len) <= 256)
{
/* Initialize memory by storing the first byte. */
emit_move_insn (adjust_address (dst, QImode, 0), val);
-
+
if (INTVAL (len) > 1)
{
/* Initiate 1 byte overlap move.
rtx dstp1 = adjust_address (dst, VOIDmode, 1);
set_mem_size (dst, const1_rtx);
- emit_insn (gen_movmem_short (dstp1, dst,
+ emit_insn (gen_movmem_short (dstp1, dst,
GEN_INT (INTVAL (len) - 2)));
}
}
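The "1 byte overlap move" above is the classic MVC memset idiom: store the fill byte once, then copy the remaining bytes from dst to dst + 1 so the byte propagates through the block. A plain-C sketch of what the emitted movmem does; the overlap is intentional, so memmove would not be a substitute:

/* Byte-wise forward copy with one byte of overlap, the way MVC works.
   Afterwards every byte of dst[0..n-1] holds VAL.  */
void
fill_by_overlap (unsigned char *dst, unsigned char val, unsigned long n)
{
  unsigned long i;

  if (n == 0)
    return;
  dst[0] = val;                  /* store the first byte           */
  for (i = 0; i + 1 < n; i++)    /* overlapping forward copy...    */
    dst[i + 1] = dst[i];         /* ...propagates VAL to the end   */
}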
/* Initialize memory by storing the first byte. */
emit_move_insn (adjust_address (dst, QImode, 0), val);
-
+
/* If count is 1 we are done. */
emit_cmp_and_jump_insns (count, const1_rtx,
EQ, NULL_RTX, mode, 1, end_label);
}
p = rtvec_alloc (2);
- RTVEC_ELT (p, 0) =
+ RTVEC_ELT (p, 0) =
gen_rtx_SET (VOIDmode, dst, op_res);
- RTVEC_ELT (p, 1) =
+ RTVEC_ELT (p, 1) =
gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
if (!register_operand (src, GET_MODE (dst)))
src = force_reg (GET_MODE (dst), src);
- op_res = gen_rtx_MINUS (GET_MODE (dst),
- gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
- gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
- gen_rtx_REG (cc_mode, CC_REGNUM),
+ op_res = gen_rtx_MINUS (GET_MODE (dst),
+ gen_rtx_MINUS (GET_MODE (dst), src, const0_rtx),
+ gen_rtx_fmt_ee (cmp_code, GET_MODE (dst),
+ gen_rtx_REG (cc_mode, CC_REGNUM),
const0_rtx));
p = rtvec_alloc (2);
- RTVEC_ELT (p, 0) =
+ RTVEC_ELT (p, 0) =
gen_rtx_SET (VOIDmode, dst, op_res);
- RTVEC_ELT (p, 1) =
+ RTVEC_ELT (p, 1) =
gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (CCmode, CC_REGNUM));
emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
set_mem_size (dest, GEN_INT (size));
s390_expand_movmem (dest, src_mem, GEN_INT (size));
}
-
+
/* (set (ze (mem)) (reg)). */
else if (register_operand (src, word_mode))
{
int stcmh_width = bitsize - GET_MODE_BITSIZE (SImode);
int size = stcmh_width / BITS_PER_UNIT;
- emit_move_insn (adjust_address (dest, SImode, size),
+ emit_move_insn (adjust_address (dest, SImode, size),
gen_lowpart (SImode, src));
set_mem_size (dest, GEN_INT (size));
emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest, GEN_INT
/* (set (ze (reg)) (const_int)). */
if (TARGET_ZARCH
- && register_operand (dest, word_mode)
+ && register_operand (dest, word_mode)
&& (bitpos % 16) == 0
&& (bitsize % 16) == 0
&& const_int_operand (src, VOIDmode))
putsize = GET_MODE_BITSIZE (putmode);
regpos -= putsize;
- emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
+ emit_move_insn (gen_rtx_ZERO_EXTRACT (word_mode, dest,
GEN_INT (putsize),
- GEN_INT (regpos)),
+ GEN_INT (regpos)),
gen_int_mode (val, putmode));
val >>= putsize;
}
{
val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
NULL_RTX, 1, OPTAB_DIRECT);
- return expand_simple_binop (SImode, ASHIFT, val, count,
+ return expand_simple_binop (SImode, ASHIFT, val, count,
NULL_RTX, 1, OPTAB_DIRECT);
}
/* Structure to hold the initial parameters for a compare_and_swap operation
- in HImode and QImode. */
+ in HImode and QImode. */
struct alignment_context
{
- rtx memsi; /* SI aligned memory location. */
+ rtx memsi; /* SI aligned memory location. */
rtx shift; /* Bit offset with regard to lsb. */
rtx modemask; /* Mask of the HQImode shifted by SHIFT bits. */
rtx modemaski; /* ~modemask */
ac->shift = expand_simple_binop (SImode, MULT, ac->shift, GEN_INT (BITS_PER_UNIT),
NULL_RTX, 1, OPTAB_DIRECT);
/* Calculate masks. */
- ac->modemask = expand_simple_binop (SImode, ASHIFT,
+ ac->modemask = expand_simple_binop (SImode, ASHIFT,
GEN_INT (GET_MODE_MASK (mode)), ac->shift,
NULL_RTX, 1, OPTAB_DIRECT);
ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
/* Start CS loop. */
emit_label (csloop);
- /* val = "<mem>00..0<mem>"
+ /* val = "<mem>00..0<mem>"
* cmp = "00..0<cmp>00..0"
- * new = "00..0<new>00..0"
+ * new = "00..0<new>00..0"
*/
/* Patch cmp and new with val at correct position. */
cmpv, newv));
/* Check for changes outside mode. */
- resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
+ resv = expand_simple_binop (SImode, AND, res, ac.modemaski,
NULL_RTX, 1, OPTAB_DIRECT);
- cc = s390_emit_compare (NE, resv, val);
+ cc = s390_emit_compare (NE, resv, val);
emit_move_insn (val, resv);
/* Loop internal if so. */
s390_emit_jump (csloop, cc);
emit_label (csend);
-
+
/* Return the correct part of the bitfield. */
- convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
+ convert_move (target, expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
NULL_RTX, 1, OPTAB_DIRECT), 1);
}
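s390_expand_cs_hqi above builds a halfword/byte compare-and-swap from the word-sized CS instruction: the expected and replacement values are patched into the containing aligned word, and the loop retries whenever only the bits outside the accessed field changed. A rough C equivalent, sketched with a GCC __sync builtin purely for illustration:

#include <stdint.h>

/* 16-bit compare-and-swap emulated with a 32-bit one.  WORD points to
   the aligned word containing the field, SHIFT is the field's bit
   offset within it.  Returns nonzero on success.  */
static int
cas_halfword (uint32_t *word, unsigned int shift,
              uint16_t expected, uint16_t desired)
{
  uint32_t mask = (uint32_t) 0xffff << shift;   /* like ac.modemask  */
  uint32_t outside = *word & ~mask;             /* bits we preserve  */

  for (;;)
    {
      uint32_t cmp  = outside | ((uint32_t) expected << shift);
      uint32_t repl = outside | ((uint32_t) desired  << shift);
      uint32_t res  = __sync_val_compare_and_swap (word, cmp, repl);

      if (res == cmp)
        return 1;                       /* CS succeeded                */
      if ((res & ~mask) == outside)
        return 0;                       /* our field itself differed   */
      outside = res & ~mask;            /* outside bits changed: retry */
    }
}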
val = expand_simple_binop (SImode, AND, val, ac.modemask,
NULL_RTX, 1, OPTAB_DIRECT);
/* FALLTHRU */
- case SET:
+ case SET:
if (ac.aligned && MEM_P (val))
store_bit_field (new_rtx, GET_MODE_BITSIZE (mode), 0, SImode, val);
else
}
-/* Find an annotated literal pool symbol referenced in RTX X,
- and store it at REF. Will abort if X contains references to
+/* Find an annotated literal pool symbol referenced in RTX X,
+ and store it at REF. Will abort if X contains references to
more than one such pool symbol; multiple references to the same
symbol are allowed, however.
if (*ref == NULL_RTX)
*ref = sym;
- else
+ else
gcc_assert (*ref == sym);
return;
}
}
-/* Replace every reference to the annotated literal pool
+/* Replace every reference to the annotated literal pool
symbol REF in X by its base plus OFFSET. */
static void
for (curr_pool = pool_list; curr_pool; curr_pool = curr_pool->next)
{
- rtx new_insn = gen_reload_base (cfun->machine->base_reg,
+ rtx new_insn = gen_reload_base (cfun->machine->base_reg,
curr_pool->label);
rtx insn = curr_pool->first_insn;
INSN_ADDRESSES_NEW (emit_insn_before (new_insn, insn), -1);
struct constant_pool *pool = s390_find_pool (pool_list, insn);
if (pool)
{
- rtx new_insn = gen_reload_base (cfun->machine->base_reg,
+ rtx new_insn = gen_reload_base (cfun->machine->base_reg,
pool->label);
INSN_ADDRESSES_NEW (emit_insn_after (new_insn, insn), -1);
}
}
-/* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
+/* Helper function for s390_regs_ever_clobbered. Sets the fields in DATA for all
clobbered hard regs in SETREG. */
static void
deal with this automatically. */
if (crtl->calls_eh_return || cfun->machine->has_landing_pad_p)
for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM ; i++)
- if (crtl->calls_eh_return
- || (cfun->machine->has_landing_pad_p
+ if (crtl->calls_eh_return
+ || (cfun->machine->has_landing_pad_p
&& df_regs_ever_live_p (EH_RETURN_DATA_REGNO (i))))
regs_ever_clobbered[EH_RETURN_DATA_REGNO (i)] = 1;
{
if (INSN_P (cur_insn))
note_stores (PATTERN (cur_insn),
- s390_reg_clobbered_rtx,
+ s390_reg_clobbered_rtx,
regs_ever_clobbered);
}
}
}
-/* Determine the frame area which actually has to be accessed
- in the function epilogue. The values are stored at the
+/* Determine the frame area which actually has to be accessed
+ in the function epilogue. The values are stored at the
given pointers AREA_BOTTOM (address of the lowest used stack
- address) and AREA_TOP (address of the first item which does
+ address) and AREA_TOP (address of the first item which does
not belong to the stack frame). */
static void
b = MIN (b, cfun_frame_layout.f4_offset + (i - 2) * 8);
t = MAX (t, cfun_frame_layout.f4_offset + (i - 1) * 8);
}
-
+
*area_bottom = b;
*area_top = t;
}
clobbered_regs[HARD_FRAME_POINTER_REGNUM] = 1;
if (flag_pic)
- clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
+ clobbered_regs[PIC_OFFSET_TABLE_REGNUM]
|= df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM);
- clobbered_regs[BASE_REGNUM]
+ clobbered_regs[BASE_REGNUM]
|= (cfun->machine->base_reg
&& REGNO (cfun->machine->base_reg) == BASE_REGNUM);
cfun_frame_layout.first_save_gpr_slot = i;
cfun_frame_layout.last_save_gpr_slot = j;
- for (i = cfun_frame_layout.first_save_gpr_slot;
- i < cfun_frame_layout.last_save_gpr_slot + 1;
+ for (i = cfun_frame_layout.first_save_gpr_slot;
+ i < cfun_frame_layout.last_save_gpr_slot + 1;
i++)
if (clobbered_regs[i])
break;
for (j = cfun_frame_layout.last_save_gpr_slot; j > i; j--)
if (clobbered_regs[j])
break;
-
+
if (i == cfun_frame_layout.last_save_gpr_slot + 1)
{
/* Nothing to save/restore. */
cfun_frame_layout.frame_size = get_frame_size ();
if (!TARGET_64BIT && cfun_frame_layout.frame_size > 0x7fff0000)
fatal_error ("total size of local variables exceeds architecture limit");
-
+
if (!TARGET_PACKED_STACK)
{
cfun_frame_layout.backchain_offset = 0;
{
cfun_frame_layout.backchain_offset = (STACK_POINTER_OFFSET
- UNITS_PER_WORD);
- cfun_frame_layout.gprs_offset
- = (cfun_frame_layout.backchain_offset
+ cfun_frame_layout.gprs_offset
+ = (cfun_frame_layout.backchain_offset
- (STACK_POINTER_REGNUM - cfun_frame_layout.first_save_gpr_slot + 1)
* UNITS_PER_WORD);
-
+
if (TARGET_64BIT)
{
- cfun_frame_layout.f4_offset
+ cfun_frame_layout.f4_offset
= (cfun_frame_layout.gprs_offset
- 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
-
- cfun_frame_layout.f0_offset
- = (cfun_frame_layout.f4_offset
+
+ cfun_frame_layout.f0_offset
+ = (cfun_frame_layout.f4_offset
- 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
}
else
{
/* On 31 bit we have to care about alignment of the
floating point regs to provide fastest access. */
- cfun_frame_layout.f0_offset
- = ((cfun_frame_layout.gprs_offset
+ cfun_frame_layout.f0_offset
+ = ((cfun_frame_layout.gprs_offset
& ~(STACK_BOUNDARY / BITS_PER_UNIT - 1))
- 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
-
- cfun_frame_layout.f4_offset
+
+ cfun_frame_layout.f4_offset
= (cfun_frame_layout.f0_offset
- 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
}
}
else /* no backchain */
{
- cfun_frame_layout.f4_offset
+ cfun_frame_layout.f4_offset
= (STACK_POINTER_OFFSET
- 8 * (cfun_fpr_bit_p (2) + cfun_fpr_bit_p (3)));
-
- cfun_frame_layout.f0_offset
+
+ cfun_frame_layout.f0_offset
= (cfun_frame_layout.f4_offset
- 8 * (cfun_fpr_bit_p (0) + cfun_fpr_bit_p (1)));
-
- cfun_frame_layout.gprs_offset
+
+ cfun_frame_layout.gprs_offset
= cfun_frame_layout.f0_offset - cfun_gprs_save_area_size;
}
if (TARGET_BACKCHAIN)
cfun_frame_layout.frame_size += UNITS_PER_WORD;
- /* No alignment trouble here because f8-f15 are only saved under
+ /* No alignment trouble here because f8-f15 are only saved under
64 bit. */
cfun_frame_layout.f8_offset = (MIN (MIN (cfun_frame_layout.f0_offset,
cfun_frame_layout.f4_offset),
for (i = 0; i < 8; i++)
if (cfun_fpr_bit_p (i))
cfun_frame_layout.frame_size += 8;
-
+
cfun_frame_layout.frame_size += cfun_gprs_save_area_size;
-
+
/* If under 31 bit an odd number of gprs has to be saved we have to adjust
the frame size to sustain 8 byte alignment of stack frames. */
cfun_frame_layout.frame_size = ((cfun_frame_layout.frame_size +
s390_register_info (clobbered_regs);
- df_set_regs_ever_live (BASE_REGNUM,
+ df_set_regs_ever_live (BASE_REGNUM,
clobbered_regs[BASE_REGNUM] ? true : false);
- df_set_regs_ever_live (RETURN_REGNUM,
+ df_set_regs_ever_live (RETURN_REGNUM,
clobbered_regs[RETURN_REGNUM] ? true : false);
- df_set_regs_ever_live (STACK_POINTER_REGNUM,
+ df_set_regs_ever_live (STACK_POINTER_REGNUM,
clobbered_regs[STACK_POINTER_REGNUM] ? true : false);
if (cfun->machine->base_reg)
case GENERAL_REGS:
if (REGNO_PAIR_OK (regno, mode))
{
- if (TARGET_64BIT
+ if (TARGET_64BIT
|| (mode != TFmode && mode != TCmode && mode != TDmode))
return true;
- }
+ }
break;
case CC_REGS:
if (GET_MODE_CLASS (mode) == MODE_CC)
default:
return false;
}
-
+
return false;
}
switch (from)
{
case FRAME_POINTER_REGNUM:
- offset = (get_frame_size()
+ offset = (get_frame_size()
+ STACK_POINTER_OFFSET
+ crtl->outgoing_args_size);
break;
for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
{
rtx mem = XEXP (XVECEXP (PATTERN (insn), 0, i), 0);
-
+
if (first + i <= 6)
set_mem_alias_set (mem, get_varargs_alias_set ());
}
/* Choose best register to use for temp use within prologue.
See below for why TPF must use the register 1. */
- if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
- && !current_function_is_leaf
+ if (!has_hard_reg_initial_val (Pmode, RETURN_REGNUM)
+ && !current_function_is_leaf
&& !TARGET_TPF_PROFILING)
temp_reg = gen_rtx_REG (Pmode, RETURN_REGNUM);
else
/* Save call saved gprs. */
if (cfun_frame_layout.first_save_gpr != -1)
{
- insn = save_gprs (stack_pointer_rtx,
- cfun_frame_layout.gprs_offset +
- UNITS_PER_WORD * (cfun_frame_layout.first_save_gpr
+ insn = save_gprs (stack_pointer_rtx,
+ cfun_frame_layout.gprs_offset +
+ UNITS_PER_WORD * (cfun_frame_layout.first_save_gpr
- cfun_frame_layout.first_save_gpr_slot),
- cfun_frame_layout.first_save_gpr,
+ cfun_frame_layout.first_save_gpr,
cfun_frame_layout.last_save_gpr);
emit_insn (insn);
}
if (cfun_fpr_bit_p (i))
{
insn = save_fpr (stack_pointer_rtx, offset, i + 16);
-
+
RTX_FRAME_RELATED_P (insn) = 1;
offset -= 8;
}
if (offset >= cfun_frame_layout.f8_offset)
next_fpr = i + 16;
}
-
+
if (!TARGET_PACKED_STACK)
next_fpr = cfun_save_high_fprs_p ? 31 : 0;
}
}
- if (s390_warn_framesize > 0
+ if (s390_warn_framesize > 0
&& cfun_frame_layout.frame_size >= s390_warn_framesize)
- warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
+ warning (0, "frame size of %qs is " HOST_WIDE_INT_PRINT_DEC " bytes",
current_function_name (), cfun_frame_layout.frame_size);
if (s390_warn_dynamicstack_p && cfun->calls_alloca)
if (DISP_IN_RANGE (INTVAL (frame_off)))
{
insn = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
- gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
frame_off));
insn = emit_insn (insn);
}
if (TARGET_BACKCHAIN)
{
if (cfun_frame_layout.backchain_offset)
- addr = gen_rtx_MEM (Pmode,
- plus_constant (stack_pointer_rtx,
+ addr = gen_rtx_MEM (Pmode,
+ plus_constant (stack_pointer_rtx,
cfun_frame_layout.backchain_offset));
else
- addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
+ addr = gen_rtx_MEM (Pmode, stack_pointer_rtx);
set_mem_alias_set (addr, get_frame_alias_set ());
insn = emit_insn (gen_move_insn (addr, temp_reg));
}
moved below the use of the stack slots. */
s390_emit_stack_tie ();
- insn = emit_insn (gen_add2_insn (temp_reg,
+ insn = emit_insn (gen_add2_insn (temp_reg,
GEN_INT (cfun_frame_layout.f8_offset)));
offset = 0;
cfun_frame_layout.frame_size
+ cfun_frame_layout.f8_offset
+ offset);
-
+
insn = save_fpr (temp_reg, offset, i);
offset += 8;
RTX_FRAME_RELATED_P (insn) = 1;
/* Check whether to use frame or stack pointer for restore. */
- frame_pointer = (frame_pointer_needed
+ frame_pointer = (frame_pointer_needed
? hard_frame_pointer_rtx : stack_pointer_rtx);
s390_frame_area (&area_bottom, &area_top);
}
}
}
-
+
}
else
{
else if (!TARGET_PACKED_STACK)
next_offset += 8;
}
-
+
}
/* Return register. */
if (global_regs[i])
{
addr = plus_constant (frame_pointer,
- offset + cfun_frame_layout.gprs_offset
+ offset + cfun_frame_layout.gprs_offset
+ (i - cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_WORD);
addr = gen_rtx_MEM (Pmode, addr);
addr = plus_constant (frame_pointer,
offset + cfun_frame_layout.gprs_offset
- + (RETURN_REGNUM
+ + (RETURN_REGNUM
- cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_WORD);
addr = gen_rtx_MEM (Pmode, addr);
insn = restore_gprs (frame_pointer,
offset + cfun_frame_layout.gprs_offset
- + (cfun_frame_layout.first_restore_gpr
+ + (cfun_frame_layout.first_restore_gpr
- cfun_frame_layout.first_save_gpr_slot)
* UNITS_PER_WORD,
cfun_frame_layout.first_restore_gpr,
t = make_tree (TREE_TYPE (sav), return_address_pointer_rtx);
t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
size_int (-RETURN_REGNUM * UNITS_PER_WORD));
-
+
t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
TREE_SIDE_EFFECTS (t) = 1;
expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
} */
static tree
-s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+s390_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
gimple_seq *post_p ATTRIBUTE_UNUSED)
{
tree f_gpr, f_fpr, f_ovf, f_sav;
t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
gimplify_and_add (t, pre_p);
- t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav,
size_int (sav_ofs));
- u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
+ u = build2 (MULT_EXPR, TREE_TYPE (reg), reg,
fold_convert (TREE_TYPE (reg), size_int (sav_scale)));
t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, fold_convert (sizetype, u));
t = ovf;
if (size < UNITS_PER_WORD)
- t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
size_int (UNITS_PER_WORD - size));
gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
gimplify_assign (addr, t, pre_p);
- t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t,
size_int (size));
gimplify_assign (ovf, t, pre_p);
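The size_int (UNITS_PER_WORD - size) adjustments above account for big-endian argument padding: an overflow argument smaller than a word sits in the high-addressed bytes of its word-sized stack slot, so the value's own address is offset from the slot start. A hypothetical helper, assuming 8-byte slots as on 64-bit S/390:

#include <stddef.h>

/* Address of an argument value inside its stack slot on a big-endian
   target with 8-byte slots.  Illustrative only.  */
static void *
arg_value_address (void *slot, size_t size)
{
  const size_t units_per_word = 8;

  if (size < units_per_word)
    return (char *) slot + (units_per_word - size);
  return slot;
}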
{
*p1 = CC_REGNUM;
*p2 = INVALID_REGNUM;
-
+
return true;
}
/* If all special registers are in fact used, there's nothing we
can do, so no point in walking the insn list. */
- if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
+ if (cfun_frame_layout.first_save_gpr <= BASE_REGNUM
&& cfun_frame_layout.last_save_gpr >= BASE_REGNUM
- && (TARGET_CPU_ZARCH
- || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
+ && (TARGET_CPU_ZARCH
+ || (cfun_frame_layout.first_save_gpr <= RETURN_REGNUM
&& cfun_frame_layout.last_save_gpr >= RETURN_REGNUM)))
return;
if (cfun_frame_layout.first_save_gpr != -1)
{
- new_insn = save_gprs (base,
+ new_insn = save_gprs (base,
off + (cfun_frame_layout.first_save_gpr
- - first) * UNITS_PER_WORD,
+ - first) * UNITS_PER_WORD,
cfun_frame_layout.first_save_gpr,
cfun_frame_layout.last_save_gpr);
new_insn = emit_insn_before (new_insn, insn);
if (cfun_frame_layout.first_restore_gpr != -1)
{
- new_insn = restore_gprs (base,
+ new_insn = restore_gprs (base,
off + (cfun_frame_layout.first_restore_gpr
- - first) * UNITS_PER_WORD,
+ - first) * UNITS_PER_WORD,
cfun_frame_layout.first_restore_gpr,
cfun_frame_layout.last_restore_gpr);
new_insn = emit_insn_before (new_insn, insn);