           /* BFM.  */
           if (speed)
             *cost += extra_cost->alu.bfi;
-          *cost += rtx_cost (op1, VOIDmode, (enum rtx_code) code, 1, speed);
+          *cost += rtx_cost (op1, VOIDmode, code, 1, speed);
         }
       return true;
             *cost += extra_cost->alu.extend_arith;
           op1 = aarch64_strip_extend (op1, true);
-          *cost += rtx_cost (op1, VOIDmode,
-                             (enum rtx_code) GET_CODE (op1), 0, speed);
+          *cost += rtx_cost (op1, VOIDmode, GET_CODE (op1), 0, speed);
           return true;
         }
                || aarch64_shift_p (GET_CODE (new_op1)))
               && code != COMPARE)
             {
-              *cost += aarch64_rtx_mult_cost (new_op1, MULT,
-                                              (enum rtx_code) code,
-                                              speed);
+              *cost += aarch64_rtx_mult_cost (new_op1, MULT, code, speed);
               return true;
             }
             *cost += extra_cost->alu.extend_arith;
           op0 = aarch64_strip_extend (op0, true);
-          *cost += rtx_cost (op0, VOIDmode,
-                             (enum rtx_code) GET_CODE (op0), 0, speed);
+          *cost += rtx_cost (op0, VOIDmode, GET_CODE (op0), 0, speed);
           return true;
         }
           && aarch64_mask_and_shift_for_ubfiz_p (int_mode, op1,
                                                  XEXP (op0, 1)))
         {
-          *cost += rtx_cost (XEXP (op0, 0), int_mode,
-                             (enum rtx_code) code, 0, speed);
+          *cost += rtx_cost (XEXP (op0, 0), int_mode, code, 0, speed);
           if (speed)
             *cost += extra_cost->alu.bfx;
         {
           /* We possibly get the immediate for free, this is not
              modelled.  */
-          *cost += rtx_cost (op0, int_mode,
-                             (enum rtx_code) code, 0, speed);
+          *cost += rtx_cost (op0, int_mode, code, 0, speed);
           if (speed)
             *cost += extra_cost->alu.logical;
         }
           /* In both cases we want to cost both operands.  */
-          *cost += rtx_cost (new_op0, int_mode, (enum rtx_code) code,
-                             0, speed);
-          *cost += rtx_cost (op1, int_mode, (enum rtx_code) code,
-                             1, speed);
+          *cost += rtx_cost (new_op0, int_mode, code, 0, speed);
+          *cost += rtx_cost (op1, int_mode, code, 1, speed);
           return true;
         }
       /* MVN-shifted-reg.  */
       if (op0 != x)
         {
-          *cost += rtx_cost (op0, mode, (enum rtx_code) code, 0, speed);
+          *cost += rtx_cost (op0, mode, code, 0, speed);
           if (speed)
             *cost += extra_cost->alu.log_shift;
           rtx newop1 = XEXP (op0, 1);
           rtx op0_stripped = aarch64_strip_shift (newop0);
-          *cost += rtx_cost (newop1, mode, (enum rtx_code) code, 1, speed);
+          *cost += rtx_cost (newop1, mode, code, 1, speed);
           *cost += rtx_cost (op0_stripped, mode, XOR, 0, speed);
           if (speed)
           && known_eq (INTVAL (XEXP (op1, 1)),
                        GET_MODE_BITSIZE (mode) - 1))
         {
-          *cost += rtx_cost (op0, mode, (rtx_code) code, 0, speed);
+          *cost += rtx_cost (op0, mode, code, 0, speed);
           /* We already demanded XEXP (op1, 0) to be REG_P, so
              don't recurse into it.  */
           return true;
       /* We can trust that the immediates used will be correct (there
          are no by-register forms), so we need only cost op0.  */
-      *cost += rtx_cost (XEXP (x, 0), VOIDmode, (enum rtx_code) code, 0, speed);
+      *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed);
       return true;
     case MULT:
                && aarch64_vec_fpconst_pow_of_2 (XEXP (x, 1)) > 0)
               || aarch64_fpconst_pow_of_2 (XEXP (x, 1)) > 0))
         {
-          *cost += rtx_cost (XEXP (x, 0), VOIDmode, (rtx_code) code,
-                             0, speed);
+          *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed);
           return true;
         }
-      *cost += rtx_cost (x, VOIDmode, (enum rtx_code) code, 0, speed);
+      *cost += rtx_cost (x, VOIDmode, code, 0, speed);
       return true;
     case ABS:
     case E_SFmode:
       cmp_mode = SFmode;
-      cc_mode = aarch64_select_cc_mode ((rtx_code) code, op0, op1);
+      cc_mode = aarch64_select_cc_mode (code, op0, op1);
       icode = cc_mode == CCFPEmode ? CODE_FOR_fcmpesf : CODE_FOR_fcmpsf;
       break;
     case E_DFmode:
       cmp_mode = DFmode;
-      cc_mode = aarch64_select_cc_mode ((rtx_code) code, op0, op1);
+      cc_mode = aarch64_select_cc_mode (code, op0, op1);
       icode = cc_mode == CCFPEmode ? CODE_FOR_fcmpedf : CODE_FOR_fcmpdf;
       break;
   *gen_seq = get_insns ();
   end_sequence ();
-  return gen_rtx_fmt_ee ((rtx_code) code, cc_mode,
+  return gen_rtx_fmt_ee (code, cc_mode,
                          gen_rtx_REG (cc_mode, CC_REGNUM), const0_rtx);
 }
     case E_SFmode:
       cmp_mode = SFmode;
-      cc_mode = aarch64_select_cc_mode ((rtx_code) cmp_code, op0, op1);
+      cc_mode = aarch64_select_cc_mode (cmp_code, op0, op1);
       break;
     case E_DFmode:
       cmp_mode = DFmode;
-      cc_mode = aarch64_select_cc_mode ((rtx_code) cmp_code, op0, op1);
+      cc_mode = aarch64_select_cc_mode (cmp_code, op0, op1);
       break;
     default:
       end_sequence ();
   target = gen_rtx_REG (cc_mode, CC_REGNUM);
-  aarch64_cond = aarch64_get_condition_code_1 (cc_mode, (rtx_code) cmp_code);
+  aarch64_cond = aarch64_get_condition_code_1 (cc_mode, cmp_code);
   if (bit_code != AND)
     {
   *gen_seq = get_insns ();
   end_sequence ();
-  return gen_rtx_fmt_ee ((rtx_code) cmp_code, VOIDmode, target, const0_rtx);
+  return gen_rtx_fmt_ee (cmp_code, VOIDmode, target, const0_rtx);
 }
 #undef TARGET_GEN_CCMP_FIRST