}
}
/* Get the conditional controlling the bb exit edge. */
- gimple *cond_stmt = last_stmt (guard_bb);
+ gimple *cond_stmt = *gsi_last_bb (guard_bb);
if (gimple_code (cond_stmt) == GIMPLE_COND)
{
/* The true edge corresponds to the uninteresting condition.
dump_flags_t flags)
{
edge e;
- gimple *stmt;
-
- stmt = last_stmt (bb);
- if (stmt && gimple_code (stmt) == GIMPLE_COND)
+ if (safe_is_a <gcond *> (*gsi_last_bb (bb)))
{
edge true_edge, false_edge;
if (single_pred_p (pred2) && single_pred (pred2) == pred1
&& num_stmts_in_pred2 == 0)
{
- gimple *cond_stmt = last_stmt (pred1);
- if (cond_stmt && gimple_code (cond_stmt) == GIMPLE_COND)
+ if (gcond *cond_stmt = dyn_cast <gcond *> (*gsi_last_bb (pred1)))
{
tree lhs = gimple_cond_lhs (cond_stmt);
tree rhs = gimple_cond_rhs (cond_stmt);
an equivalence from to the joiner. */
bool found_cprop_opportunity = false;
basic_block dom = get_immediate_dominator (CDI_DOMINATORS, bb);
- gcond *cond = as_a <gcond *> (last_stmt (dom));
+ gcond *cond = as_a <gcond *> (*gsi_last_bb (dom));
if (gimple_cond_code (cond) == EQ_EXPR
|| gimple_cond_code (cond) == NE_EXPR)
for (unsigned i = 0; i < 2; ++i)
{
if (single_pred_p (bb))
{
- edge e = single_pred_edge (bb);
- basic_block pred = e->src;
- gimple *stmt;
+ basic_block pred = single_pred (bb);
if (loop_depth (pred->loop_father) > loop_depth (bb->loop_father))
return NULL;
- stmt = last_stmt (pred);
-
- if (stmt && gimple_code (stmt) == GIMPLE_COND)
- return as_a<gcond *> (stmt);
+ return safe_dyn_cast <gcond *> (*gsi_last_bb (pred));
}
return NULL;
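
Aside from the mechanical hunks, the pattern being introduced is worth spelling out: every conversion folds the old three-step sequence (fetch last_stmt, test gimple_code, cast with as_a) into a single safe_dyn_cast or safe_is_a applied to *gsi_last_bb, which is NULL when the block has no statements. Below is a minimal, self-contained sketch of that null-tolerant cast idiom; the Stmt/CondStmt hierarchy and the simplified safe_dyn_cast signature are made up for illustration and are not GCC's gimple classes or the is-a.h templates.

#include <cstdio>

struct Stmt { virtual ~Stmt () {} };                  /* stands in for gimple  */
struct CondStmt : Stmt { const char *code = "!="; };  /* stands in for gcond   */

/* Null-tolerant downcast: a NULL input (empty block) and a statement of the
   wrong kind both come back as NULL, so one test at the use site covers
   both cases.  */
template <typename T>
static T *
safe_dyn_cast (Stmt *p)
{
  return p ? dynamic_cast<T *> (p) : nullptr;
}

int
main ()
{
  CondStmt cond;
  Stmt *last = &cond;              /* plays the role of *gsi_last_bb (bb)  */
  if (CondStmt *c = safe_dyn_cast<CondStmt> (last))
    std::printf ("conditional, code %s\n", c->code);

  Stmt *empty_bb_last = nullptr;   /* empty block: no last statement       */
  if (!safe_dyn_cast<CondStmt> (empty_bb_last))
    std::printf ("no conditional to look at\n");
  return 0;
}

GCC's actual safe_dyn_cast in is-a.h is spelled differently (it forwards to GCC's own dyn_cast template), but the NULL-propagating behaviour shown here is the property the hunks above and below rely on.
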
class ipa_node_params *params_summary,
basic_block bb)
{
- gimple *last;
tree op, op2;
int index;
struct agg_position_info aggpos;
tree param_type;
expr_eval_ops param_ops;
- last = last_stmt (bb);
- if (!last || gimple_code (last) != GIMPLE_COND)
+ gcond *last = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
+ if (!last)
return;
if (!is_gimple_ip_invariant (gimple_cond_rhs (last)))
return;
class ipa_node_params *params_summary,
basic_block bb)
{
- gimple *lastg;
tree op;
int index;
struct agg_position_info aggpos;
tree param_type;
expr_eval_ops param_ops;
- lastg = last_stmt (bb);
- if (!lastg || gimple_code (lastg) != GIMPLE_SWITCH)
+ gswitch *last = safe_dyn_cast <gswitch *> (*gsi_last_bb (bb));
+ if (!last)
return;
- gswitch *last = as_a <gswitch *> (lastg);
op = gimple_switch_index (last);
if (!decompose_param_expr (fbi, last, op, &index, &param_type, &aggpos,
&param_ops))
edge e;
edge_iterator ei;
basic_block first_bb = NULL;
- gimple *stmt;
if (single_pred_p (bb))
{
if (!first_bb)
return false;
- stmt = last_stmt (first_bb);
+ gcond *stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (first_bb));
if (!stmt
- || gimple_code (stmt) != GIMPLE_COND
|| !is_gimple_ip_invariant (gimple_cond_rhs (stmt)))
return false;
gcall *call, gphi *phi)
{
HOST_WIDE_INT offset;
- gimple *assign, *cond;
+ gimple *assign;
basic_block phi_bb, assign_bb, cond_bb;
tree tmp, parm, expr, obj;
int index, i;
return;
cond_bb = single_pred (assign_bb);
- cond = last_stmt (cond_bb);
+ gcond *cond = safe_dyn_cast <gcond *> (*gsi_last_bb (cond_bb));
if (!cond
- || gimple_code (cond) != GIMPLE_COND
|| gimple_cond_code (cond) != NE_EXPR
|| gimple_cond_lhs (cond) != parm
|| !integer_zerop (gimple_cond_rhs (cond)))
/* Third, let's see that the branching is done depending on the least
significant bit of the pfn. */
- gimple *branch = last_stmt (bb);
- if (!branch || gimple_code (branch) != GIMPLE_COND)
+ gcond *branch = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
+ if (!branch)
return;
if ((gimple_cond_code (branch) != NE_EXPR
enum tree_code loop_bound_code,
int loop_bound_step)
{
- gimple *stmt;
tree compare_var, compare_base;
enum tree_code compare_code;
tree compare_step_var;
if (predicted_by_loop_heuristics_p (bb))
return;
- stmt = last_stmt (bb);
- if (!stmt || gimple_code (stmt) != GIMPLE_COND)
+ gcond *stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
+ if (!stmt)
return;
- if (!is_comparison_with_loop_invariant_p (as_a <gcond *> (stmt),
+ if (!is_comparison_with_loop_invariant_p (stmt,
loop, &compare_var,
&compare_code,
&compare_step_var,
gimple *lhs_def_stmt;
gphi *phi_stmt;
tree cmp_rhs, cmp_lhs;
- gimple *last;
- gcond *cmp_stmt;
- last = last_stmt (exit_edge->src);
- if (!last)
- return;
- cmp_stmt = dyn_cast <gcond *> (last);
+ gcond *cmp_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (exit_edge->src));
if (!cmp_stmt)
return;
stmt = as_a <gcond *> (nb_iter->stmt);
break;
}
- if (!stmt && last_stmt (loop->header)
- && gimple_code (last_stmt (loop->header)) == GIMPLE_COND)
- stmt = as_a <gcond *> (last_stmt (loop->header));
+ if (!stmt)
+ stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (loop->header));
if (stmt)
is_comparison_with_loop_invariant_p (stmt, loop,
&loop_bound_var,
&& single_succ_p (preheader_edge->src))
preheader_edge = single_pred_edge (preheader_edge->src);
- gimple *stmt = last_stmt (preheader_edge->src);
/* Pattern match fortran loop preheader:
_16 = BUILTIN_EXPECT (_15, 1, PRED_FORTRAN_LOOP_PREHEADER);
_17 = (logical(kind=4)) _16;
headers produced by fortran frontend and in this case we want
to predict paths leading to this preheader. */
+ gcond *stmt
+ = safe_dyn_cast <gcond *> (*gsi_last_bb (preheader_edge->src));
if (stmt
- && gimple_code (stmt) == GIMPLE_COND
&& gimple_cond_code (stmt) == NE_EXPR
&& TREE_CODE (gimple_cond_lhs (stmt)) == SSA_NAME
&& integer_zerop (gimple_cond_rhs (stmt)))
static void
tree_predict_by_opcode (basic_block bb)
{
- gimple *stmt = last_stmt (bb);
edge then_edge;
tree op0, op1;
tree type;
enum br_predictor predictor;
HOST_WIDE_INT probability;
+ gimple *stmt = *gsi_last_bb (bb);
if (!stmt)
return;
for (i = 0; i < n; i++)
{
- gimple *cond_stmt;
gphi *phi;
basic_block bb1, bb2;
edge e1, e2;
bb = bb_order[i];
- cond_stmt = last_stmt (bb);
/* Check to see if the last statement is a GIMPLE_COND. */
- if (!cond_stmt
- || gimple_code (cond_stmt) != GIMPLE_COND)
+ gcond *cond_stmt = safe_dyn_cast <gcond *> (*gsi_last_bb (bb));
+ if (!cond_stmt)
continue;
e1 = EDGE_SUCC (bb, 0);
/* If there are other edges into the middle block make
CFG cleanup deal with the edge removal to avoid
updating dominators here in a non-trivial way. */
- gcond *cond = as_a <gcond *> (last_stmt (cond_block));
+ gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_block));
if (keep_edge->flags & EDGE_FALSE_VALUE)
gimple_cond_make_false (cond);
else if (keep_edge->flags & EDGE_TRUE_VALUE)
if (!empty_block_p (middle_bb))
return false;
- gimple *stmt = last_stmt (cond_bb);
+ gcond *stmt = as_a <gcond *> (*gsi_last_bb (cond_bb));
tree lhs = gimple_cond_lhs (stmt);
tree rhs = gimple_cond_rhs (stmt);
edge e0, edge e1, gphi *phi, tree arg0, tree arg1)
{
gimple_stmt_iterator gsi;
- gimple *cond;
edge true_edge, false_edge;
enum tree_code code;
bool empty_or_with_defined_p = true;
empty_or_with_defined_p = false;
}
- cond = last_stmt (cond_bb);
+ gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_bb));
code = gimple_cond_code (cond);
/* This transformation is only valid for equality comparisons. */
if (HONOR_NANS (type) || HONOR_SIGNED_ZEROS (type))
return false;
- gcond *cond = as_a <gcond *> (last_stmt (cond_bb));
+ gcond *cond = as_a <gcond *> (*gsi_last_bb (cond_bb));
enum tree_code cmp = gimple_cond_code (cond);
tree rhs = gimple_cond_rhs (cond);
if (!empty_block_p (middle_bb))
return false;
- gcond *cond1 = as_a <gcond *> (last_stmt (cond_bb));
+ gcond *cond1 = as_a <gcond *> (*gsi_last_bb (cond_bb));
enum tree_code cmp1 = gimple_cond_code (cond1);
switch (cmp1)
{
tree arg2 = gimple_phi_arg_def (phi, cond2_phi_edge->dest_idx);
if (!tree_fits_shwi_p (arg2))
return false;
- gimple *cond2 = last_stmt (cond2_bb);
- if (cond2 == NULL || gimple_code (cond2) != GIMPLE_COND)
+ gcond *cond2 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond2_bb));
+ if (!cond2)
return false;
enum tree_code cmp2 = gimple_cond_code (cond2);
tree lhs2 = gimple_cond_lhs (cond2);
tree arg3 = arg2;
basic_block cond3_bb = cond2_bb;
edge cond3_phi_edge = cond2_phi_edge;
- gimple *cond3 = cond2;
+ gcond *cond3 = cond2;
enum tree_code cmp3 = cmp2;
tree lhs3 = lhs2;
tree rhs3 = rhs2;
else
cond3_phi_edge = EDGE_SUCC (cond3_bb, 0);
arg3 = gimple_phi_arg_def (phi, cond3_phi_edge->dest_idx);
- cond3 = last_stmt (cond3_bb);
- if (cond3 == NULL || gimple_code (cond3) != GIMPLE_COND)
+ cond3 = safe_dyn_cast <gcond *> (*gsi_last_bb (cond3_bb));
+ if (!cond3)
return false;
cmp3 = gimple_cond_code (cond3);
lhs3 = gimple_cond_lhs (cond3);
edge e1, edge e2, gphi *phi,
tree arg0, tree arg1)
{
- gimple *cond;
gimple_stmt_iterator gsi, gsi_from;
gimple *call;
gimple *cast = NULL;
arg = gimple_assign_rhs1 (cast);
}
- cond = last_stmt (cond_bb);
+ gcond *cond = dyn_cast <gcond *> (*gsi_last_bb (cond_bb));
/* Cond_bb has a check for b_4 [!=|==] 0 before calling the popcount/clz/ctz
builtin. */
- if (gimple_code (cond) != GIMPLE_COND
+ if (!cond
|| (gimple_cond_code (cond) != NE_EXPR
&& gimple_cond_code (cond) != EQ_EXPR)
|| !integer_zerop (gimple_cond_rhs (cond))
&& bbinfo[idx].op == NULL_TREE
&& ops[bbinfo[idx].first_idx]->op != NULL_TREE)
{
- gcond *cond_stmt = as_a <gcond *> (last_stmt (bb));
+ gcond *cond_stmt = as_a <gcond *> (*gsi_last_bb (bb));
if (idx > max_idx)
max_idx = idx;
&& EDGE_COUNT (idom2->succs) == 2);
/* Verify the controlling stmt is the same. */
- gcond *last1 = as_a <gcond *> (last_stmt (idom1));
- gcond *last2 = as_a <gcond *> (last_stmt (idom2));
+ gcond *last1 = as_a <gcond *> (*gsi_last_bb (idom1));
+ gcond *last2 = as_a <gcond *> (*gsi_last_bb (idom2));
bool inverted_p;
if (! cond_stmts_equal_p (last1, vp1->cclhs, vp1->ccrhs,
last2, vp2->cclhs, vp2->ccrhs,
{
basic_block idom1 = get_immediate_dominator (CDI_DOMINATORS, vp1->block);
if (EDGE_COUNT (idom1->succs) == 2)
- if (gcond *last1 = safe_dyn_cast <gcond *> (last_stmt (idom1)))
+ if (gcond *last1 = safe_dyn_cast <gcond *> (*gsi_last_bb (idom1)))
{
/* ??? We want to use SSA_VAL here. But possibly not
allow VN_TOP. */
{
basic_block idom1 = get_immediate_dominator (CDI_DOMINATORS, vp1->block);
if (EDGE_COUNT (idom1->succs) == 2)
- if (gcond *last1 = safe_dyn_cast <gcond *> (last_stmt (idom1)))
+ if (gcond *last1 = safe_dyn_cast <gcond *> (*gsi_last_bb (idom1)))
{
/* ??? We want to use SSA_VAL here. But possibly not
allow VN_TOP. */
edge_iterator ei;
edge e;
FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
- if (greturn *ret = safe_dyn_cast <greturn *> (last_stmt (e->src)))
+ if (greturn *ret = safe_dyn_cast <greturn *> (*gsi_last_bb (e->src)))
{
tree val = gimple_return_retval (ret);
/* ??? Easy to handle simple indirections with some work.
if (EDGE_COUNT (bb->succs) <= 1)
return;
- gimple *stmt = last_stmt (bb);
+ gimple *stmt = *gsi_last_bb (bb);
if (!stmt)
return;
going to be able to eliminate its branch. */
if (j > 0)
{
- gimple *last = last_stmt (bb);
+ gimple *last = *gsi_last_bb (bb);
if (last
&& (gimple_code (last) == GIMPLE_SWITCH
|| gimple_code (last) == GIMPLE_GOTO))
jump_threader::thread_outgoing_edges (basic_block bb)
{
int flags = (EDGE_IGNORE | EDGE_COMPLEX | EDGE_ABNORMAL);
- gimple *last;
if (!flag_thread_jumps)
return;
will be traversed when the incoming edge from BB is traversed. */
if (single_succ_to_potentially_threadable_block (bb))
thread_across_edge (single_succ_edge (bb));
- else if ((last = last_stmt (bb))
- && gimple_code (last) == GIMPLE_COND
+ else if (safe_is_a <gcond *> (*gsi_last_bb (bb))
&& EDGE_COUNT (bb->succs) == 2
&& (EDGE_SUCC (bb, 0)->flags & flags) == 0
&& (EDGE_SUCC (bb, 1)->flags & flags) == 0)
FOR_EACH_BB_FN (bb, fun)
{
- gimple *stmt = last_stmt (bb);
- if (stmt && gimple_code (stmt) == GIMPLE_SWITCH)
+ if (gswitch *stmt = safe_dyn_cast <gswitch *> (*gsi_last_bb (bb)))
{
if (dump_file)
{
}
switch_conversion sconv;
- sconv.expand (as_a <gswitch *> (stmt));
+ sconv.expand (stmt);
cfg_altered |= sconv.m_cfg_altered;
if (!sconv.m_reason)
{
FOR_EACH_BB_FN (bb, fun)
{
- gimple *stmt = last_stmt (bb);
- gswitch *swtch;
- if (stmt && (swtch = dyn_cast<gswitch *> (stmt)))
+ if (gswitch *swtch = safe_dyn_cast <gswitch *> (*gsi_last_bb (bb)))
{
if (!O0)
group_case_labels_stmt (swtch);
bool changed = false;
basic_block first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
tree param;
- gimple *stmt;
edge_iterator ei;
if (!suitable_for_tail_opt_p ())
{
/* Only traverse the normal exits, i.e. those that end with return
statement. */
- stmt = last_stmt (e->src);
-
- if (stmt
- && gimple_code (stmt) == GIMPLE_RETURN)
+ if (safe_is_a <greturn *> (*gsi_last_bb (e->src)))
find_tail_calls (e->src, &tailcalls);
}
/* Modify the remaining return statements. */
FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
- stmt = last_stmt (e->src);
-
- if (stmt
- && gimple_code (stmt) == GIMPLE_RETURN)
+ if (safe_is_a <greturn *> (*gsi_last_bb (e->src)))
adjust_return_value (e->src, m_acc, a_acc);
}
}
{
gcc_assert (scalar_loop);
condition_bb = gimple_bb (loop_vectorized_call);
- cond = as_a <gcond *> (last_stmt (condition_bb));
+ cond = as_a <gcond *> (*gsi_last_bb (condition_bb));
gimple_cond_set_condition_from_tree (cond, cond_expr);
update_stmt (cond);
/* When we have a stmt ending this block and defining a
value we have to insert on edges when inserting after it for
a vector containing its definition. Avoid this for now. */
- if (gimple *last = last_stmt (bb))
+ if (gimple *last = *gsi_last_bb (bb))
if (gimple_get_lhs (last)
&& is_ctrl_altering_stmt (last))
{
edge e;
FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
{
- greturn *ret = safe_dyn_cast <greturn *> (last_stmt (e->src));
+ greturn *ret = safe_dyn_cast <greturn *> (*gsi_last_bb (e->src));
if (!ret)
continue;
if (gimple_return_retval (ret) == decl)
gimple *g;
do
{
- g = last_stmt (bb);
+ g = *gsi_last_bb (bb);
if ((g && gimple_code (g) == GIMPLE_COND)
|| !single_succ_p (bb))
break;
basic_block bb;
basic_block entry;
class loop *outer, *orig;
- gimple_stmt_iterator gsi;
- gimple *g;
if (loop->orig_loop_num == 0)
return NULL;
for (; bb != entry && flow_bb_inside_loop_p (outer, bb);
bb = get_immediate_dominator (CDI_DOMINATORS, bb))
{
- g = last_stmt (bb);
- if (g == NULL || gimple_code (g) != GIMPLE_COND)
+ gimple_stmt_iterator gsi = gsi_last_bb (bb);
+ if (!safe_is_a <gcond *> (*gsi))
continue;
- gsi = gsi_for_stmt (g);
gsi_prev (&gsi);
if (gsi_end_p (gsi))
continue;
- g = gsi_stmt (gsi);
+ gimple *g = gsi_stmt (gsi);
/* The guarding internal function call must have the same distribution
alias id. */
if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS)