The PR shows that we'll eventually ICE when last_clique wraps.  The
following avoids this by refusing to hand out new cliques after
exhausting them.  We then use zero (no clique) as a conservative
fallback.
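
For illustration only, a minimal standalone sketch (the toy_* names are
invented; this is not GCC code) of both the failure mode and the fix:
naively incrementing the unsigned short counter wraps 65535 -> 0, after
which clique numbers that were already handed out exceed last_clique and
the gcc_asserts in the hunks below fire.  A saturating allocator instead
stops at 65535 and hands out zero ("no clique") from then on:

  #include <cassert>
  #include <cstdio>

  struct toy_function { unsigned short last_clique; };

  /* Saturating counterpart of the patch's get_new_clique.  */
  static unsigned short
  toy_new_clique (toy_function *fn)
  {
    unsigned short clique = fn->last_clique + 1;
    /* 65535 + 1 truncates to 0; refuse to wrap around.  */
    if (clique != 0)
      fn->last_clique = clique;
    return clique;
  }

  int
  main ()
  {
    toy_function fn = { 65534 };
    assert (toy_new_clique (&fn) == 65535);  /* Last clique available.  */
    assert (toy_new_clique (&fn) == 0);      /* Exhausted: no clique.  */
    assert (fn.last_clique == 65535);        /* Counter never wraps.  */
    printf ("saturates at %d\n", (int) fn.last_clique);
    return 0;
  }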

	PR middle-end/112785
	* function.h (get_new_clique): New inline function handling
	last_clique overflow.
	* cfgrtl.cc (duplicate_insn_chain): Use it.
	* tree-cfg.cc (gimple_duplicate_bb): Likewise.
	* tree-inline.cc (remap_dependence_clique): Likewise.

diff --git a/gcc/cfgrtl.cc b/gcc/cfgrtl.cc
--- a/gcc/cfgrtl.cc
+++ b/gcc/cfgrtl.cc
@@ ... @@ duplicate_insn_chain
 	      if (!existed)
 		{
 		  gcc_assert
 		    (MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
-		  newc = ++cfun->last_clique;
+		  newc = get_new_clique (cfun);
 		}
 	      /* We cannot adjust MR_DEPENDENCE_CLIQUE in-place
 		 since MEM_EXPR is shared so make a copy and
diff --git a/gcc/function.h b/gcc/function.h
--- a/gcc/function.h
+++ b/gcc/function.h
@@ ... @@ set_loops_for_fn
   fn->x_current_loops = loops;
 }
 
+/* Get a new unique dependence clique or zero if none is left.  */
+
+inline unsigned short
+get_new_clique (function *fn)
+{
+  unsigned short clique = fn->last_clique + 1;
+  if (clique != 0)
+    fn->last_clique = clique;
+  return clique;
+}
+
 /* For backward compatibility... eventually these should all go away.  */
 #define current_function_funcdef_no (cfun->funcdef_no)
diff --git a/gcc/tree-cfg.cc b/gcc/tree-cfg.cc
--- a/gcc/tree-cfg.cc
+++ b/gcc/tree-cfg.cc
@@ ... @@ gimple_duplicate_bb
 	      if (!existed)
 		{
 		  gcc_assert (MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
-		  newc = ++cfun->last_clique;
+		  newc = get_new_clique (cfun);
 		}
 	      MR_DEPENDENCE_CLIQUE (op) = newc;
 	    }
diff --git a/gcc/tree-inline.cc b/gcc/tree-inline.cc
--- a/gcc/tree-inline.cc
+++ b/gcc/tree-inline.cc
@@ ... @@ remap_dependence_clique
       /* Clique 1 is reserved for local ones set by PTA.  */
       if (cfun->last_clique == 0)
 	cfun->last_clique = 1;
-      newc = ++cfun->last_clique;
+      newc = get_new_clique (cfun);
     }
   return newc;
 }
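
To make the fallback concrete, here is a hedged toy model (again with
invented toy_* names, heavily simplified from remap_dependence_clique
and its dependence map) of clique remapping during inlining: the first
time a callee clique is seen it is mapped to a fresh caller clique,
clique 1 stays reserved for PTA-local cliques, and once get_new_clique
returns zero that zero is recorded as-is, so the copied references
conservatively lose their dependence info instead of reusing a live
clique number:

  #include <cassert>
  #include <map>

  struct toy_function { unsigned short last_clique; };

  /* Saturating allocator as in the sketch above.  */
  static unsigned short
  toy_new_clique (toy_function *fn)
  {
    unsigned short clique = fn->last_clique + 1;
    if (clique != 0)
      fn->last_clique = clique;
    return clique;
  }

  /* Toy remap_dependence_clique: map callee clique OLDC into caller FN,
     reusing an earlier mapping when one exists.  */
  static unsigned short
  toy_remap_clique (toy_function *fn,
                    std::map<unsigned short, unsigned short> &remap,
                    unsigned short oldc)
  {
    if (oldc == 0)
      return 0;  /* No clique maps to no clique.  */
    auto [slot, first_seen] = remap.emplace (oldc, 0);
    if (first_seen)
      {
        /* Clique 1 is reserved for local ones set by PTA.  */
        if (fn->last_clique == 0)
          fn->last_clique = 1;
        slot->second = toy_new_clique (fn);  /* May be 0: conservative.  */
      }
    return slot->second;
  }

  int
  main ()
  {
    toy_function caller = { 0 };
    std::map<unsigned short, unsigned short> remap;
    unsigned short c = toy_remap_clique (&caller, remap, 7);
    assert (c == 2);  /* Clique 1 was skipped as reserved.  */
    assert (toy_remap_clique (&caller, remap, 7) == c);
    caller.last_clique = 65535;  /* Simulate exhaustion.  */
    assert (toy_remap_clique (&caller, remap, 9) == 0);
    return 0;
  }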