&& (TREE_CODE (rhs1) != SSA_NAME
|| !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs1)))
{
- if (TREE_CODE (TREE_TYPE (rhs1)) != BITINT_TYPE
- || (bitint_precision_kind (TREE_TYPE (rhs1))
- < bitint_prec_large))
- continue;
if (is_gimple_assign (use_stmt))
switch (gimple_assign_rhs_code (use_stmt))
{
- case MULT_EXPR:
case TRUNC_DIV_EXPR:
case TRUNC_MOD_EXPR:
case FLOAT_EXPR:
+ /* For division, modulo and casts to floating
+ point, avoid representing unsigned operands
+ using negative prec if they were sign-extended
+ from narrower precision. */
+ if (TYPE_UNSIGNED (TREE_TYPE (s))
+ && !TYPE_UNSIGNED (TREE_TYPE (rhs1))
+ && (TYPE_PRECISION (TREE_TYPE (s))
+ > TYPE_PRECISION (TREE_TYPE (rhs1))))
+ goto force_name;
+ /* FALLTHRU */
+ case MULT_EXPR:
+ if (TREE_CODE (TREE_TYPE (rhs1)) != BITINT_TYPE
+ || (bitint_precision_kind (TREE_TYPE (rhs1))
+ < bitint_prec_large))
+ continue;
/* Uses which use handle_operand_addr can't
deal with nested casts. */
if (TREE_CODE (rhs1) == SSA_NAME
@@ ... @@
default:
break;
}
+ if (TREE_CODE (TREE_TYPE (rhs1)) != BITINT_TYPE
+ || (bitint_precision_kind (TREE_TYPE (rhs1))
+ < bitint_prec_large))
+ continue;
if ((TYPE_PRECISION (TREE_TYPE (rhs1))
>= TYPE_PRECISION (TREE_TYPE (s)))
&& mergeable_op (use_stmt))
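
To make the intent of the reordering concrete, here is a minimal sketch (not part of the patch; quot, narrow and wide are made-up names) of the operand shape the new check routes to force_name. For TRUNC_DIV_EXPR, TRUNC_MOD_EXPR and FLOAT_EXPR the full unsigned value of the widened operand matters, while MULT_EXPR deliberately falls through past the new check: the low 256 bits of a product are the same whether the multiplier is taken as -15 or as 2**256 - 15, so multiplication can keep the cheaper representation.

/* A minimal sketch of the problematic operand shape; not part of the
   patch, names are illustrative.  */
#if __BITINT_MAXWIDTH__ >= 256
unsigned _BitInt(256)
quot (_BitInt(8) narrow)        /* e.g. narrow == -15 */
{
  /* Sign-extend to 256 bits, then convert to unsigned: for
     narrow == -15 the value of wide is 2**256 - 15.  */
  unsigned _BitInt(256) wide = narrow;
  /* 590 / (2**256 - 15) is 0.  If the lowering described wide to the
     runtime division routine via the narrower operand with negative
     prec, i.e. as -15, it would effectively compute 590 / -15.  */
  return 590uwb / wide;
}
#endif
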
--- /dev/null
+/* PR tree-optimization/113614 */
+/* { dg-do run { target bitint } } */
+/* { dg-options "-std=c23 -pedantic-errors" } */
+/* { dg-skip-if "" { ! run_expensive_tests } { "*" } { "-O0" "-O2" } } */
+/* { dg-skip-if "" { ! run_expensive_tests } { "-flto" } { "" } } */
+
+_BitInt(8) a;
+_BitInt(8) b;
+_BitInt(8) c;
+
+#if __BITINT_MAXWIDTH__ >= 256
+_BitInt(256)
+foo (_BitInt(8) y, unsigned _BitInt(256) z)
+{
+ unsigned _BitInt(256) d = -y;
+ z /= d;
+ return z + a + b + c;
+}
+#endif
+
+int
+main ()
+{
+#if __BITINT_MAXWIDTH__ >= 256
+ if (foo (0xfwb, 0x24euwb))
+ __builtin_abort ();
+#endif
+ return 0;
+}
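
For the record, the expected arithmetic in the testcase: y is 15, so d = (unsigned _BitInt(256)) -y wraps to 2**256 - 15; z /= d divides 590 by that huge value, yielding 0, and with a, b and c zero-initialized foo must return 0. With the old code, per the comment added above, the unsigned divisor could be represented using negative prec as the sign-extended -15, making the division effectively 590 / -15 and the return value nonzero, which is what the __builtin_abort catches.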