tcg/optimize: Simplify fold_and constant checks
author      Richard Henderson <richard.henderson@linaro.org>
            Mon, 9 Dec 2024 23:48:02 +0000 (17:48 -0600)
committer   Richard Henderson <richard.henderson@linaro.org>
            Mon, 30 Jun 2025 13:42:38 +0000 (07:42 -0600)
If operand 2 is constant, then the computation of z_mask
and a_mask will produce the same results as the explicit
checks via fold_xi_to_i and fold_xi_to_x.  Shift the call
of fold_xx_to_x down below the ti_is_const(t2) check.

Reviewed-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
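
To make the redundancy concrete, the following is a minimal, self-contained sketch in plain C (not QEMU source). It assumes the mask formulas used around fold_and in tcg/optimize.c at this point in the series, namely z_mask = t1->z_mask & t2->z_mask for bits that may still be 1 and a_mask = t1->z_mask & ~t2->o_mask for bits of operand 1 that the AND can still change; with a constant operand 2 those masks collapse to the two outcomes previously produced by the explicit fold_xi_to_i(ctx, op, 0) and fold_xi_to_x(ctx, op, -1) checks.

    /*
     * Illustrative sketch only, not QEMU code.  Assumed definitions:
     *   z_mask = t1->z_mask & t2->z_mask;    bits that may still be 1
     *   a_mask = t1->z_mask & ~t2->o_mask;   bits of operand 1 the AND can change
     * For a constant operand 2, z_mask == o_mask == the constant value.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Operand 1 completely unknown: every bit may be 0 or 1. */
        uint64_t t1_z_mask = ~UINT64_C(0);

        /* Case 1: operand 2 is the constant 0.
         * z_mask = t1_z_mask & 0 = 0, so every result bit is known zero:
         * the same outcome as fold_xi_to_i(ctx, op, 0). */
        uint64_t c = 0;
        printf("C = 0:  z_mask = %#" PRIx64 " -> fold to constant 0\n",
               t1_z_mask & c);

        /* Case 2: operand 2 is the constant -1.
         * a_mask = t1_z_mask & ~(-1) = 0, so no bit of operand 1 is
         * affected and the op folds to a copy of operand 1:
         * the same outcome as fold_xi_to_x(ctx, op, -1). */
        c = ~UINT64_C(0);
        printf("C = -1: a_mask = %#" PRIx64 " -> fold to copy of operand 1\n",
               t1_z_mask & ~c);
        return 0;
    }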
tcg/optimize.c

index 06ccf39d64212d92ae8f725c8b9318a1f1696053..f3a2328fe403866787a29401a1c7bc133b7361eb 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1434,10 +1434,7 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
     uint64_t z_mask, o_mask, s_mask, a_mask;
     TempOptInfo *t1, *t2;
 
-    if (fold_const2_commutative(ctx, op) ||
-        fold_xi_to_i(ctx, op, 0) ||
-        fold_xi_to_x(ctx, op, -1) ||
-        fold_xx_to_x(ctx, op)) {
+    if (fold_const2_commutative(ctx, op)) {
         return true;
     }
 
@@ -1473,6 +1470,8 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
                     op->args[3] = len;
                 }
             }
+        } else {
+            fold_xx_to_x(ctx, op);
         }
     }
     return true;
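
For completeness, a short sketch of why fold_xx_to_x still needs an explicit call, now only on the non-constant path: the masks describe which bits may be set, not whether the two inputs are the same temporary, so they cannot establish the x AND x identity on their own. This again assumes the a_mask formula noted above.

    /* Sketch only, not QEMU code: with x fully unknown, the masks do not
     * know that both operands are the same value, so the mask logic alone
     * would not fold "x AND x"; only the operand comparison performed by
     * fold_xx_to_x can. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t t1_z_mask = ~UINT64_C(0);  /* x unknown: any bit may be 1 */
        uint64_t t2_o_mask = 0;             /* no bit of x is known to be 1 */

        /* a_mask = t1->z_mask & ~t2->o_mask stays all-ones, so the mask
         * logic would still consider every bit of operand 1 affected... */
        assert((t1_z_mask & ~t2_o_mask) == ~UINT64_C(0));

        /* ...even though for any concrete value the identity holds. */
        uint64_t x = 0xdeadbeefcafef00dULL;
        assert((x & x) == x);
        return 0;
    }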