/*
 * Optimize an OR operation.
 *
 * Returns true if the op was folded away entirely (replaced by a
 * constant or a copy); otherwise propagates known-bits information
 * for the result and returns the outcome of fold_masks_zosa.
 *
 * Known-bits algebra for OR:
 *   z_mask (bits that may be 1): union of the operands' z_masks.
 *   o_mask (bits known to be 1): a result bit is certainly 1 if it is
 *          certainly 1 in either operand, hence the union.
 *   s_mask (known-sign/replicated bits): only bits replicated in both
 *          operands remain replicated, hence the intersection.
 */
static bool fold_or(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, o_mask, s_mask, a_mask;
    TempOptInfo *t1, *t2;

    /*
     * Constant-fold with both operands constant (commutative form),
     * or apply the identity x | 0 == x.
     * NOTE(review): the second test was truncated in the patch hunk;
     * fold_xi_to_x(ctx, op, 0) is the standard identity check used by
     * the sibling fold functions — confirm against upstream.
     */
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);

    z_mask = t1->z_mask | t2->z_mask;
    o_mask = t1->o_mask | t2->o_mask;
    s_mask = t1->s_mask & t2->s_mask;

    /* Affected bits are those not known one, masked by those known zero. */
    a_mask = ~t1->o_mask & t2->z_mask;

    return fold_masks_zosa(ctx, op, z_mask, o_mask, s_mask, a_mask);
}
static bool fold_orc(OptContext *ctx, TCGOp *op)