int shiftrt = 0;
tree res_ops[2];
machine_mode mode;
+ bool convert_before_shift = false;
*load = NULL;
*psignbit = false;
if (*load)
loc[3] = gimple_location (*load);
exp = res_ops[0];
+ /* This looks backwards, but we're walking back the def chain, so if
+ we find the conversion here, after having found a shift, that's
+ because the conversion appears before the shift, and we should thus
+ adjust the bit position and size for the shift only after adjusting
+ them for the type conversion.  */
+ convert_before_shift = true;
}
/* Identify the load, if there is one. */
*pvolatilep = volatilep;
/* Adjust shifts... */
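+ /* If the conversion preceded the shift, drop the bits beyond the
+ converted type's precision first, so that the shift adjustment
+ below applies to the already-narrowed field.  */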
+ if (convert_before_shift
+ && outer_type && *pbitsize > TYPE_PRECISION (outer_type))
+ {
+ HOST_WIDE_INT excess = *pbitsize - TYPE_PRECISION (outer_type);
+ if (*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
+ *pbitpos += excess;
+ *pbitsize -= excess;
+ }
+
if (shiftrt)
{
if (!*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
}
/* ... and bit position. */
- if (outer_type && *pbitsize > TYPE_PRECISION (outer_type))
+ if (!convert_before_shift
+ && outer_type && *pbitsize > TYPE_PRECISION (outer_type))
{
HOST_WIDE_INT excess = *pbitsize - TYPE_PRECISION (outer_type);
if (*preversep ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
if (get_best_mode (end_bit - first_bit, first_bit, 0, ll_end_region,
ll_align, BITS_PER_WORD, volatilep, &lnmode))
l_split_load = false;
+ /* ??? If ll and rl share the same load, reuse that?
+ See PR 118206 -> gcc.dg/field-merge-18.c */
else
{
/* Consider the possibility of recombining loads if any of the
/* Apply masks. */
for (int j = 0; j < 2; j++)
if (mask[j] != wi::mask (0, true, mask[j].get_precision ()))
- op[j] = build2_loc (locs[j][2], BIT_AND_EXPR, type,
- op[j], wide_int_to_tree (type, mask[j]));
+ op[j] = fold_build2_loc (locs[j][2], BIT_AND_EXPR, type,
+ op[j], wide_int_to_tree (type, mask[j]));
- cmp[i] = build2_loc (i ? rloc : lloc, wanted_code, truth_type,
- op[0], op[1]);
+ cmp[i] = fold_build2_loc (i ? rloc : lloc, wanted_code, truth_type,
+ op[0], op[1]);
}
/* Reorder the compares if needed. */
if (parts == 1)
result = cmp[0];
else if (!separatep || !maybe_separate)
- result = build2_loc (rloc, orig_code, truth_type, cmp[0], cmp[1]);
+ {
+ /* Only fold if either of the compares is already a constant;
+ otherwise we may lose the sequence point, and that may prevent
+ further optimizations.  */
+ if (TREE_CODE (cmp[0]) == INTEGER_CST
+ || TREE_CODE (cmp[1]) == INTEGER_CST)
+ result = fold_build2_loc (rloc, orig_code, truth_type, cmp[0], cmp[1]);
+ else
+ result = build2_loc (rloc, orig_code, truth_type, cmp[0], cmp[1]);
+ }
else
{
result = cmp[0];
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O1" } */
+
+/* PR tree-optimization/118206 */
+/* Check that shifts, whether before or after narrowing conversions, mask out
+ the bits that are to be discarded. */
+
+/* This only uses bits from the least significant byte in the short. */
+__attribute__((noipa)) int
+foo (const void *x)
+{
+ unsigned short b;
+ __builtin_memcpy (&b, x, sizeof (short));
+ if ((b & 15) != 8)
+ return 1;
+ if ((((unsigned char) b) >> 4) > 7)
+ return 1;
+ return 0;
+}
+
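+/* This uses bits from both bytes in the short: the shift happens
+   before the narrowing conversion. */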
+__attribute__((noipa)) int
+bar (const void *x)
+{
+ unsigned short b;
+ __builtin_memcpy (&b, x, sizeof (short));
+ if ((b & 15) != 8)
+ return 1;
+ if ((unsigned char)(b >> 4) > 7)
+ return 1;
+ return 0;
+}
+
+int
+main ()
+{
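+ /* Wraps to 0xff78 with a 16-bit unsigned short: the low nibble is 8,
+ bits 4..7 of the low byte are 7, and bits 4..11 are 0xf7. */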
+ unsigned short a = 0x78 - 0x80 - 0x80;
+ if (foo (&a) != 0 || bar (&a) != (a > 0xff))
+ __builtin_abort ();
+ unsigned short b = 0x88;
+ if (foo (&b) != 1 || bar (&b) != 1)
+ __builtin_abort ();
+ unsigned short c = 8;
+ if (foo (&c) != 0 || bar (&c) != 0)
+ __builtin_abort ();
+ return 0;
+}