(define_insn_and_split "*extenddi_truncate<mode>"
[(set (match_operand:DI 0 "register_operand" "=d")
(sign_extend:DI
- (truncate:SHORT (match_operand:DI 1 "register_operand" "d"))))]
+ (truncate:SUBDI (match_operand:DI 1 "register_operand" "d"))))]
"TARGET_64BIT && !TARGET_MIPS16 && !ISA_HAS_EXTS"
"#"
"&& reload_completed"
return non_mem_decl_p (base);
}
+/* Helper function of expand_assignment. Check if storing field of
+ size BITSIZE at position BITPOS overlaps with the most significant
+ bit of TO_RTX, known to be SUBREG_PROMOTED_VAR_P.
+ Updating this field requires an explicit extension. */
+static bool
+store_field_updates_msb_p (poly_int64 bitpos, poly_int64 bitsize, rtx to_rtx)
+{
+ poly_int64 to_size = GET_MODE_SIZE (GET_MODE (to_rtx));
+ poly_int64 bitnum = BYTES_BIG_ENDIAN ? to_size - bitsize - bitpos : bitpos;
+ return maybe_eq (bitnum + bitsize, to_size);
+}
+
/* Expand an assignment that stores the value of FROM into TO. If NONTEMPORAL
is true, try generating a nontemporal store. */
&& known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (to_rtx))))
result = store_expr (from, to_rtx, 0, nontemporal, false);
/* Check if the field overlaps the MSB, requiring extension. */
- else if (maybe_eq (bitpos + bitsize,
- GET_MODE_BITSIZE (GET_MODE (to_rtx))))
+ else if (store_field_updates_msb_p (bitpos, bitsize, to_rtx))
{
scalar_int_mode imode = subreg_unpromoted_mode (to_rtx);
scalar_int_mode omode = subreg_promoted_mode (to_rtx);
if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
return true;
+ /* This explicit TRUNCATE may be needed on targets that require
+ MODE to be suitably extended when stored in X. Targets such as
+ mips64 use (sign_extend:DI (truncate:SI (reg:DI x))) to perform
+ an explicit extension, avoiding use of (subreg:SI (reg:DI x))
+ which is assumed to already be extended. */
+ scalar_int_mode imode, omode;
+ if (is_a <scalar_int_mode> (mode, &imode)
+ && is_a <scalar_int_mode> (GET_MODE (x), &omode)
+ && targetm.mode_rep_extended (imode, omode) != UNKNOWN)
+ return false;
+
/* See if we already satisfy the requirements of MODE. If yes we
can just switch to MODE. */
if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-mabi=64 -O2" } */
+
+#define COUNT 10
+
+typedef unsigned short u16;
+typedef unsigned int u32;
+
+typedef struct NeedleAddress
+{
+ u16 nId;
+ u16 mId;
+} NeedleAddress;
+
+u32 __attribute__ ((noinline)) prepareNeedle(const u16 upper, const u16 lower)
+{
+ u32 needleAddress = 0;
+ NeedleAddress *const addr = (NeedleAddress*)(&needleAddress);
+ addr->mId = upper;
+ addr->nId = lower;
+ return needleAddress;
+}
+
+const u32* __attribute__ ((noinline)) findNeedle(const u32 needle, const u32* begin, const u32* end)
+{
+ while ( begin != end && needle != *begin )
+ {
+ ++begin;
+ }
+ return begin;
+}
+
+int main()
+{
+ u32 needle = prepareNeedle(0xDCBA, 0xABCD);
+
+ u32 haystack[COUNT] = {};
+ for (int i = 0; i < COUNT; i++)
+ haystack[i] = needle;
+
+ const u32* result = findNeedle(needle, haystack, haystack + COUNT);
+ if (result == haystack + COUNT)
+ __builtin_abort ();
+ return 0;
+}
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-mabi=64 -Os" } */
+
+#define COUNT 10
+
+typedef unsigned short u16;
+typedef unsigned int u32;
+
+typedef struct NeedleAddress
+{
+ u16 nId;
+ u16 mId;
+} NeedleAddress;
+
+u32 __attribute__ ((noinline)) prepareNeedle(const u16 upper, const u16 lower)
+{
+ u32 needleAddress = 0;
+ NeedleAddress *const addr = (NeedleAddress*)(&needleAddress);
+ addr->mId = upper;
+ addr->nId = lower;
+ return needleAddress;
+}
+
+const u32* __attribute__ ((noinline)) findNeedle(const u32 needle, const u32* begin, const u32* end)
+{
+ while ( begin != end && needle != *begin )
+ {
+ ++begin;
+ }
+ return begin;
+}
+
+int main()
+{
+ u32 needle = prepareNeedle(0xDCBA, 0xABCD);
+
+ u32 haystack[COUNT] = {};
+ for (int i = 0; i < COUNT; i++)
+ haystack[i] = needle;
+
+ const u32* result = findNeedle(needle, haystack, haystack + COUNT);
+ if (result == haystack + COUNT)
+ __builtin_abort ();
+ return 0;
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mabi=64 -O2 -march=octeon2" } */
+
+#define COUNT 10
+
+typedef unsigned short u16;
+typedef unsigned int u32;
+
+typedef struct NeedleAddress
+{
+ u16 nId;
+ u16 mId;
+} NeedleAddress;
+
+u32 __attribute__ ((noinline)) prepareNeedle(const u16 upper, const u16 lower)
+{
+ u32 needleAddress = 0;
+ NeedleAddress *const addr = (NeedleAddress*)(&needleAddress);
+ addr->mId = upper;
+ addr->nId = lower;
+ return needleAddress;
+}
+
+const u32* __attribute__ ((noinline)) findNeedle(const u32 needle, const u32* begin, const u32* end)
+{
+ while ( begin != end && needle != *begin )
+ {
+ ++begin;
+ }
+ return begin;
+}
+
+int main()
+{
+ u32 needle = prepareNeedle(0xDCBA, 0xABCD);
+
+ u32 haystack[COUNT] = {};
+ for (int i = 0; i < COUNT; i++)
+ haystack[i] = needle;
+
+ const u32* result = findNeedle(needle, haystack, haystack + COUNT);
+ if (result == haystack + COUNT)
+ __builtin_abort ();
+ return 0;
+}
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-mabi=64 -Os -march=octeon2" } */
+
+#define COUNT 10
+
+typedef unsigned short u16;
+typedef unsigned int u32;
+
+typedef struct NeedleAddress
+{
+ u16 nId;
+ u16 mId;
+} NeedleAddress;
+
+u32 __attribute__ ((noinline)) prepareNeedle(const u16 upper, const u16 lower)
+{
+ u32 needleAddress = 0;
+ NeedleAddress *const addr = (NeedleAddress*)(&needleAddress);
+ addr->mId = upper;
+ addr->nId = lower;
+ return needleAddress;
+}
+
+const u32* __attribute__ ((noinline)) findNeedle(const u32 needle, const u32* begin, const u32* end)
+{
+ while ( begin != end && needle != *begin )
+ {
+ ++begin;
+ }
+ return begin;
+}
+
+int main()
+{
+ u32 needle = prepareNeedle(0xDCBA, 0xABCD);
+
+ u32 haystack[COUNT] = {};
+ for (int i = 0; i < COUNT; i++)
+ haystack[i] = needle;
+
+ const u32* result = findNeedle(needle, haystack, haystack + COUNT);
+ if (result == haystack + COUNT)
+ __builtin_abort ();
+ return 0;
+}