else if (bytes == GET_MODE_SIZE (QImode))
return QImode;
else if (bytes < GET_MODE_SIZE (SImode)
- && TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
+ && !targetm.slow_unaligned_access (SImode, align * BITS_PER_UNIT)
&& offset >= GET_MODE_SIZE (SImode) - bytes)
/* This matches the case where we have SImode and 3 bytes
   and offset >= 1 and permits us to move back one and overlap
   with the previous read, thus avoiding having to shift
   unwanted bytes off of the input. */
return SImode;
else if (word_mode_ok && bytes < UNITS_PER_WORD
- && TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
+ && !targetm.slow_unaligned_access (word_mode, align * BITS_PER_UNIT)
&& offset >= UNITS_PER_WORD-bytes)
/* Similarly, if we can use DImode it will get matched here and
   can do an overlapping read that ends at the end of the block. */
return word_mode;
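
For reference, the overlapping read that both branches above rely on can be
sketched in plain C.  The helper below is purely illustrative (it is not part
of the patch, and the name tail3_eq is made up); it only shows the idea of
ending a wider load exactly at the end of the block.

#include <stdint.h>
#include <string.h>

/* Compare the last 3 bytes of two blocks of LEN >= 4 bytes using one
   4-byte load per block.  The load starts one byte early so it ends
   exactly at the end of the block; the extra leading byte has already
   been compared equal by the caller, so re-reading it cannot change
   the result.  */
int
tail3_eq (const unsigned char *a, const unsigned char *b, size_t len)
{
  uint32_t wa, wb;
  memcpy (&wa, a + len - 4, 4);  /* overlaps one already-compared byte */
  memcpy (&wb, b + len - 4, 4);
  return wa == wb;
}
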
load_mode_size = GET_MODE_SIZE (load_mode);
if (bytes >= load_mode_size)
cmp_bytes = load_mode_size;
- else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
+ else if (!targetm.slow_unaligned_access (load_mode,
+ align * BITS_PER_UNIT))
{
/* Move this load back so it doesn't go past the end.
P8/P9 can do this efficiently. */
if (!CONST_INT_P (align_rtx))
return false;
- unsigned int base_align = UINTVAL (align_rtx) / BITS_PER_UNIT;
+ unsigned int align_by_bits = UINTVAL (align_rtx);
+ unsigned int base_align = align_by_bits / BITS_PER_UNIT;
gcc_assert (GET_MODE (target) == SImode);
/* The code generated for p7 and older is not faster than glibc
memcmp if alignment is small and length is not short, so bail
out to avoid those conditions. */
- if (!TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
+ if (targetm.slow_unaligned_access (word_mode, align_by_bits)
&& ((base_align == 1 && bytes > 16)
|| (base_align == 2 && bytes > 32)))
return false;
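
The bail-out above amounts to a small predicate; the function below is a
hypothetical restatement of it in plain C (names are illustrative), with the
test at the end of this patch as a data point: base_align 1 and 20 bytes under
-mstrict-align take the libcall path.

#include <stdbool.h>

/* When unaligned word-mode accesses are slow (p7 and older, or
   -mstrict-align), keep the inline expansion only for short or
   better-aligned blocks; everything else goes to glibc memcmp.
   Example: slow_unaligned=true,  base_align=1, bytes=20 -> libcall;
            slow_unaligned=false, base_align=1, bytes=20 -> inline.  */
bool
bail_to_libcall_p (bool slow_unaligned, unsigned int base_align,
                   unsigned int bytes)
{
  return slow_unaligned
         && ((base_align == 1 && bytes > 16)
             || (base_align == 2 && bytes > 32));
}
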
load_mode_size = GET_MODE_SIZE (load_mode);
if (bytes_to_compare >= load_mode_size)
cmp_bytes = load_mode_size;
- else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
+ else if (!targetm.slow_unaligned_access (load_mode,
+ align * BITS_PER_UNIT))
{
/* Move this load back so it doesn't go past the end.
P8/P9 can do this efficiently. */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-require-effective-target opt_mstrict_align } */
+/* { dg-options "-O2 -mstrict-align" } */
+/* { dg-final { scan-assembler-times {\mb[l]? memcmp\M} 1 } } */
+
+/* Test that a library call is emitted for block memory compare when
+   strict-align is set.  The flag causes rs6000_slow_unaligned_access
+   to return true.  */
+
+int foo (const char* s1, const char* s2)
+{
+ return __builtin_memcmp (s1, s2, 20);
+}
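
For contrast, a hypothetical (untested) companion test could check the
opposite direction: on a CPU where unaligned accesses are cheap and without
-mstrict-align, the same 20-byte compare should be expanded inline, leaving no
memcmp call to scan for.

/* { dg-do compile } */
/* { dg-options "-O2 -mdejagnu-cpu=power8" } */
/* { dg-final { scan-assembler-not {\mbl? memcmp\M} } } */

/* Hypothetical counterpart to the test above: unaligned accesses are
   cheap on power8, so the 20-byte compare is expanded inline instead
   of calling memcmp.  */

int foo (const char* s1, const char* s2)
{
  return __builtin_memcmp (s1, s2, 20);
}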