}
}
+/* For a YMM/ZMM store or YMM/ZMM extract, check the DEFs of the source
+   operand SRC in the same basic block before INSN.  Return AVX_U128_ANY
+   if every such DEF loads constant zero, and AVX_U128_DIRTY otherwise,
+   including when SRC has no DEF before INSN in the block.  */
+
+static int
+ix86_avx_u128_mode_source (rtx_insn *insn, const_rtx src)
+{
+ basic_block bb = BLOCK_FOR_INSN (insn);
+ rtx_insn *end = BB_END (bb);
+
+ /* Return AVX_U128_DIRTY if there is no DEF of SRC before INSN in the
+    same basic block.  */
+ int status = AVX_U128_DIRTY;
+
+ for (df_ref def = DF_REG_DEF_CHAIN (REGNO (src));
+ def; def = DF_REF_NEXT_REG (def))
+ if (DF_REF_BB (def) == bb)
+ {
+ /* Only DEFs in the same basic block as INSN are considered.  */
+ rtx_insn *def_insn = DF_REF_INSN (def);
+
+ /* Check if DEF_INSN is before INSN. */
+ rtx_insn *next;
+ for (next = NEXT_INSN (def_insn);
+ next != nullptr && next != end && next != insn;
+ next = NEXT_INSN (next))
+ ;
+
+ /* Skip if DEF_INSN isn't before INSN. */
+ if (next != insn)
+ continue;
+
+ /* Return AVX_U128_DIRTY if the source operand of DEF_INSN
+ isn't constant zero. */
+
+ if (CALL_P (def_insn))
+ {
+ bool avx_upper_reg_found = false;
+ note_stores (def_insn,
+ ix86_check_avx_upper_stores,
+ &avx_upper_reg_found);
+
+ /* Return AVX_U128_DIRTY if the call writes a YMM/ZMM register.  */
+ if (avx_upper_reg_found)
+ return AVX_U128_DIRTY;
+
+ continue;
+ }
+
+ rtx set = single_set (def_insn);
+ if (!set)
+ return AVX_U128_DIRTY;
+
+ rtx dest = SET_DEST (set);
+
+ /* Return AVX_U128_DIRTY if DEF_INSN loads a YMM/ZMM register from
+    anything other than constant zero.  DEF_INSNs that don't write a
+    YMM/ZMM register leave the upper bits untouched.  */
+ if (ix86_check_avx_upper_register (dest)
+ && standard_sse_constant_p (SET_SRC (set),
+ GET_MODE (dest)) != 1)
+ return AVX_U128_DIRTY;
+
+ /* We get here only if all AVX loads are from constant zero. */
+ status = AVX_U128_ANY;
+ }
+
+ return status;
+}
+
/* Return needed mode for entity in optimize_mode_switching pass. */
static int
ix86_avx_u128_mode_needed (rtx_insn *insn)
{
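+ /* Debug insns generate no code, so they must not influence the mode
+    needed.  */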
+ if (DEBUG_INSN_P (insn))
+ return AVX_U128_ANY;
+
if (CALL_P (insn))
{
rtx link;
return AVX_U128_CLEAN;
}
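+ /* Scratch storage shared by the FOR_EACH_SUBRTX walks below.  */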
+ subrtx_iterator::array_type array;
+
rtx set = single_set (insn);
if (set)
{
else
return AVX_U128_ANY;
}
- else if (ix86_check_avx_upper_register (src))
+ else
{
- /* This is an YMM/ZMM store. Check for the source operand
- of SRC DEFs in the same basic block before INSN. */
- basic_block bb = BLOCK_FOR_INSN (insn);
- rtx_insn *end = BB_END (bb);
-
- /* Return AVX_U128_DIRTY if there is no DEF in the same basic
- block. */
- int status = AVX_U128_DIRTY;
-
- for (df_ref def = DF_REG_DEF_CHAIN (REGNO (src));
- def; def = DF_REF_NEXT_REG (def))
- if (DF_REF_BB (def) == bb)
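+ /* This is a YMM/ZMM store or YMM/ZMM extract: DEST isn't a YMM/ZMM
+    register, but SRC may read one.  Check each YMM/ZMM register
+    referenced by SRC.  */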
+ FOR_EACH_SUBRTX (iter, array, src, NONCONST)
+ if (ix86_check_avx_upper_register (*iter))
{
- /* Ignore DEF from different basic blocks. */
- rtx_insn *def_insn = DF_REF_INSN (def);
-
- /* Check if DEF_INSN is before INSN. */
- rtx_insn *next;
- for (next = NEXT_INSN (def_insn);
- next != nullptr && next != end && next != insn;
- next = NEXT_INSN (next))
- ;
-
- /* Skip if DEF_INSN isn't before INSN. */
- if (next != insn)
- continue;
-
- /* Return AVX_U128_DIRTY if the source operand of
- DEF_INSN isn't constant zero. */
-
- if (CALL_P (def_insn))
- {
- bool avx_upper_reg_found = false;
- note_stores (def_insn, ix86_check_avx_upper_stores,
- &avx_upper_reg_found);
-
- /* Return AVX_U128_DIRTY if call returns AVX. */
- if (avx_upper_reg_found)
- return AVX_U128_DIRTY;
-
- continue;
- }
-
- set = single_set (def_insn);
- if (!set)
- return AVX_U128_DIRTY;
-
- dest = SET_DEST (set);
-
- /* Skip if DEF_INSN is not an AVX load. */
- if (ix86_check_avx_upper_register (dest))
- {
- src = SET_SRC (set);
- /* Return AVX_U128_DIRTY if the source operand isn't
- constant zero. */
- if (standard_sse_constant_p (src, GET_MODE (dest))
- != 1)
- return AVX_U128_DIRTY;
- }
-
- /* We get here only if all AVX loads are from constant
- zero. */
- status = AVX_U128_ANY;
+ int status = ix86_avx_u128_mode_source (insn, *iter);
+ if (status == AVX_U128_DIRTY)
+ return status;
}
-
- return status;
}
      /* This isn't YMM/ZMM load/store.  */
      return AVX_U128_ANY;
    }

  /* Require DIRTY mode if a 256bit or 512bit AVX register is referenced.
     Hardware changes state only when a 256bit register is written to,
     but we need to prevent the compiler from moving optimal insertion
     point above eventual read from 256bit or 512 bit register. */
- subrtx_iterator::array_type array;
FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
if (ix86_check_avx_upper_register (*iter))
return AVX_U128_DIRTY;
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O3 -mtune=skylake -Wno-attributes" } */
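+/* Verify that vzeroupper is emitted after the final
+   vextracti128/vpaddd/vmovd sequence rather than hoisted above it.  */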
+
+#include <x86intrin.h>
+#include <stdint.h>
+
+__attribute__((always_inline, target("avx2")))
+static __m256i
+load8bit_4x4_avx2(const uint8_t *const src, const uint32_t stride)
+{
+ __m128i src01, src23;
+ src01 = _mm_cvtsi32_si128(*(int32_t *)(src + 0 * stride));
+ src01 = _mm_insert_epi32(src01, *(int32_t *)(src + 1 * stride), 1);
+ src23 = _mm_cvtsi32_si128(*(int32_t *)(src + 2 * stride));
+ src23 = _mm_insert_epi32(src23, *(int32_t *)(src + 3 * stride), 1);
+ return _mm256_setr_m128i(src01, src23);
+}
+
+__attribute__ ((noinline, noipa, target("avx2")))
+uint32_t
+compute4x_m_sad_avx2_intrin(uint8_t *src, uint32_t src_stride,
+ uint8_t *ref, uint32_t ref_stride,
+ uint32_t height)
+{
+ __m128i xmm0;
+ __m256i ymm = _mm256_setzero_si256();
+ uint32_t y;
+
+ for (y = 0; y < height; y += 4) {
+ const __m256i src0123 = load8bit_4x4_avx2(src, src_stride);
+ const __m256i ref0123 = load8bit_4x4_avx2(ref, ref_stride);
+ ymm = _mm256_add_epi32(ymm, _mm256_sad_epu8(src0123, ref0123));
+ src += src_stride << 2;
+ ref += ref_stride << 2;
+ }
+
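+ /* The extract below reads the upper half of ymm, which is written in
+    the loop above; vzeroupper must stay after this sequence.  */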
+ xmm0 = _mm_add_epi32(_mm256_castsi256_si128(ymm),
+ _mm256_extracti128_si256(ymm, 1));
+
+ return (uint32_t)_mm_cvtsi128_si32(xmm0);
+}
+
+/* Expect assembly like:
+
+ vextracti128 $0x1, %ymm3, %xmm3
+ vpaddd %xmm3, %xmm0, %xmm0
+ vmovd %xmm0, %eax
+ vzeroupper
+
+rather than:
+
+ vzeroupper
+ vextracti128 $0x1, %ymm3, %xmm3
+ vpaddd %xmm3, %xmm0, %xmm0
+ vmovd %xmm0, %eax
+
+ */
+
+/* { dg-final { scan-assembler "\[ \t\]+vextracti128\[ \t\]+\[^\n\]+\n\[ \t\]+vpaddd\[ \t\]+\[^\n\]+\n\[ \t\]+vmovd\[ \t\]+\[^\n\]+\n\[ \t\]+vzeroupper" } } */