(set_attr "prefix" "orig,orig,maybe_evex")
(set_attr "mode" "TI")])
-(define_insn_and_split "*sse4_1_<code>v8qiv8hi2<mask_name>_2"
+(define_insn_and_split "*sse4_1_<code>v8qiv8hi2_2"
[(set (match_operand:V8HI 0 "register_operand")
(any_extend:V8HI
(vec_select:V8QI
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_SSE4_1 && <mask_avx512bw_condition> && <mask_avx512vl_condition>
+ "TARGET_SSE4_1
&& ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
(any_extend:V8HI (match_dup 1)))]
"operands[1] = adjust_address_nv (operands[1], V8QImode, 0);")
+(define_insn_and_split "*avx512vl_<code>v8qiv8hi2_mask_2"
+ [(set (match_operand:V8HI 0 "register_operand")
+ (vec_merge:V8HI
+ (any_extend:V8HI
+ (vec_select:V8QI
+ (subreg:V16QI
+ (vec_concat:V2DI
+ (match_operand:DI 1 "memory_operand")
+ (const_int 0)) 0)
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)
+ (const_int 4) (const_int 5)
+ (const_int 6) (const_int 7)])))
+ (match_operand:V8HI 2 "nonimm_or_0_operand")
+ (match_operand:QI 3 "register_operand")))]
+ "TARGET_AVX512VL && TARGET_AVX512BW
+ && ix86_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (vec_merge:V8HI
+ (any_extend:V8HI (match_dup 1))
+ (match_dup 2)
+ (match_dup 3)))]
+ "operands[1] = adjust_address_nv (operands[1], V8QImode, 0);")
+
(define_insn_and_split "*sse4_1_zero_extendv8qiv8hi2_3"
[(set (match_operand:V16QI 0 "register_operand" "=Yr,*x,Yw")
(vec_select:V16QI
(set_attr "prefix" "maybe_evex")
(set_attr "mode" "OI")])
-(define_insn_and_split "*avx2_<code>v8qiv8si2<mask_name>_2"
+(define_insn_and_split "*avx2_<code>v8qiv8si2_2"
[(set (match_operand:V8SI 0 "register_operand")
(any_extend:V8SI
(vec_select:V8QI
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
- "TARGET_AVX2 && <mask_avx512vl_condition>
+ "TARGET_AVX2
&& ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
(any_extend:V8SI (match_dup 1)))]
"operands[1] = adjust_address_nv (operands[1], V8QImode, 0);")
+(define_insn_and_split "*avx512vl_<code>v8qiv8si2_mask_2"
+ [(set (match_operand:V8SI 0 "register_operand")
+ (vec_merge:V8SI
+ (any_extend:V8SI
+ (vec_select:V8QI
+ (subreg:V16QI
+ (vec_concat:V2DI
+ (match_operand:DI 1 "memory_operand")
+ (const_int 0)) 0)
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)
+ (const_int 4) (const_int 5)
+ (const_int 6) (const_int 7)])))
+ (match_operand:V8SI 2 "nonimm_or_0_operand")
+ (match_operand:QI 3 "register_operand")))]
+ "TARGET_AVX512VL
+ && ix86_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (vec_merge:V8SI
+ (any_extend:V8SI (match_dup 1))
+ (match_dup 2)
+ (match_dup 3)))]
+ "operands[1] = adjust_address_nv (operands[1], V8QImode, 0);")
+
(define_expand "<insn>v8qiv8si2"
[(set (match_operand:V8SI 0 "register_operand")
(any_extend:V8SI
(set_attr "prefix" "orig,orig,maybe_evex")
(set_attr "mode" "TI")])
-(define_insn_and_split "*sse4_1_<code>v4qiv4si2<mask_name>_2"
+(define_insn_and_split "*sse4_1_<code>v4qiv4si2_2"
[(set (match_operand:V4SI 0 "register_operand")
(any_extend:V4SI
(vec_select:V4QI
[(set (match_dup 0)
(any_extend:V4SI (match_dup 1)))]
"operands[1] = adjust_address_nv (operands[1], V4QImode, 0);")
+(define_insn_and_split "*avx512vl_<code>v4qiv4si2_mask_2"
+ [(set (match_operand:V4SI 0 "register_operand")
+ (vec_merge:V4SI
+ (any_extend:V4SI
+ (vec_select:V4QI
+ (subreg:V16QI
+ (vec_merge:V4SI
+ (vec_duplicate:V4SI
+ (match_operand:SI 1 "memory_operand"))
+ (const_vector:V4SI
+ [(const_int 0) (const_int 0)
+ (const_int 0) (const_int 0)])
+ (const_int 1)) 0)
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)])))
+ (match_operand:V4SI 2 "nonimm_or_0_operand")
+ (match_operand:QI 3 "register_operand")))]
+ "TARGET_AVX512VL
+ && ix86_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (vec_merge:V4SI
+ (any_extend:V4SI (match_dup 1))
+ (match_dup 2)
+ (match_dup 3)))]
+ "operands[1] = adjust_address_nv (operands[1], V4QImode, 0);")
+
(define_expand "<insn>v4qiv4si2"
[(set (match_operand:V4SI 0 "register_operand")
(any_extend:V4SI
(set_attr "prefix" "orig,orig,maybe_evex")
(set_attr "mode" "TI")])
-(define_insn_and_split "*sse4_1_<code>v4hiv4si2<mask_name>_2"
+(define_insn_and_split "*sse4_1_<code>v4hiv4si2_2"
[(set (match_operand:V4SI 0 "register_operand")
(any_extend:V4SI
(vec_select:V4HI
(const_int 0)) 0)
(parallel [(const_int 0) (const_int 1)
(const_int 2) (const_int 3)]))))]
- "TARGET_SSE4_1 && <mask_avx512vl_condition>
+ "TARGET_SSE4_1
&& ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
(any_extend:V4SI (match_dup 1)))]
"operands[1] = adjust_address_nv (operands[1], V4HImode, 0);")
+(define_insn_and_split "*avx512vl_<code>v4hiv4si2_mask_2"
+ [(set (match_operand:V4SI 0 "register_operand")
+ (vec_merge:V4SI
+ (any_extend:V4SI
+ (vec_select:V4HI
+ (subreg:V8HI
+ (vec_concat:V2DI
+ (match_operand:DI 1 "memory_operand")
+ (const_int 0)) 0)
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)])))
+ (match_operand:V4SI 2 "nonimm_or_0_operand")
+ (match_operand:QI 3 "register_operand")))]
+ "TARGET_AVX512VL
+ && ix86_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (vec_merge:V4SI
+ (any_extend:V4SI (match_dup 1))
+ (match_dup 2)
+ (match_dup 3)))]
+ "operands[1] = adjust_address_nv (operands[1], V4HImode, 0);")
+
(define_expand "<insn>v4hiv4si2"
[(set (match_operand:V4SI 0 "register_operand")
(any_extend:V4SI
(set_attr "prefix" "evex")
(set_attr "mode" "XI")])
-(define_insn_and_split "*avx512f_<code>v8qiv8di2<mask_name>_2"
+(define_insn_and_split "*avx512f_<code>v8qiv8di2_2"
[(set (match_operand:V8DI 0 "register_operand")
(any_extend:V8DI
(vec_select:V8QI
[(set (match_dup 0)
(any_extend:V8DI (match_dup 1)))]
"operands[1] = adjust_address_nv (operands[1], V8QImode, 0);")
+(define_insn_and_split "*avx512f_<code>v8qiv8di2_mask_2"
+ [(set (match_operand:V8DI 0 "register_operand")
+ (vec_merge:V8DI
+ (any_extend:V8DI
+ (vec_select:V8QI
+ (subreg:V16QI
+ (vec_concat:V2DI
+ (match_operand:DI 1 "memory_operand")
+ (const_int 0)) 0)
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)
+ (const_int 4) (const_int 5)
+ (const_int 6) (const_int 7)])))
+ (match_operand:V8DI 2 "nonimm_or_0_operand")
+ (match_operand:QI 3 "register_operand")))]
+ "TARGET_AVX512F && ix86_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (vec_merge:V8DI
+ (any_extend:V8DI (match_dup 1))
+ (match_dup 2)
+ (match_dup 3)))]
+ "operands[1] = adjust_address_nv (operands[1], V8QImode, 0);")
+
(define_expand "<insn>v8qiv8di2"
[(set (match_operand:V8DI 0 "register_operand")
(any_extend:V8DI
(set_attr "prefix" "maybe_evex")
(set_attr "mode" "OI")])
-(define_insn_and_split "*avx2_<code>v4qiv4di2<mask_name>_2"
+(define_insn_and_split "*avx2_<code>v4qiv4di2_2"
[(set (match_operand:V4DI 0 "register_operand")
(any_extend:V4DI
(vec_select:V4QI
(const_int 1)) 0)
(parallel [(const_int 0) (const_int 1)
(const_int 2) (const_int 3)]))))]
- "TARGET_AVX2 && <mask_avx512vl_condition>
+ "TARGET_AVX2
&& ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
(any_extend:V4DI (match_dup 1)))]
"operands[1] = adjust_address_nv (operands[1], V4QImode, 0);")
+(define_insn_and_split "*avx512vl_<code>v4qiv4di2_mask_2"
+ [(set (match_operand:V4DI 0 "register_operand")
+ (vec_merge:V4DI
+ (any_extend:V4DI
+ (vec_select:V4QI
+ (subreg:V16QI
+ (vec_merge:V4SI
+ (vec_duplicate:V4SI
+ (match_operand:SI 1 "memory_operand"))
+ (const_vector:V4SI
+ [(const_int 0) (const_int 0)
+ (const_int 0) (const_int 0)])
+ (const_int 1)) 0)
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)])))
+ (match_operand:V4DI 2 "nonimm_or_0_operand")
+ (match_operand:QI 3 "register_operand")))]
+ "TARGET_AVX512VL
+ && ix86_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (vec_merge:V4DI
+ (any_extend:V4DI (match_dup 1))
+ (match_dup 2)
+ (match_dup 3)))]
+ "operands[1] = adjust_address_nv (operands[1], V4QImode, 0);")
+
(define_expand "<insn>v4qiv4di2"
[(set (match_operand:V4DI 0 "register_operand")
(any_extend:V4DI
(set_attr "prefix" "maybe_evex")
(set_attr "mode" "OI")])
-(define_insn_and_split "*avx2_<code>v4hiv4di2<mask_name>_2"
+(define_insn_and_split "*avx2_<code>v4hiv4di2_2"
[(set (match_operand:V4DI 0 "register_operand")
(any_extend:V4DI
(vec_select:V4HI
(const_int 0)) 0)
(parallel [(const_int 0) (const_int 1)
(const_int 2) (const_int 3)]))))]
- "TARGET_AVX2 && <mask_avx512vl_condition>
+ "TARGET_AVX2
&& ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
(any_extend:V4DI (match_dup 1)))]
"operands[1] = adjust_address_nv (operands[1], V4HImode, 0);")
+(define_insn_and_split "*avx512vl_<code>v4hiv4di2_mask_2"
+ [(set (match_operand:V4DI 0 "register_operand")
+ (vec_merge:V4DI
+ (any_extend:V4DI
+ (vec_select:V4HI
+ (subreg:V8HI
+ (vec_concat:V2DI
+ (match_operand:DI 1 "memory_operand")
+ (const_int 0)) 0)
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)])))
+ (match_operand:V4DI 2 "nonimm_or_0_operand")
+ (match_operand:QI 3 "register_operand")))]
+ "TARGET_AVX512VL
+ && ix86_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (vec_merge:V4DI
+ (any_extend:V4DI (match_dup 1))
+ (match_dup 2)
+ (match_dup 3)))]
+ "operands[1] = adjust_address_nv (operands[1], V4HImode, 0);")
+
(define_expand "<insn>v4hiv4di2"
[(set (match_operand:V4DI 0 "register_operand")
(any_extend:V4DI
(set_attr "prefix" "orig,orig,maybe_evex")
(set_attr "mode" "TI")])
-(define_insn_and_split "*sse4_1_<code>v2hiv2di2<mask_name>_2"
+(define_insn_and_split "*sse4_1_<code>v2hiv2di2_2"
[(set (match_operand:V2DI 0 "register_operand")
(any_extend:V2DI
(vec_select:V2HI
(const_int 0) (const_int 0)])
(const_int 1)) 0)
(parallel [(const_int 0) (const_int 1)]))))]
- "TARGET_SSE4_1 && <mask_avx512vl_condition>
+ "TARGET_SSE4_1
&& ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
(any_extend:V2DI (match_dup 1)))]
"operands[1] = adjust_address_nv (operands[1], V2HImode, 0);")
+(define_insn_and_split "*avx512vl_<code>v2hiv2di2_mask_2"
+ [(set (match_operand:V2DI 0 "register_operand")
+ (vec_merge:V2DI
+ (any_extend:V2DI
+ (vec_select:V2HI
+ (subreg:V8HI
+ (vec_merge:V4SI
+ (vec_duplicate:V4SI
+ (match_operand:SI 1 "memory_operand"))
+ (const_vector:V4SI
+ [(const_int 0) (const_int 0)
+ (const_int 0) (const_int 0)])
+ (const_int 1)) 0)
+ (parallel [(const_int 0) (const_int 1)])))
+ (match_operand:V2DI 2 "nonimm_or_0_operand")
+ (match_operand:QI 3 "register_operand")))]
+ "TARGET_AVX512VL
+ && ix86_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (vec_merge:V2DI
+ (any_extend:V2DI (match_dup 1))
+ (match_dup 2)
+ (match_dup 3)))]
+ "operands[1] = adjust_address_nv (operands[1], V2HImode, 0);")
+
(define_expand "<insn>v2hiv2di2"
[(set (match_operand:V2DI 0 "register_operand")
(any_extend:V2DI
(set_attr "prefix" "orig,orig,maybe_evex")
(set_attr "mode" "TI")])
-(define_insn_and_split "*sse4_1_<code>v2siv2di2<mask_name>_2"
+(define_insn_and_split "*sse4_1_<code>v2siv2di2_2"
[(set (match_operand:V2DI 0 "register_operand")
(any_extend:V2DI
(vec_select:V2SI
(match_operand:DI 1 "memory_operand")
(const_int 0)) 0)
(parallel [(const_int 0) (const_int 1)]))))]
- "TARGET_SSE4_1 && <mask_avx512vl_condition>
+ "TARGET_SSE4_1
&& ix86_pre_reload_split ()"
"#"
"&& 1"
[(set (match_dup 0)
(any_extend:V2DI (match_dup 1)))]
"operands[1] = adjust_address_nv (operands[1], V2SImode, 0);")
+(define_insn_and_split "*avx512vl_<code>v2siv2di2_mask_2"
+ [(set (match_operand:V2DI 0 "register_operand")
+ (vec_merge:V2DI
+ (any_extend:V2DI
+ (vec_select:V2SI
+ (subreg:V4SI
+ (vec_concat:V2DI
+ (match_operand:DI 1 "memory_operand")
+ (const_int 0)) 0)
+ (parallel [(const_int 0) (const_int 1)])))
+ (match_operand:V2DI 2 "nonimm_or_0_operand")
+ (match_operand:QI 3 "register_operand")))]
+ "TARGET_AVX512VL
+ && ix86_pre_reload_split ()"
+ "#"
+ "&& 1"
+ [(set (match_dup 0)
+ (vec_merge:V2DI
+ (any_extend:V2DI (match_dup 1))
+ (match_dup 2)
+ (match_dup 3)))]
+ "operands[1] = adjust_address_nv (operands[1], V2SImode, 0);")
+
(define_insn_and_split "*sse4_1_zero_extendv2siv2di2_3"
[(set (match_operand:V4SI 0 "register_operand" "=Yr,*x,v")
(vec_select:V4SI
--- /dev/null
+/* PR target/123779 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -mavx512vl -mavx512bw" } */
+
+typedef char __v16qi __attribute__((__vector_size__(16)));
+typedef short __v8hi __attribute__((__vector_size__(16)));
+typedef int __v4si __attribute__((__vector_size__(16)));
+typedef long long __v2di __attribute__((__vector_size__(16)));
+
+typedef int __v8si __attribute__((__vector_size__(32)));
+typedef long long __v4di __attribute__((__vector_size__(32)));
+typedef long long __v8di __attribute__((__vector_size__(64)));
+typedef unsigned char __mmask8;
+
+long long g_mem;
+
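+/* MAKE_TEST_BLEND widens a value loaded from memory with an unmasked
+   pmovzx builtin and merges it into the destination via a compare plus
+   variable blend; this is the shape the new vec_merge splitters are
+   meant to catch.  */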
+#define MAKE_TEST_BLEND(NAME, DEST_T, SRC_INIT_T, PMOV_INPUT_CAST_T, \
+ PMOV_FUNC, BLEND_FUNC, BLEND_CAST_T, LOAD_VAL_TYPE, ...) \
+ DEST_T dest_##NAME, mask_src1_##NAME, mask_src2_##NAME, mask_##NAME, res_##NAME; \
+ void test_##NAME() \
+ { \
+ mask_##NAME = mask_src1_##NAME < mask_src2_##NAME; \
+ LOAD_VAL_TYPE val = (LOAD_VAL_TYPE)g_mem; \
+ SRC_INIT_T src_vec = { __VA_ARGS__ }; \
+ DEST_T extended = (DEST_T)PMOV_FUNC ((PMOV_INPUT_CAST_T)src_vec); \
+ res_##NAME = (DEST_T)BLEND_FUNC ( \
+ (BLEND_CAST_T)dest_##NAME, \
+ (BLEND_CAST_T)extended, \
+ (BLEND_CAST_T)mask_##NAME); \
+ }
+
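+/* MAKE_TEST_MASK exercises the same extensions through the _mask
+   builtins, which take the merge source and the mask directly.  */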
+#define MAKE_TEST_MASK(NAME, DEST_T, SRC_INIT_T, PMOV_INPUT_CAST_T, \
+ PMOV_MASK_FUNC, LOAD_VAL_TYPE, ...) \
+ DEST_T dest_##NAME, res_##NAME; \
+ __mmask8 mask_##NAME; \
+ void test_##NAME() \
+ { \
+ LOAD_VAL_TYPE val = (LOAD_VAL_TYPE) g_mem; \
+ SRC_INIT_T src_vec = { __VA_ARGS__ }; \
+ res_##NAME = PMOV_MASK_FUNC ((PMOV_INPUT_CAST_T)src_vec, \
+ dest_##NAME, \
+ mask_##NAME);\
+ }
+
+MAKE_TEST_BLEND(v8qi_v8hi, __v8hi, __v2di, __v16qi, __builtin_ia32_pmovzxbw128, __builtin_ia32_pblendvb128, __v16qi, long long, val, 0)
+MAKE_TEST_BLEND(v4qi_v4si, __v4si, __v4si, __v16qi, __builtin_ia32_pmovzxbd128, __builtin_ia32_pblendvb128, __v16qi, int, val, 0, 0, 0)
+MAKE_TEST_BLEND(v4hi_v4si, __v4si, __v2di, __v8hi, __builtin_ia32_pmovzxwd128, __builtin_ia32_pblendvb128, __v16qi, long long, val, 0)
+MAKE_TEST_BLEND(v2hi_v2di, __v2di, __v4si, __v8hi, __builtin_ia32_pmovzxwq128, __builtin_ia32_pblendvb128, __v16qi, int, val, 0, 0, 0)
+MAKE_TEST_BLEND(v2si_v2di, __v2di, __v2di, __v4si, __builtin_ia32_pmovzxdq128, __builtin_ia32_pblendvb128, __v16qi, long long, val, 0)
+
+MAKE_TEST_MASK(v8qi_v8si, __v8si, __v2di, __v16qi, __builtin_ia32_pmovzxbd256_mask, long long, val, 0)
+MAKE_TEST_MASK(v4qi_v4di, __v4di, __v4si, __v16qi, __builtin_ia32_pmovzxbq256_mask, int, val, 0, 0, 0)
+MAKE_TEST_MASK(v4hi_v4di, __v4di, __v2di, __v8hi, __builtin_ia32_pmovzxwq256_mask, long long, val, 0)
+MAKE_TEST_MASK(v8qi_v8di, __v8di, __v2di, __v16qi, __builtin_ia32_pmovzxbq512_mask, long long, val, 0)