The following one-line patch improves the code generated for V8QI and V4QI
shifts when AVX512BW and AVX512VL functionality is available.
For the testcase (from gcc.target/i386/vect-shiftv8qi.c):
typedef signed char v8qi __attribute__ ((__vector_size__ (8)));
v8qi foo (v8qi x) { return x >> 5; }
GCC with -O2 -march=cascadelake currently generates:
foo: movl $67372036, %eax
vpsraw $5, %xmm0, %xmm2
vpbroadcastd %eax, %xmm1
movl $117901063, %eax
vpbroadcastd %eax, %xmm3
vmovdqa %xmm1, %xmm0
vmovdqa %xmm3, -24(%rsp)
vpternlogd $120, -24(%rsp), %xmm2, %xmm0
vpsubb %xmm1, %xmm0, %xmm0
ret
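For reference, a per-byte scalar sketch of what that sequence computes,
reading the vpternlogd immediate 0x78 as A ^ (B & C); the constants above
are 0x07070707 (117901063) and 0x04040404 (67372036), and sra5_byte is
just an illustrative name, not anything in the patch:

/* vpsraw shifts 16-bit lanes, so the high bits of each result byte are
   polluted by its neighbouring byte; they are masked off with 0x07 and
   the sign is then restored with the xor/subtract trick using 0x04.  */
signed char sra5_byte (signed char x)
{
  unsigned char t = ((unsigned char) x >> 5) & 0x07;  /* vpsraw + mask */
  return (signed char) ((t ^ 0x04) - 0x04);           /* vpternlogd + vpsubb */
}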
With this patch we now generate the much improved:
foo: vpmovsxbw %xmm0, %xmm0
vpsraw $5, %xmm0, %xmm0
vpmovwb %xmm0, %xmm0
ret
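The improved sequence simply widens each byte to a 16-bit word, shifts
there, and truncates back.  A rough source-level illustration of that
strategy (not the patch itself; foo_widened is a made-up name and
__builtin_convertvector is used only to mirror the conversions each
instruction performs):

typedef signed char v8qi __attribute__ ((__vector_size__ (8)));
typedef short v8hi __attribute__ ((__vector_size__ (16)));

v8qi foo_widened (v8qi x)
{
  v8hi w = __builtin_convertvector (x, v8hi);  /* vpmovsxbw: sign-extend bytes to words */
  w >>= 5;                                     /* vpsraw: shift 16-bit lanes */
  return __builtin_convertvector (w, v8qi);    /* vpmovwb: truncate words back to bytes */
}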
This patch also fixes the FAILs of gcc.target/i386/vect-shiftv[48]qi.c
when run with the additional -march=cascadelake flag, by splitting each
of these tests into two: one form testing code generation with -msse2
(and -mno-avx2 -mno-avx512vl) as originally intended, and the other
testing AVX512 code generation with an explicit -march=cascadelake.
2024-05-10 Roger Sayle <roger@nextmovesoftware.com>
Hongtao Liu <hongtao.liu@intel.com>
gcc/ChangeLog
* config/i386/i386-expand.cc (ix86_expand_vecop_qihi_partial):
Don't attempt ix86_expand_vec_shift_qihi_constant on SSE4.1.
gcc/testsuite/ChangeLog
* gcc.target/i386/vect-shiftv4qi.c: Specify -mno-avx2 and -mno-avx512vl.
* gcc.target/i386/vect-shiftv8qi.c: Likewise.
* gcc.target/i386/vect-shiftv4qi-2.c: New test case.
* gcc.target/i386/vect-shiftv8qi-2.c: Likewise.
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
if (CONST_INT_P (op2)
&& (code == ASHIFT || code == LSHIFTRT || code == ASHIFTRT)
+ /* With AVX512 it's cheaper to do vpmovsxbw/op/vpmovwb.
+ Even with SSE4.1 the alternative is better. */
+ && !TARGET_SSE4_1
&& ix86_expand_vec_shift_qihi_constant (code, qdest, qop1, qop2))
{
emit_move_insn (dest, gen_lowpart (qimode, qdest));
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vect-shiftv4qi-2.c
+/* { dg-do compile } */
+/* { dg-options "-O2 -march=cascadelake" } */
+
+#define N 4
+
+typedef unsigned char __vu __attribute__ ((__vector_size__ (N)));
+typedef signed char __vi __attribute__ ((__vector_size__ (N)));
+
+__vu sll (__vu a, int n)
+{
+ return a << n;
+}
+
+__vu sll_c (__vu a)
+{
+ return a << 5;
+}
+
+/* { dg-final { scan-assembler-times "vpsllw" 2 } } */
+
+__vu srl (__vu a, int n)
+{
+ return a >> n;
+}
+
+__vu srl_c (__vu a)
+{
+ return a >> 5;
+}
+
+/* { dg-final { scan-assembler-times "vpsrlw" 2 } } */
+
+__vi sra (__vi a, int n)
+{
+ return a >> n;
+}
+
+__vi sra_c (__vi a)
+{
+ return a >> 5;
+}
+
+/* { dg-final { scan-assembler-times "vpsraw" 2 } } */
--- a/gcc/testsuite/gcc.target/i386/vect-shiftv4qi.c
+++ b/gcc/testsuite/gcc.target/i386/vect-shiftv4qi.c
/* { dg-do compile } */
-/* { dg-options "-O2 -msse2" } */
+/* { dg-options "-O2 -msse2 -mno-avx2 -mno-avx512vl" } */
#define N 4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/vect-shiftv8qi-2.c
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-O2 -march=cascadelake" } */
+
+#define N 8
+
+typedef unsigned char __vu __attribute__ ((__vector_size__ (N)));
+typedef signed char __vi __attribute__ ((__vector_size__ (N)));
+
+__vu sll (__vu a, int n)
+{
+ return a << n;
+}
+
+__vu sll_c (__vu a)
+{
+ return a << 5;
+}
+
+/* { dg-final { scan-assembler-times "vpsllw" 2 } } */
+
+__vu srl (__vu a, int n)
+{
+ return a >> n;
+}
+
+__vu srl_c (__vu a)
+{
+ return a >> 5;
+}
+
+/* { dg-final { scan-assembler-times "vpsrlw" 2 } } */
+
+__vi sra (__vi a, int n)
+{
+ return a >> n;
+}
+
+__vi sra_c (__vi a)
+{
+ return a >> 5;
+}
+
+/* { dg-final { scan-assembler-times "vpsraw" 2 } } */
--- a/gcc/testsuite/gcc.target/i386/vect-shiftv8qi.c
+++ b/gcc/testsuite/gcc.target/i386/vect-shiftv8qi.c
/* { dg-do compile { target { ! ia32 } } } */
-/* { dg-options "-O2 -msse2" } */
+/* { dg-options "-O2 -msse2 -mno-avx2 -mno-avx512vl" } */
#define N 8