For compile tests, we should generate valid asm except for special purposes.
Fix the compile tests that generate invalid asm.
gcc/testsuite/ChangeLog:
* gcc.target/i386/apx-egprs-names.c: Use ax for short and
al for char instead of eax.
* gcc.target/i386/avx512bw-kandnq-1.c: Do not run the test
under -m32 since kmovq with a general-purpose register
operand is invalid there. Use long long so kmovq reads a
64-bit register instead of a 32-bit register.
* gcc.target/i386/avx512bw-kandq-1.c: Ditto.
* gcc.target/i386/avx512bw-knotq-1.c: Ditto.
* gcc.target/i386/avx512bw-korq-1.c: Ditto.
* gcc.target/i386/avx512bw-kshiftlq-1.c: Ditto.
* gcc.target/i386/avx512bw-kshiftrq-1.c: Ditto.
* gcc.target/i386/avx512bw-kxnorq-1.c: Ditto.
* gcc.target/i386/avx512bw-kxorq-1.c: Ditto.
register int b __asm ("r30");
register short c __asm ("r29");
register char d __asm ("r28");
- __asm__ __volatile__ ("mov %0, %%rax" : : "r" (a) : "rax");
- __asm__ __volatile__ ("mov %0, %%eax" : : "r" (b) : "eax");
- __asm__ __volatile__ ("mov %0, %%eax" : : "r" (c) : "eax");
- __asm__ __volatile__ ("mov %0, %%eax" : : "r" (d) : "eax");
+ __asm__ __volatile__ ("movq %0, %%rax" : : "r" (a) : "rax");
+ __asm__ __volatile__ ("movl %0, %%eax" : : "r" (b) : "eax");
+ __asm__ __volatile__ ("movw %0, %%ax" : : "r" (c) : "ax");
+ __asm__ __volatile__ ("movb %0, %%al" : : "r" (d) : "al");
}
-/* { dg-do compile } */
+/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-mavx512bw -O2" } */
/* { dg-final { scan-assembler-times "kandnq\[ \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
__mmask64 k1, k2, k3;
volatile __m512i x = _mm512_setzero_si512 ();
- __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
- __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2) );
+ __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
+ __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2ULL) );
k3 = _kandn_mask64 (k1, k2);
x = _mm512_mask_add_epi8 (x, k3, x, x);
-/* { dg-do compile } */
+/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-mavx512bw -O2" } */
/* { dg-final { scan-assembler-times "kandq\[ \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
__mmask64 k1, k2, k3;
volatile __m512i x = _mm512_setzero_epi32();
- __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
- __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2) );
+ __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
+ __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2ULL) );
k3 = _kand_mask64 (k1, k2);
x = _mm512_mask_add_epi8 (x, k3, x, x);
-/* { dg-do compile } */
+/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-mavx512bw -O2" } */
/* { dg-final { scan-assembler-times "knotq\[ \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
__mmask64 k1, k2;
volatile __m512i x = _mm512_setzero_si512 ();
- __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (45) );
+ __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (45ULL) );
k2 = _knot_mask64 (k1);
x = _mm512_mask_add_epi8 (x, k1, x, x);
-/* { dg-do compile } */
+/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-mavx512bw -O2" } */
/* { dg-final { scan-assembler-times "korq\[ \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
__mmask64 k1, k2, k3;
volatile __m512i x = _mm512_setzero_si512 ();
- __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
- __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2) );
+ __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
+ __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2ULL) );
k3 = _kor_mask64 (k1, k2);
x = _mm512_mask_add_epi8 (x, k3, x, x);
-/* { dg-do compile } */
+/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-mavx512bw -O2" } */
/* { dg-final { scan-assembler-times "kshiftlq\[ \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
unsigned int i = 5;
volatile __m512i x = _mm512_setzero_si512 ();
- __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
+ __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
k2 = _kshiftli_mask64 (k1, i);
x = _mm512_mask_add_epi8 (x, k2, x, x);
-/* { dg-do compile } */
+/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-mavx512bw -O2" } */
/* { dg-final { scan-assembler-times "kshiftrq\[ \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
unsigned int i = 5;
volatile __m512i x = _mm512_setzero_si512 ();
- __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
+ __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
k2 = _kshiftri_mask64 (k1, i);
x = _mm512_mask_add_epi8 (x, k2, x, x);
-/* { dg-do compile } */
+/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-mavx512bw -O2" } */
/* { dg-final { scan-assembler-times "kxnorq\[ \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
__mmask64 k1, k2, k3;
volatile __m512i x = _mm512_setzero_si512 ();
- __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
- __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2) );
+ __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
+ __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2ULL) );
k3 = _kxnor_mask64 (k1, k2);
x = _mm512_mask_add_epi8 (x, k3, x, x);
-/* { dg-do compile } */
+/* { dg-do compile { target { ! ia32 } } } */
/* { dg-options "-mavx512bw -O2" } */
/* { dg-final { scan-assembler-times "kxorq\[ \\t\]+\[^\{\n\]*%k\[0-7\](?:\n|\[ \\t\]+#)" 1 } } */
__mmask64 k1, k2, k3;
volatile __m512i x = _mm512_setzero_si512 ();
- __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1) );
- __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2) );
+ __asm__( "kmovq %1, %0" : "=k" (k1) : "r" (1ULL) );
+ __asm__( "kmovq %1, %0" : "=k" (k2) : "r" (2ULL) );
k3 = _kxor_mask64 (k1, k2);
x = _mm512_mask_add_epi8 (x, k3, x, x);