git.ipfire.org Git - thirdparty/gcc.git/commitdiff
x86: Fix -O0 remaining intrinsic macros [PR94832]
author: Jakub Jelinek <jakub@redhat.com>
Wed, 29 Apr 2020 15:31:26 +0000 (17:31 +0200)
committer: Jakub Jelinek <jakub@redhat.com>
Wed, 29 Apr 2020 15:31:26 +0000 (17:31 +0200)
A few other macros seem to suffer from the same issue.  What I've done was:
cat gcc/config/i386/*intrin.h | sed -e ':x /\\$/ { N; s/\\\n//g ; bx }' \
| grep '^[[:blank:]]*#[[:blank:]]*define[[:blank:]].*(' | sed 's/[  ]\+/ /g' \
> /tmp/macros
and then looking for regexps:
)[a-zA-Z]
) [a-zA-Z]
[a-zA-Z][-+*/%]
[a-zA-Z] [-+*/%]
[-+*/%][a-zA-Z]
[-+*/%] [a-zA-Z]
in the resulting file.

2020-04-29  Jakub Jelinek  <jakub@redhat.com>

PR target/94832
* config/i386/avx512bwintrin.h (_mm512_alignr_epi8,
_mm512_mask_alignr_epi8, _mm512_maskz_alignr_epi8): Wrap macro operands
used in casts into parens.
* config/i386/avx512fintrin.h (_mm512_cvt_roundps_ph, _mm512_cvtps_ph,
_mm512_mask_cvt_roundps_ph, _mm512_mask_cvtps_ph,
_mm512_maskz_cvt_roundps_ph, _mm512_maskz_cvtps_ph,
_mm512_mask_cmp_epi64_mask, _mm512_mask_cmp_epi32_mask,
_mm512_mask_cmp_epu64_mask, _mm512_mask_cmp_epu32_mask,
_mm512_mask_cmp_round_pd_mask, _mm512_mask_cmp_round_ps_mask,
_mm512_mask_cmp_pd_mask, _mm512_mask_cmp_ps_mask): Likewise.
* config/i386/avx512vlbwintrin.h (_mm256_mask_alignr_epi8,
_mm256_maskz_alignr_epi8, _mm_mask_alignr_epi8, _mm_maskz_alignr_epi8,
_mm256_mask_cmp_epu8_mask): Likewise.
* config/i386/avx512vlintrin.h (_mm_mask_cvtps_ph, _mm_maskz_cvtps_ph,
_mm256_mask_cvtps_ph, _mm256_maskz_cvtps_ph): Likewise.
* config/i386/f16cintrin.h (_mm_cvtps_ph, _mm256_cvtps_ph): Likewise.
* config/i386/shaintrin.h (_mm_sha1rnds4_epu32): Likewise.

gcc/ChangeLog
gcc/config/i386/avx512bwintrin.h
gcc/config/i386/avx512fintrin.h
gcc/config/i386/avx512vlbwintrin.h
gcc/config/i386/avx512vlintrin.h
gcc/config/i386/f16cintrin.h
gcc/config/i386/shaintrin.h

index 16e05d1fa60c2c7649cc3b13fefb740e533788f7..72e38d89308226698b0656f2c266a4c2aeab4159 100644 (file)
@@ -1,5 +1,24 @@
 2020-04-29  Jakub Jelinek  <jakub@redhat.com>
 
+       PR target/94832
+       * config/i386/avx512bwintrin.h (_mm512_alignr_epi8,
+       _mm512_mask_alignr_epi8, _mm512_maskz_alignr_epi8): Wrap macro operands
+       used in casts into parens.
+       * config/i386/avx512fintrin.h (_mm512_cvt_roundps_ph, _mm512_cvtps_ph,
+       _mm512_mask_cvt_roundps_ph, _mm512_mask_cvtps_ph,
+       _mm512_maskz_cvt_roundps_ph, _mm512_maskz_cvtps_ph,
+       _mm512_mask_cmp_epi64_mask, _mm512_mask_cmp_epi32_mask,
+       _mm512_mask_cmp_epu64_mask, _mm512_mask_cmp_epu32_mask,
+       _mm512_mask_cmp_round_pd_mask, _mm512_mask_cmp_round_ps_mask,
+       _mm512_mask_cmp_pd_mask, _mm512_mask_cmp_ps_mask): Likewise.
+       * config/i386/avx512vlbwintrin.h (_mm256_mask_alignr_epi8,
+       _mm256_maskz_alignr_epi8, _mm_mask_alignr_epi8, _mm_maskz_alignr_epi8,
+       _mm256_mask_cmp_epu8_mask): Likewise.
+       * config/i386/avx512vlintrin.h (_mm_mask_cvtps_ph, _mm_maskz_cvtps_ph,
+       _mm256_mask_cvtps_ph, _mm256_maskz_cvtps_ph): Likewise.
+       * config/i386/f16cintrin.h (_mm_cvtps_ph, _mm256_cvtps_ph): Likewise.
+       * config/i386/shaintrin.h (_mm_sha1rnds4_epu32): Likewise.
+
        PR target/94832
        * config/i386/avx2intrin.h (_mm_mask_i32gather_pd,
        _mm256_mask_i32gather_pd, _mm_mask_i64gather_pd,
index c886e5a31e91b145bc224c4f64331b48c506637a..d19c10444714c9fd147454a37fe1992ffbcbf7c3 100644 (file)
@@ -3128,16 +3128,16 @@ _mm512_bsrli_epi128 (__m512i __A, const int __N)
 #define _mm512_alignr_epi8(X, Y, N)                                                \
   ((__m512i) __builtin_ia32_palignr512 ((__v8di)(__m512i)(X),                      \
                                        (__v8di)(__m512i)(Y),                       \
-                                       (int)(N * 8)))
+                                       (int)((N) * 8)))
 
 #define _mm512_mask_alignr_epi8(W, U, X, Y, N)                                     \
   ((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X),                 \
-                                           (__v8di)(__m512i)(Y), (int)(N * 8),     \
+                                           (__v8di)(__m512i)(Y), (int)((N) * 8),   \
                                            (__v8di)(__m512i)(W), (__mmask64)(U)))
 
 #define _mm512_maskz_alignr_epi8(U, X, Y, N)                                       \
   ((__m512i) __builtin_ia32_palignr512_mask ((__v8di)(__m512i)(X),                 \
-                                            (__v8di)(__m512i)(Y), (int)(N * 8),    \
+                                            (__v8di)(__m512i)(Y), (int)((N) * 8),  \
                                             (__v8di)(__m512i)                      \
                                             _mm512_setzero_si512 (),               \
                                             (__mmask64)(U)))
index c86982ab9c8bcac3f0cd5fe41f5b851c2d3710c2..012cf4eb31e2065fd447968bffefd3d8fb760384 100644 (file)
@@ -8570,22 +8570,22 @@ _mm512_maskz_cvtps_ph (__mmask16 __W, __m512 __A, const int __I)
     (__m512)__builtin_ia32_vcvtph2ps512_mask((__v16hi)(A), (__v16sf)_mm512_setzero_ps(), U, B)
 
 #define _mm512_cvt_roundps_ph(A, I)                                             \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)_mm256_undefined_si256 (), -1))
 #define _mm512_cvtps_ph(A, I)                                           \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)_mm256_undefined_si256 (), -1))
 #define _mm512_mask_cvt_roundps_ph(U, W, A, I)                          \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)(__m256i)(U), (__mmask16) (W)))
 #define _mm512_mask_cvtps_ph(U, W, A, I)                                \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)(__m256i)(U), (__mmask16) (W)))
 #define _mm512_maskz_cvt_roundps_ph(W, A, I)                                    \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)_mm256_setzero_si256 (), (__mmask16) (W)))
 #define _mm512_maskz_cvtps_ph(W, A, I)                                  \
-  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) A, (int) (I),\
+  ((__m256i) __builtin_ia32_vcvtps2ph512_mask ((__v16sf)(__m512) (A), (int) (I),\
     (__v16hi)_mm256_setzero_si256 (), (__mmask16) (W)))
 #endif
 
@@ -10081,32 +10081,32 @@ _mm_mask_cmp_round_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y,
 #define _mm512_mask_cmp_epi64_mask(M, X, Y, P)                         \
   ((__mmask8) __builtin_ia32_cmpq512_mask ((__v8di)(__m512i)(X),       \
                                           (__v8di)(__m512i)(Y), (int)(P),\
-                                          (__mmask8)M))
+                                          (__mmask8)(M)))
 
 #define _mm512_mask_cmp_epi32_mask(M, X, Y, P)                         \
   ((__mmask16) __builtin_ia32_cmpd512_mask ((__v16si)(__m512i)(X),     \
                                            (__v16si)(__m512i)(Y), (int)(P), \
-                                           (__mmask16)M))
+                                           (__mmask16)(M)))
 
 #define _mm512_mask_cmp_epu64_mask(M, X, Y, P)                         \
   ((__mmask8) __builtin_ia32_ucmpq512_mask ((__v8di)(__m512i)(X),      \
                                            (__v8di)(__m512i)(Y), (int)(P),\
-                                           (__mmask8)M))
+                                           (__mmask8)(M)))
 
 #define _mm512_mask_cmp_epu32_mask(M, X, Y, P)                         \
   ((__mmask16) __builtin_ia32_ucmpd512_mask ((__v16si)(__m512i)(X),    \
                                             (__v16si)(__m512i)(Y), (int)(P), \
-                                            (__mmask16)M))
+                                            (__mmask16)(M)))
 
 #define _mm512_mask_cmp_round_pd_mask(M, X, Y, P, R)                   \
   ((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X),      \
                                            (__v8df)(__m512d)(Y), (int)(P),\
-                                           (__mmask8)M, R))
+                                           (__mmask8)(M), R))
 
 #define _mm512_mask_cmp_round_ps_mask(M, X, Y, P, R)                   \
   ((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X),     \
                                             (__v16sf)(__m512)(Y), (int)(P),\
-                                            (__mmask16)M, R))
+                                            (__mmask16)(M), R))
 
 #define _mm_cmp_round_sd_mask(X, Y, P, R)                              \
   ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X),         \
@@ -15498,12 +15498,12 @@ _mm_mask_cmp_ss_mask (__mmask8 __M, __m128 __X, __m128 __Y, const int __P)
 #define _mm512_mask_cmp_pd_mask(M, X, Y, P)                                    \
   ((__mmask8) __builtin_ia32_cmppd512_mask ((__v8df)(__m512d)(X),      \
                                            (__v8df)(__m512d)(Y), (int)(P),\
-                                           (__mmask8)M, _MM_FROUND_CUR_DIRECTION))
+                                           (__mmask8)(M), _MM_FROUND_CUR_DIRECTION))
 
 #define _mm512_mask_cmp_ps_mask(M, X, Y, P)                                    \
   ((__mmask16) __builtin_ia32_cmpps512_mask ((__v16sf)(__m512)(X),     \
                                             (__v16sf)(__m512)(Y), (int)(P),\
-                                            (__mmask16)M,_MM_FROUND_CUR_DIRECTION))
+                                            (__mmask16)(M),_MM_FROUND_CUR_DIRECTION))
 
 #define _mm_cmp_sd_mask(X, Y, P)                                       \
   ((__mmask8) __builtin_ia32_cmpsd_mask ((__v2df)(__m128d)(X),         \
index 19293e4d6f0d5d814381c725e753acd690f0a601..bee2639d60a07d99db58d8676b1463dbc512c51a 100644 (file)
@@ -1787,7 +1787,7 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
 #else
 #define _mm256_mask_alignr_epi8(W, U, X, Y, N)                                     \
   ((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X),                 \
-                                           (__v4di)(__m256i)(Y), (int)(N * 8),     \
+                                           (__v4di)(__m256i)(Y), (int)((N) * 8),   \
                                            (__v4di)(__m256i)(X), (__mmask32)(U)))
 
 #define _mm256_mask_srli_epi16(W, U, A, B)                              \
@@ -1864,18 +1864,18 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
 
 #define _mm256_maskz_alignr_epi8(U, X, Y, N)                                       \
   ((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X),                 \
-                                           (__v4di)(__m256i)(Y), (int)(N * 8),     \
+                                           (__v4di)(__m256i)(Y), (int)((N) * 8),   \
                                            (__v4di)(__m256i)_mm256_setzero_si256 (),   \
                                            (__mmask32)(U)))
 
 #define _mm_mask_alignr_epi8(W, U, X, Y, N)                                        \
   ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X),                 \
-                                           (__v2di)(__m128i)(Y), (int)(N * 8),     \
+                                           (__v2di)(__m128i)(Y), (int)((N) * 8),   \
                                            (__v2di)(__m128i)(X), (__mmask16)(U)))
 
 #define _mm_maskz_alignr_epi8(U, X, Y, N)                                          \
   ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X),                 \
-                                           (__v2di)(__m128i)(Y), (int)(N * 8),     \
+                                           (__v2di)(__m128i)(Y), (int)((N) * 8),   \
                                            (__v2di)(__m128i)_mm_setzero_si128 (),  \
                                            (__mmask16)(U)))
 
@@ -2033,7 +2033,7 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
 #define _mm256_mask_cmp_epu8_mask(M, X, Y, P)                          \
   ((__mmask32) __builtin_ia32_ucmpb256_mask ((__v32qi)(__m256i)(X),    \
                                            (__v32qi)(__m256i)(Y), (int)(P),\
-                                           (__mmask32)M))
+                                           (__mmask32)(M)))
 #endif
 
 extern __inline __mmask32
index 7685bdfa391cb7a7965925e1dc04a4389c31fe35..cb6cc0ce7821ae3c99d26faa8ab1fd8c3fdedf43 100644 (file)
@@ -13466,19 +13466,19 @@ _mm256_permutex_pd (__m256d __X, const int __M)
         (__mmask8)(U)))
 
 #define _mm_mask_cvtps_ph(W, U, A, I)                                          \
-  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) A, (int) (I),      \
+  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I),   \
       (__v8hi)(__m128i) (W), (__mmask8) (U)))
 
 #define _mm_maskz_cvtps_ph(U, A, I)                                            \
-  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) A, (int) (I),      \
+  ((__m128i) __builtin_ia32_vcvtps2ph_mask ((__v4sf)(__m128) (A), (int) (I),   \
       (__v8hi)(__m128i) _mm_setzero_si128 (), (__mmask8) (U)))
 
 #define _mm256_mask_cvtps_ph(W, U, A, I)                                       \
-  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) A, (int) (I),  \
+  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I),        \
       (__v8hi)(__m128i) (W), (__mmask8) (U)))
 
 #define _mm256_maskz_cvtps_ph(U, A, I)                                         \
-  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) A, (int) (I),   \
+  ((__m128i) __builtin_ia32_vcvtps2ph256_mask ((__v8sf)(__m256) (A), (int) (I),        \
       (__v8hi)(__m128i) _mm_setzero_si128 (), (__mmask8) (U)))
 
 #define _mm256_mask_srai_epi32(W, U, A, B)                             \
index a4033cf0e87619c911b28eeb71c06b23bb6402a5..8276e8d652155cdeaafff339f4602ed79f53ee23 100644 (file)
@@ -84,10 +84,10 @@ _mm256_cvtps_ph (__m256 __A, const int __I)
     }))
 
 #define _mm_cvtps_ph(A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph ((__v4sf)(__m128) A, (int) (I)))
+  ((__m128i) __builtin_ia32_vcvtps2ph ((__v4sf)(__m128) (A), (int) (I)))
 
 #define _mm256_cvtps_ph(A, I) \
-  ((__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf)(__m256) A, (int) (I)))
+  ((__m128i) __builtin_ia32_vcvtps2ph256 ((__v8sf)(__m256) (A), (int) (I)))
 #endif /* __OPTIMIZE */
 
 #ifdef __DISABLE_F16C__
index 38011dda17a6ed8f080ae91d5a6961cc62eb5ab1..13833b261c615380eedb1297b4a7126aa0bbdda8 100644 (file)
@@ -64,8 +64,8 @@ _mm_sha1rnds4_epu32 (__m128i __A, __m128i __B, const int __I)
 }
 #else
 #define _mm_sha1rnds4_epu32(A, B, I)                               \
-  ((__m128i) __builtin_ia32_sha1rnds4 ((__v4si)(__m128i)A,         \
-                                      (__v4si)(__m128i)B, (int)I))
+  ((__m128i) __builtin_ia32_sha1rnds4 ((__v4si)(__m128i)(A),       \
+                                      (__v4si)(__m128i)(B), (int)(I)))
 #endif
 
 extern __inline __m128i