x86-64: Add memset family functions with 256-bit EVEX
author H.J. Lu <hjl.tools@gmail.com>
Fri, 5 Mar 2021 15:15:03 +0000 (07:15 -0800)
committer H.J. Lu <hjl.tools@gmail.com>
Thu, 27 Jan 2022 19:44:17 +0000 (11:44 -0800)
Update ifunc-memset.h/ifunc-wmemset.h to select the function optimized
with 256-bit EVEX instructions using YMM16-YMM31 registers to avoid RTM
abort with usable AVX512VL and AVX512BW since VZEROUPPER isn't needed at
function exit.

(cherry picked from commit 1b968b6b9b3aac702ac2f133e0dd16cfdbb415ee)
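Background (editorial note, not part of the commit): VZEROUPPER is normally required before returning from AVX code that touches %ymm0-%ymm15, to avoid the SSE/AVX transition penalty in legacy-SSE callers -- but VZEROUPPER can also abort an in-flight RTM transaction on affected processors. The registers %ymm16-%ymm31 are reachable only through EVEX encodings and have no legacy SSE aliases, so code restricted to them never dirties the state VZEROUPPER exists to clear. A minimal standalone sketch of the contrast (hypothetical labels, not glibc symbols; assumes an assembler with AVX512VL/AVX512BW support):

	.text
	/* AVX2 path: %ymm0 aliases legacy %xmm0, so VZEROUPPER is
	   needed before ret -- and VZEROUPPER can abort an RTM
	   transaction.  */
store32_avx2:
	vpbroadcastb %xmm0, %ymm0	/* Duplicate the byte into all 32 lanes.  */
	vmovdqu	%ymm0, (%rdi)		/* 32-byte unaligned store.  */
	vzeroupper
	ret

	/* EVEX path: %ymm16 is invisible to legacy SSE code, so the
	   function can return with no VZEROUPPER at all.  */
store32_evex:
	vpbroadcastb %xmm16, %ymm16	/* AVX512VL + AVX512BW encoding.  */
	vmovdqu64 %ymm16, (%rdi)	/* EVEX-encoded 32-byte store.  */
	ret

This trade is exactly what the VEC0/VMOVU/VZEROUPPER macro choices in the new memset-evex-unaligned-erms.S below encode.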

sysdeps/x86_64/multiarch/Makefile
sysdeps/x86_64/multiarch/ifunc-impl-list.c
sysdeps/x86_64/multiarch/ifunc-memset.h
sysdeps/x86_64/multiarch/ifunc-wmemset.h
sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S [new file with mode: 0644]
sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S

diff --git a/sysdeps/x86_64/multiarch/Makefile b/sysdeps/x86_64/multiarch/Makefile
index 2a5a3dd71b8f58c5c355370cbdc14357bc905095..9c44bd6a9bd3b4cea594061f26a947e492ada1e2 100644
@@ -47,6 +47,7 @@ sysdep_routines += strncat-c stpncpy-c strncpy-c \
                   memchr-evex \
                   memmove-evex-unaligned-erms \
                   memrchr-evex \
+                  memset-evex-unaligned-erms \
                   rawmemchr-evex \
                   stpcpy-evex \
                   stpncpy-evex \
diff --git a/sysdeps/x86_64/multiarch/ifunc-impl-list.c b/sysdeps/x86_64/multiarch/ifunc-impl-list.c
index ec787754704fc13aaeb161af691dbfd4c4ec7091..97d237d263218fe8fb71f2ba12ec9793f400b932 100644
@@ -160,6 +160,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
              IFUNC_IMPL_ADD (array, i, __memset_chk,
                              HAS_ARCH_FEATURE (AVX2_Usable),
                              __memset_chk_avx2_unaligned_erms)
+             IFUNC_IMPL_ADD (array, i, __memset_chk,
+                             (HAS_ARCH_FEATURE (AVX512VL_Usable)
+                              && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+                             __memset_chk_evex_unaligned)
+             IFUNC_IMPL_ADD (array, i, __memset_chk,
+                             (HAS_ARCH_FEATURE (AVX512VL_Usable)
+                              && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+                             __memset_chk_evex_unaligned_erms)
              IFUNC_IMPL_ADD (array, i, __memset_chk,
                              HAS_ARCH_FEATURE (AVX512F_Usable),
                              __memset_chk_avx512_unaligned_erms)
@@ -185,6 +193,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
              IFUNC_IMPL_ADD (array, i, memset,
                              HAS_ARCH_FEATURE (AVX2_Usable),
                              __memset_avx2_unaligned_erms)
+             IFUNC_IMPL_ADD (array, i, memset,
+                             (HAS_ARCH_FEATURE (AVX512VL_Usable)
+                              && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+                             __memset_evex_unaligned)
+             IFUNC_IMPL_ADD (array, i, memset,
+                             (HAS_ARCH_FEATURE (AVX512VL_Usable)
+                              && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+                             __memset_evex_unaligned_erms)
              IFUNC_IMPL_ADD (array, i, memset,
                              HAS_ARCH_FEATURE (AVX512F_Usable),
                              __memset_avx512_unaligned_erms)
@@ -555,6 +571,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
              IFUNC_IMPL_ADD (array, i, wmemset,
                              HAS_ARCH_FEATURE (AVX2_Usable),
                              __wmemset_avx2_unaligned)
+             IFUNC_IMPL_ADD (array, i, wmemset,
+                             HAS_ARCH_FEATURE (AVX512VL_Usable),
+                             __wmemset_evex_unaligned)
              IFUNC_IMPL_ADD (array, i, wmemset,
                              HAS_ARCH_FEATURE (AVX512F_Usable),
                              __wmemset_avx512_unaligned))
@@ -723,6 +742,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
              IFUNC_IMPL_ADD (array, i, __wmemset_chk,
                              HAS_ARCH_FEATURE (AVX2_Usable),
                              __wmemset_chk_avx2_unaligned)
+             IFUNC_IMPL_ADD (array, i, __wmemset_chk,
+                             HAS_ARCH_FEATURE (AVX512VL_Usable),
+                             __wmemset_chk_evex_unaligned)
              IFUNC_IMPL_ADD (array, i, __wmemset_chk,
                              HAS_ARCH_FEATURE (AVX512F_Usable),
                              __wmemset_chk_avx512_unaligned))
diff --git a/sysdeps/x86_64/multiarch/ifunc-memset.h b/sysdeps/x86_64/multiarch/ifunc-memset.h
index c98d7577fc0adf82c72b88bd242a4923980025ca..7e7e9dcec18c7e68a3885dda9a88bdd34ffbf99a 100644
@@ -27,6 +27,10 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms)
   attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
+  attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
+  attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
   attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
@@ -56,10 +60,22 @@ IFUNC_SELECTOR (void)
 
   if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
     {
-      if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
-       return OPTIMIZE (avx2_unaligned_erms);
-      else
-       return OPTIMIZE (avx2_unaligned);
+      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512VL_Usable)
+         && CPU_FEATURES_ARCH_P (cpu_features, AVX512BW_Usable))
+       {
+         if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+           return OPTIMIZE (evex_unaligned_erms);
+
+         return OPTIMIZE (evex_unaligned);
+       }
+
+      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+       {
+         if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+           return OPTIMIZE (avx2_unaligned_erms);
+
+         return OPTIMIZE (avx2_unaligned);
+       }
     }
 
   if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
diff --git a/sysdeps/x86_64/multiarch/ifunc-wmemset.h b/sysdeps/x86_64/multiarch/ifunc-wmemset.h
index f480b822884f02cc7693308af2133dcdaa045c74..15021cf7cf49295b604631f57ab506397587544c 100644
@@ -20,6 +20,7 @@
 
 extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden;
 
 static inline void *
@@ -27,14 +28,18 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
+  if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     {
       if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
-         && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
+         && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512)
+         && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
        return OPTIMIZE (avx512_unaligned);
-      else
+
+      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512VL_Usable))
+       return OPTIMIZE (evex_unaligned);
+
+      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
        return OPTIMIZE (avx2_unaligned);
     }
 
diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
new file mode 100644
index 0000000..ae0a4d6
--- /dev/null
+++ b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
@@ -0,0 +1,24 @@
+#if IS_IN (libc)
+# define VEC_SIZE      32
+# define XMM0          xmm16
+# define YMM0          ymm16
+# define VEC0          ymm16
+# define VEC(i)                VEC##i
+# define VMOVU         vmovdqu64
+# define VMOVA         vmovdqa64
+# define VZEROUPPER
+
+# define MEMSET_VDUP_TO_VEC0_AND_SET_RETURN(d, r) \
+  movq r, %rax; \
+  vpbroadcastb d, %VEC0
+
+# define WMEMSET_VDUP_TO_VEC0_AND_SET_RETURN(d, r) \
+  movq r, %rax; \
+  vpbroadcastd d, %VEC0
+
+# define SECTION(p)            p##.evex
+# define MEMSET_SYMBOL(p,s)    p##_evex_##s
+# define WMEMSET_SYMBOL(p,s)   p##_evex_##s
+
+# include "memset-vec-unaligned-erms.S"
+#endif
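Usage note (editorial, not part of the commit): the new file contains no memset logic of its own. It only pins the vector parameters -- 256-bit %ymm16 operands, EVEX-encoded moves, an empty VZEROUPPER -- plus the _evex_ symbol-name pattern, then includes the shared body, which emits every actual entry point. A hypothetical miniature of the same parameterize-then-include pattern (names are illustrative, not glibc's; a .S file like this runs through cpp before gas):

#define VMOVU		vmovdqu64	/* EVEX-encoded unaligned move.  */
#define VEC0		ymm16		/* High register: no VZEROUPPER needed.  */
#define SYMBOL(s)	s##_evex	/* Same token pasting as MEMSET_SYMBOL.  */

	.text
	.globl	SYMBOL(copy32)
SYMBOL(copy32):				/* Expands to copy32_evex.  */
	VMOVU	(%rsi), %VEC0		/* Load 32 bytes.  */
	VMOVU	%VEC0, (%rdi)		/* Store 32 bytes.  */
	ret

This is why the hunks below parameterize memset-vec-unaligned-erms.S: its hard-coded %xmm0/%ymm0 and vmovdqu references must go through XMM0/YMM0/VMOVU so the EVEX wrapper can redirect them.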
diff --git a/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S
index 5e0d307d859f46fb9453dcffdea6874b30c96b1f..6736dba1da66c83152a94d8c92c68ab3682f4838 100644
 # define WMEMSET_CHK_SYMBOL(p,s)       WMEMSET_SYMBOL(p, s)
 #endif
 
+#ifndef XMM0
+# define XMM0                          xmm0
+#endif
+
+#ifndef YMM0
+# define YMM0                          ymm0
+#endif
+
 #ifndef VZEROUPPER
 # if VEC_SIZE > 16
 #  define VZEROUPPER                   vzeroupper
@@ -77,7 +85,7 @@
 ENTRY (__bzero)
        mov     %RDI_LP, %RAX_LP /* Set return value.  */
        mov     %RSI_LP, %RDX_LP /* Set n.  */
-       pxor    %xmm0, %xmm0
+       pxor    %XMM0, %XMM0
        jmp     L(entry_from_bzero)
 END (__bzero)
 weak_alias (__bzero, bzero)
@@ -233,7 +241,7 @@ L(less_vec):
        cmpb    $16, %dl
        jae     L(between_16_31)
 # endif
-       MOVQ    %xmm0, %rcx
+       MOVQ    %XMM0, %rcx
        cmpb    $8, %dl
        jae     L(between_8_15)
        cmpb    $4, %dl
@@ -248,16 +256,16 @@ L(less_vec):
 # if VEC_SIZE > 32
        /* From 32 to 63.  No branch when size == 32.  */
 L(between_32_63):
-       vmovdqu %ymm0, -32(%rdi,%rdx)
-       vmovdqu %ymm0, (%rdi)
+       VMOVU   %YMM0, -32(%rdi,%rdx)
+       VMOVU   %YMM0, (%rdi)
        VZEROUPPER
        ret
 # endif
 # if VEC_SIZE > 16
        /* From 16 to 31.  No branch when size == 16.  */
 L(between_16_31):
-       vmovdqu %xmm0, -16(%rdi,%rdx)
-       vmovdqu %xmm0, (%rdi)
+       VMOVU   %XMM0, -16(%rdi,%rdx)
+       VMOVU   %XMM0, (%rdi)
        VZEROUPPER
        ret
 # endif