git.ipfire.org Git - thirdparty/glibc.git/commitdiff
x86-64: Add memset family functions with 256-bit EVEX
author: H.J. Lu <hjl.tools@gmail.com>
Fri, 5 Mar 2021 15:15:03 +0000 (07:15 -0800)
committer: H.J. Lu <hjl.tools@gmail.com>
Thu, 27 Jan 2022 20:47:19 +0000 (12:47 -0800)
Update ifunc-memset.h/ifunc-wmemset.h to select the function optimized
with 256-bit EVEX instructions using YMM16-YMM31 registers to avoid RTM
abort with usable AVX512VL and AVX512BW since VZEROUPPER isn't needed at
function exit.

(cherry picked from commit 1b968b6b9b3aac702ac2f133e0dd16cfdbb415ee)

sysdeps/x86_64/multiarch/Makefile
sysdeps/x86_64/multiarch/ifunc-impl-list.c
sysdeps/x86_64/multiarch/ifunc-memset.h
sysdeps/x86_64/multiarch/ifunc-wmemset.h
sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S [new file with mode: 0644]
sysdeps/x86_64/multiarch/memset-vec-unaligned-erms.S

index e75805a64530371269136c7afa20e8119945833e..062e6c0b201ab69797fa486efada776fbee24844 100644 (file)
@@ -44,6 +44,7 @@ sysdep_routines += strncat-c stpncpy-c strncpy-c \
                   memchr-evex \
                   memmove-evex-unaligned-erms \
                   memrchr-evex \
+                  memset-evex-unaligned-erms \
                   rawmemchr-evex \
                   stpcpy-evex \
                   stpncpy-evex \
index d7814a965f42f5ce1e0ebfa65bd88951701bce36..7ef94fe401f85f21baa72a8f0ea52ee06208f08f 100644 (file)
@@ -160,6 +160,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
              IFUNC_IMPL_ADD (array, i, __memset_chk,
                              HAS_ARCH_FEATURE (AVX2_Usable),
                              __memset_chk_avx2_unaligned_erms)
+             IFUNC_IMPL_ADD (array, i, __memset_chk,
+                             (HAS_ARCH_FEATURE (AVX512VL_Usable)
+                              && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+                             __memset_chk_evex_unaligned)
+             IFUNC_IMPL_ADD (array, i, __memset_chk,
+                             (HAS_ARCH_FEATURE (AVX512VL_Usable)
+                              && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+                             __memset_chk_evex_unaligned_erms)
              IFUNC_IMPL_ADD (array, i, __memset_chk,
                              HAS_ARCH_FEATURE (AVX512F_Usable),
                              __memset_chk_avx512_unaligned_erms)
@@ -185,6 +193,14 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
              IFUNC_IMPL_ADD (array, i, memset,
                              HAS_ARCH_FEATURE (AVX2_Usable),
                              __memset_avx2_unaligned_erms)
+             IFUNC_IMPL_ADD (array, i, memset,
+                             (HAS_ARCH_FEATURE (AVX512VL_Usable)
+                              && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+                             __memset_evex_unaligned)
+             IFUNC_IMPL_ADD (array, i, memset,
+                             (HAS_ARCH_FEATURE (AVX512VL_Usable)
+                              && HAS_ARCH_FEATURE (AVX512BW_Usable)),
+                             __memset_evex_unaligned_erms)
              IFUNC_IMPL_ADD (array, i, memset,
                              HAS_ARCH_FEATURE (AVX512F_Usable),
                              __memset_avx512_unaligned_erms)
@@ -543,6 +559,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
              IFUNC_IMPL_ADD (array, i, wmemset,
                              HAS_ARCH_FEATURE (AVX2_Usable),
                              __wmemset_avx2_unaligned)
+             IFUNC_IMPL_ADD (array, i, wmemset,
+                             HAS_ARCH_FEATURE (AVX512VL_Usable),
+                             __wmemset_evex_unaligned)
              IFUNC_IMPL_ADD (array, i, wmemset,
                              HAS_ARCH_FEATURE (AVX512F_Usable),
                              __wmemset_avx512_unaligned))
@@ -711,6 +730,9 @@ __libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array,
              IFUNC_IMPL_ADD (array, i, __wmemset_chk,
                              HAS_ARCH_FEATURE (AVX2_Usable),
                              __wmemset_chk_avx2_unaligned)
+             IFUNC_IMPL_ADD (array, i, __wmemset_chk,
+                             HAS_ARCH_FEATURE (AVX512VL_Usable),
+                             __wmemset_chk_evex_unaligned)
              IFUNC_IMPL_ADD (array, i, __wmemset_chk,
                              HAS_ARCH_FEATURE (AVX512F_Usable),
                              __wmemset_chk_avx512_unaligned))
index 19b5ae676c2d5d5386fb686b6cbbe04d9bfe5903..fea6c832f43f6d0487af82abf7fd7b2d5250e4a2 100644 (file)
@@ -27,6 +27,10 @@ extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned_erms)
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned_erms)
   attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned)
+  attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned_erms)
+  attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned)
   attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned_erms)
@@ -56,10 +60,22 @@ IFUNC_SELECTOR (void)
 
   if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable))
     {
-      if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
-       return OPTIMIZE (avx2_unaligned_erms);
-      else
-       return OPTIMIZE (avx2_unaligned);
+      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512VL_Usable)
+         && CPU_FEATURES_ARCH_P (cpu_features, AVX512BW_Usable))
+       {
+         if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+           return OPTIMIZE (evex_unaligned_erms);
+
+         return OPTIMIZE (evex_unaligned);
+       }
+
+      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
+       {
+         if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
+           return OPTIMIZE (avx2_unaligned_erms);
+
+         return OPTIMIZE (avx2_unaligned);
+       }
     }
 
   if (CPU_FEATURES_CPU_P (cpu_features, ERMS))
index 2f1085f5fc483c708dbd742518858c0cf3dc4605..fae721cdb0d2d15b68b99c97b1c7dcf9fe7eabee 100644 (file)
@@ -20,6 +20,7 @@
 
 extern __typeof (REDIRECT_NAME) OPTIMIZE (sse2_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx2_unaligned) attribute_hidden;
+extern __typeof (REDIRECT_NAME) OPTIMIZE (evex_unaligned) attribute_hidden;
 extern __typeof (REDIRECT_NAME) OPTIMIZE (avx512_unaligned) attribute_hidden;
 
 static inline void *
@@ -27,14 +28,18 @@ IFUNC_SELECTOR (void)
 {
   const struct cpu_features* cpu_features = __get_cpu_features ();
 
-  if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER)
-      && CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
+  if (CPU_FEATURES_ARCH_P (cpu_features, AVX2_Usable)
       && CPU_FEATURES_ARCH_P (cpu_features, AVX_Fast_Unaligned_Load))
     {
       if (CPU_FEATURES_ARCH_P (cpu_features, AVX512F_Usable)
-         && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512))
+         && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_AVX512)
+         && !CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
        return OPTIMIZE (avx512_unaligned);
-      else
+
+      if (CPU_FEATURES_ARCH_P (cpu_features, AVX512VL_Usable))
+       return OPTIMIZE (evex_unaligned);
+
+      if (!CPU_FEATURES_ARCH_P (cpu_features, Prefer_No_VZEROUPPER))
        return OPTIMIZE (avx2_unaligned);
     }
 
diff --git a/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S b/sysdeps/x86_64/multiarch/memset-evex-unaligned-erms.S
new file mode 100644 (file)
index 0000000..ae0a4d6
--- /dev/null
@@ -0,0 +1,24 @@
+#if IS_IN (libc)
+# define VEC_SIZE      32
+# define XMM0          xmm16
+# define YMM0          ymm16
+# define VEC0          ymm16
+# define VEC(i)                VEC##i
+# define VMOVU         vmovdqu64
+# define VMOVA         vmovdqa64
+# define VZEROUPPER
+
+# define MEMSET_VDUP_TO_VEC0_AND_SET_RETURN(d, r) \
+  movq r, %rax; \
+  vpbroadcastb d, %VEC0
+
+# define WMEMSET_VDUP_TO_VEC0_AND_SET_RETURN(d, r) \
+  movq r, %rax; \
+  vpbroadcastd d, %VEC0
+
+# define SECTION(p)            p##.evex
+# define MEMSET_SYMBOL(p,s)    p##_evex_##s
+# define WMEMSET_SYMBOL(p,s)   p##_evex_##s
+
+# include "memset-vec-unaligned-erms.S"
+#endif
index a44f1bc3b2de9b8bb61b6a8afd4fafd7011192bf..9f14e956d1416434a809a77b450cdc0a5c14f2d2 100644 (file)
 # define WMEMSET_CHK_SYMBOL(p,s)       WMEMSET_SYMBOL(p, s)
 #endif
 
+#ifndef XMM0
+# define XMM0                          xmm0
+#endif
+
+#ifndef YMM0
+# define YMM0                          ymm0
+#endif
+
 #ifndef VZEROUPPER
 # if VEC_SIZE > 16
 #  define VZEROUPPER                   vzeroupper
@@ -77,7 +85,7 @@
 ENTRY (__bzero)
        mov     %RDI_LP, %RAX_LP /* Set return value.  */
        mov     %RSI_LP, %RDX_LP /* Set n.  */
-       pxor    %xmm0, %xmm0
+       pxor    %XMM0, %XMM0
        jmp     L(entry_from_bzero)
 END (__bzero)
 weak_alias (__bzero, bzero)
@@ -233,7 +241,7 @@ L(less_vec):
        cmpb    $16, %dl
        jae     L(between_16_31)
 # endif
-       MOVQ    %xmm0, %rcx
+       MOVQ    %XMM0, %rcx
        cmpb    $8, %dl
        jae     L(between_8_15)
        cmpb    $4, %dl
@@ -248,16 +256,16 @@ L(less_vec):
 # if VEC_SIZE > 32
        /* From 32 to 63.  No branch when size == 32.  */
 L(between_32_63):
-       vmovdqu %ymm0, -32(%rdi,%rdx)
-       vmovdqu %ymm0, (%rdi)
+       VMOVU   %YMM0, -32(%rdi,%rdx)
+       VMOVU   %YMM0, (%rdi)
        VZEROUPPER
        ret
 # endif
 # if VEC_SIZE > 16
        /* From 16 to 31.  No branch when size == 16.  */
 L(between_16_31):
-       vmovdqu %xmm0, -16(%rdi,%rdx)
-       vmovdqu %xmm0, (%rdi)
+       VMOVU   %XMM0, -16(%rdi,%rdx)
+       VMOVU   %XMM0, (%rdi)
        VZEROUPPER
        ret
 # endif