/* glibc source: sysdeps/x86_64/multiarch/memmove.S
   (recovered from a git-blame view, git.ipfire.org mirror, commit c867597b;
   blame-viewer artifacts removed).  */
/* Multiple versions of memmove
   All versions must be listed in ifunc-impl-list.c.
   Copyright (C) 2016-2017 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <init-arch.h>

/* Define multiple versions only for the definition in lib and for
   DSO.  */
#if IS_IN (libc)
	.text
/* IFUNC resolver for memmove: returns (in RAX) the address of the
   implementation best suited to the running CPU.  Selection order:
     1. Prefer_ERMS tuning		-> __memmove_erms
     2. AVX512F usable:
	  Prefer_No_VZEROUPPER		-> __memmove_avx512_no_vzeroupper
	  ERMS				-> __memmove_avx512_unaligned_erms
	  otherwise			-> __memmove_avx512_unaligned
     3. AVX_Fast_Unaligned_Load:
	  ERMS				-> __memmove_avx_unaligned_erms
	  otherwise			-> __memmove_avx_unaligned
     4. Fast_Unaligned_Copy:
	  ERMS				-> __memmove_sse2_unaligned_erms
	  otherwise			-> __memmove_sse2_unaligned
     5. SSSE3:
	  Fast_Copy_Backward		-> __memmove_ssse3_back
	  otherwise			-> __memmove_ssse3
     6. fallback			-> __memmove_sse2_unaligned
   Label 2f is the shared "RAX already holds the answer" return.  */
ENTRY(__libc_memmove)
	.type	__libc_memmove, @gnu_indirect_function
	LOAD_RTLD_GLOBAL_RO_RDX
	lea	__memmove_erms(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Prefer_ERMS)
	jnz	2f
	HAS_ARCH_FEATURE (AVX512F_Usable)
	jz	1f
	lea	__memmove_avx512_no_vzeroupper(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Prefer_No_VZEROUPPER)
	jnz	2f
	lea	__memmove_avx512_unaligned_erms(%rip), %RAX_LP
	HAS_CPU_FEATURE (ERMS)
	jnz	2f
	lea	__memmove_avx512_unaligned(%rip), %RAX_LP
	ret
1:	lea	__memmove_avx_unaligned(%rip), %RAX_LP
	HAS_ARCH_FEATURE (AVX_Fast_Unaligned_Load)
	jz	L(Fast_Unaligned_Load)
	HAS_CPU_FEATURE (ERMS)
	jz	2f
	lea	__memmove_avx_unaligned_erms(%rip), %RAX_LP
	ret
L(Fast_Unaligned_Load):
	lea	__memmove_sse2_unaligned(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Fast_Unaligned_Copy)
	jz	L(SSSE3)
	HAS_CPU_FEATURE (ERMS)
	jz	2f
	lea	__memmove_sse2_unaligned_erms(%rip), %RAX_LP
	ret
L(SSSE3):
	HAS_CPU_FEATURE (SSSE3)
	jz	2f
	lea	__memmove_ssse3_back(%rip), %RAX_LP
	HAS_ARCH_FEATURE (Fast_Copy_Backward)
	jnz	2f
	lea	__memmove_ssse3(%rip), %RAX_LP
2:	ret
END(__libc_memmove)
#endif

#if IS_IN (libc)
/* The included ../memmove.S builds the SSE2 baseline under these names.  */
# define MEMMOVE_SYMBOL(p,s)	p##_sse2_##s

# ifdef SHARED
libc_hidden_ver (__memmove_sse2_unaligned, memmove)
libc_hidden_ver (__memcpy_sse2_unaligned, memcpy)
libc_hidden_ver (__mempcpy_sse2_unaligned, mempcpy)
libc_hidden_ver (__mempcpy_sse2_unaligned, __mempcpy)

# undef libc_hidden_builtin_def
/* It doesn't make sense to send libc-internal memmove calls through a PLT.
   The speedup we get from using SSE2 instructions is likely eaten away
   by the indirect call in the PLT.  */
# define libc_hidden_builtin_def
# endif
strong_alias (__libc_memmove, memmove)
#endif

#if !defined SHARED || !IS_IN (libc)
weak_alias (__mempcpy, mempcpy)
#endif

#include "../memmove.S"

#if defined SHARED && IS_IN (libc)
# include <shlib-compat.h>
# if SHLIB_COMPAT (libc, GLIBC_2_2_5, GLIBC_2_14)
/* Use __memmove_sse2_unaligned to support overlapping addresses.  */
compat_symbol (libc, __memmove_sse2_unaligned, memcpy, GLIBC_2_2_5);
# endif
#endif