1 /* memset optimized with AVX512 for KNL hardware.
2 Copyright (C) 2015-2019 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>. */
23 #include "asm-syntax.h"
/* NOTE(review): this view is a sampled fragment.  The embedded original
   line numbers are non-contiguous (17 -> 23, 26 -> 29, 29 -> 33, ...),
   so the ENTRY/END macros, local labels, the cmp/branch pairs, the
   ret instructions, and the loop pointer-increment arithmetic all fall
   on lines that are not visible here.  Comments below describe only
   what the visible instructions do; everything that depends on a
   missing line is marked as an assumption to be confirmed against the
   full file.  SysV AMD64 memset convention throughout: %rdi = dest,
   %esi = fill byte, %rdx = length, %rax = return value (dest).  */
25 # define MEMSET __memset_avx512_no_vzeroupper
26 # define MEMSET_CHK __memset_chk_avx512_no_vzeroupper
/* ISA-specific code lives in its own allocatable+executable section.  */
29 .section .text.avx512,"ax",@progbits
/* __memset_chk-style fortify check: abort via __chk_fail when the
   destination object is too small.  The cmp that sets the flags for
   this jb (presumably dest-size vs. length) is on a missing line --
   confirm against the full source.  */
33 jb HIDDEN_JUMPTARGET (__chk_fail)
/* The original comment below documents a missing instruction
   (presumably a `mov %edx, %edx` zero-extending the x32/ILP32 length
   argument) -- TODO confirm.  */
39 /* Clear the upper 32 bits. */
/* Zero %xmm0 so the vpshufb below uses an all-zero shuffle mask,
   i.e. it replicates byte 0 of %xmm1 into every lane of %xmm0.
   %xmm1 presumably receives the fill byte (%esi) via a vmovd on a
   missing line -- confirm.  */
42 vpxor %xmm0, %xmm0, %xmm0
/* %rsi = dest + length: one-past-the-end of the fill region.  All the
   negative-offset -0xNN(%rsi) stores below address the tail relative
   to the end, so overlapping head/tail stores cover any length without
   a remainder loop.  */
44 lea (%rdi, %rdx), %rsi
/* %xmm0 now holds the fill byte broadcast to all 16 byte lanes.  */
46 vpshufb %xmm0, %xmm1, %xmm0
/* Widen the broadcast to the full 64-byte %zmm2; every 64-byte store
   below writes 64 copies of the fill byte.  */
50 vbroadcastss %xmm0, %zmm2
/* Visible stores of a straight-line mid-size path (first store at
   (%rdi) is on a missing line): four 64B stores from the front, four
   64B stores anchored at the end -- 512B covered with overlap, no
   loop.  Size-dispatch compares/branches are on missing lines.  */
55 vmovups %zmm2, 0x40(%rdi)
56 vmovups %zmm2, 0x80(%rdi)
57 vmovups %zmm2, 0xC0(%rdi)
58 vmovups %zmm2, -0x100(%rsi)
59 vmovups %zmm2, -0xC0(%rsi)
60 vmovups %zmm2, -0x80(%rsi)
61 vmovups %zmm2, -0x40(%rsi)
/* Smaller-size paths, each two-sided (front + end-anchored overlap).
   The guarding cmp/jb chain and per-path leading stores are on
   missing lines.  */
68 vmovups %zmm2, 0x40(%rdi)
69 vmovups %zmm2, -0x80(%rsi)
70 vmovups %zmm2, -0x40(%rsi)
71 vmovups %zmm2, -0x40(%rsi)
/* 32-byte tail store (lengths in [32,64) presumably).  */
84 vmovdqu %ymm2, -0x20(%rsi)
/* 16-byte tail store; uses %xmm0, the 16-byte broadcast.  */
89 vmovdqu %xmm0, -0x10(%rsi)
/* 8-byte tail store (low qword of the broadcast pattern).  */
96 vmovq %xmm0, -0x08(%rsi)
/* 4-byte tail store; %ecx presumably loaded from %xmm0 via vmovd on a
   missing line -- confirm.  */
104 mov %ecx, -0x04(%rsi)
/* Large-size dispatch: compare the length against half the shared
   (last-level) cache size to choose between cached stores and the
   non-temporal path.  The cmp/ja using %rcx is on a missing line.  */
122 mov __x86_shared_cache_size_half(%rip), %rcx
126 ja L(1024bytesormore)
/* 512..1024-byte path: eight 64B stores from the front plus eight
   64B stores anchored at the end, fully unrolled, no loop.  */
128 vmovups %zmm2, (%rdi)
129 vmovups %zmm2, 0x40(%rdi)
130 vmovups %zmm2, 0x80(%rdi)
131 vmovups %zmm2, 0xC0(%rdi)
132 vmovups %zmm2, 0x100(%rdi)
133 vmovups %zmm2, 0x140(%rdi)
134 vmovups %zmm2, 0x180(%rdi)
135 vmovups %zmm2, 0x1C0(%rdi)
136 vmovups %zmm2, -0x200(%rsi)
137 vmovups %zmm2, -0x1C0(%rsi)
138 vmovups %zmm2, -0x180(%rsi)
139 vmovups %zmm2, -0x140(%rsi)
140 vmovups %zmm2, -0x100(%rsi)
141 vmovups %zmm2, -0xC0(%rsi)
142 vmovups %zmm2, -0x80(%rsi)
143 vmovups %zmm2, -0x40(%rsi)
146 /* Align on 64 and loop with aligned stores. */
/* Unaligned head store at the original dest (%rax still holds it);
   the and/add that round %rdi up to 64 are on missing lines.  */
149 vmovups %zmm2, (%rax)
/* Main cached loop: 256 bytes per iteration with aligned 64B stores.
   The `add $0x100, %rdi` / `cmp %rsi, %rdi` feeding the jb are on
   missing lines; %rsi has presumably been biased (end - 0x100) on a
   missing line so the loop stops with <= 256B left -- confirm.  */
153 L(gobble_256bytes_loop):
154 vmovaps %zmm2, (%rdi)
155 vmovaps %zmm2, 0x40(%rdi)
156 vmovaps %zmm2, 0x80(%rdi)
157 vmovaps %zmm2, 0xC0(%rdi)
160 jb L(gobble_256bytes_loop)
/* Unaligned trailing stores finish the final (possibly overlapping)
   256 bytes at the biased end pointer.  */
161 vmovups %zmm2, (%rsi)
162 vmovups %zmm2, 0x40(%rsi)
163 vmovups %zmm2, 0x80(%rsi)
164 vmovups %zmm2, 0xC0(%rsi)
167 /* Align on 128 and loop with non-temporal stores. */
/* Very large path (length > half shared cache): unaligned head stores
   at the original dest, then a non-temporal loop to avoid polluting
   the cache.  The 128-byte alignment of %rdi happens on missing
   lines.  */
171 vmovups %zmm2, (%rax)
172 vmovups %zmm2, 0x40(%rax)
/* 512 bytes per iteration with streaming stores (vmovntdq requires
   the 64-byte alignment established above).  Loop increment/compare
   are on missing lines; an sfence presumably follows loop exit on a
   missing line, as required after non-temporal stores -- confirm.  */
175 L(gobble_512bytes_nt_loop):
176 vmovntdq %zmm2, (%rdi)
177 vmovntdq %zmm2, 0x40(%rdi)
178 vmovntdq %zmm2, 0x80(%rdi)
179 vmovntdq %zmm2, 0xC0(%rdi)
180 vmovntdq %zmm2, 0x100(%rdi)
181 vmovntdq %zmm2, 0x140(%rdi)
182 vmovntdq %zmm2, 0x180(%rdi)
183 vmovntdq %zmm2, 0x1C0(%rdi)
186 jb L(gobble_512bytes_nt_loop)
/* Ordinary (cached, unaligned) stores finish the final up-to-512
   bytes at the biased end pointer.  */
188 vmovups %zmm2, (%rsi)
189 vmovups %zmm2, 0x40(%rsi)
190 vmovups %zmm2, 0x80(%rsi)
191 vmovups %zmm2, 0xC0(%rsi)
192 vmovups %zmm2, 0x100(%rsi)
193 vmovups %zmm2, 0x140(%rsi)
194 vmovups %zmm2, 0x180(%rsi)
195 vmovups %zmm2, 0x1C0(%rsi)