/* Optimized mempcpy implementation for POWER7.
   Copyright (C) 2010-2018 Free Software Foundation, Inc.
   Contributed by Luis Machado <luisgpm@br.ibm.com>.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* void * [r3] __mempcpy (void *dst [r3], void *src [r4], size_t len [r5]);
   Returns 'dst' + 'len'.  */
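/* For reference, the whole function is semantically equivalent to this
   minimal C sketch (illustrative only; the hand-written assembly below
   is the actual implementation):

     void *
     mempcpy_ref (void *dst, const void *src, size_t len)
     {
       char *d = dst;
       const char *s = src;
       while (len--)
         *d++ = *s++;
       return d;		// d now equals (char *) dst + original len
     }
*/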
# define MEMPCPY __mempcpy

ENTRY_TOCLESS (MEMPCPY, 5)
	ble	cr1,L(copy_LT_32)  /* If move < 32 bytes use short move
				      code.  */

	andi.	11,3,7	      /* Check alignment of DST.  */
	clrldi	10,4,61	      /* Check alignment of SRC.  */
	cmpld	cr6,10,11     /* SRC and DST alignments match?  */

	bne	cr6,L(copy_GE_32_unaligned)

	srdi	9,5,3	      /* Number of full doublewords remaining.  */

	beq	L(copy_GE_32_aligned_cont)
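/* The dispatch above, in rough C terms (a sketch with hypothetical
   variable names, not the generated code):

     if (len < 32)
       goto copy_LT_32;			// short-move code
     if ((uintptr_t) src % 8 != (uintptr_t) dst % 8)
       goto copy_GE_32_unaligned;	// alignments differ, use permutes
     ndw = len / 8;			// full doublewords to move
     if ((uintptr_t) dst % 8 == 0)
       goto copy_GE_32_aligned_cont;	// no head bytes to peel
*/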
	/* Get the SRC aligned to 8 bytes.  */

	clrldi	10,12,61      /* Check alignment of SRC again.  */
	srdi	9,31,3	      /* Number of full doublewords remaining.  */

L(copy_GE_32_aligned_cont):
	/* Copy 1~3 doublewords so the main loop starts
	   at a multiple of 32 bytes.  */

1:	/* Copy 1 doubleword and set the counter.  */
	/* Main aligned copy loop.  Copies 32 bytes at a time.  */
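/* Under the assumption that SRC and DST now share 8-byte alignment, the
   main loop behaves like this hypothetical C helper (a sketch, not the
   emitted code):

     #include <stddef.h>
     #include <stdint.h>

     static void
     copy_aligned_dwords (uint64_t *d, const uint64_t *s, size_t ndw)
     {
       while (ndw % 4)			// peel 1~3 doublewords first
         { *d++ = *s++; ndw--; }
       for (; ndw != 0; ndw -= 4)	// then 32 bytes per iteration
         {
           d[0] = s[0]; d[1] = s[1];
           d[2] = s[2]; d[3] = s[3];
           d += 4; s += 4;
         }
     }
*/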
	/* Check for tail bytes.  */

	/* At this point we have a tail of 0-7 bytes and we know that the
	   destination is doubleword-aligned.  */
4:	/* Copy 4 bytes.  */

2:	/* Copy 2 bytes.  */

1:	/* Copy 1 byte.  */

0:	/* Return DST + LEN pointer.  */
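/* The 4/2/1 blocks above (and the similar cascades later in this file)
   test one bit of the residual count each: every set bit selects exactly
   one copy of that power-of-two size, so at most three branches retire a
   0~7 byte tail.  A C sketch of the idea (hypothetical helper; fixed-size
   memcpy compiles down to single loads and stores):

     #include <string.h>

     static void
     copy_tail_0_7 (char *d, const char *s, size_t rem)
     {
       if (rem & 4) { memcpy (d, s, 4); d += 4; s += 4; }
       if (rem & 2) { memcpy (d, s, 2); d += 2; s += 2; }
       if (rem & 1) *d = *s;
     }
*/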
	/* Handle copies of 0~31 bytes.  */

	/* At least 9 bytes to go.  */

	beq	L(copy_LT_32_aligned)
	/* Force 4-byte alignment for SRC.  */

1:	bf	31,L(end_4bytes_alignment)

L(end_4bytes_alignment):
L(copy_LT_32_aligned):
	/* At least 6 bytes to go, and SRC is word-aligned.  */

8:	/* Copy 8 bytes.  */

4:	/* Copy 4 bytes.  */

2:	/* Copy 2-3 bytes.  */

1:	/* Copy 1 byte.  */

0:	/* Return DST + LEN pointer.  */
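/* In C terms, the 9~31 byte path peels up to 3 leading bytes so the
   wider copies above read SRC on word boundaries, then drains the rest
   with the same bit-per-size cascade (a sketch; 's', 'd' and 'len' are
   hypothetical names, uintptr_t from <stdint.h>):

     while ((uintptr_t) s & 3)		// at most 3 iterations
       { *d++ = *s++; len--; }
*/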
	/* Handles copies of 0~8 bytes.  */

	/* Though we could've used ld/std here, they are still
	   slow for unaligned cases.  */

	ld	3,-16(1)      /* Return DST + LEN pointer.  */
4:	/* Copies 4~7 bytes.  */

5:	/* Copy 1 byte.  */

0:	/* Return DST + LEN pointer.  */
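/* The note above is why this block avoids a single 8-byte ld/std pair:
   misaligned doubleword accesses take a slow path on this core, so the
   bytes move in word (4-byte) and smaller pieces instead.  For the
   exactly-8-bytes case that is simply (a sketch; fixed-size memcpy
   lowers to two word loads/stores):

     memcpy (d, s, 4);
     memcpy (d + 4, s + 4, 4);
*/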
	/* Handle copies of 32+ bytes where DST is aligned (to quadword) but
	   SRC is not.  Use aligned quadword loads from SRC, shifted to realign
	   the data, allowing for aligned DST stores.  */
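/* The realignment trick never loads SRC at a misaligned address: it
   loads the two aligned quadwords that straddle the wanted data and
   merges them.  The same idea with 64-bit words, as a little-endian C
   sketch (in the real code lvsl/vperm do this 16 bytes at a time):

     #include <stdint.h>

     static uint64_t
     merge (uint64_t lo, uint64_t hi, unsigned misal)	// misal in 1..7
     {
       return (lo >> (8 * misal)) | (hi << (8 * (8 - misal)));
     }
*/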
L(copy_GE_32_unaligned):
	clrldi	0,0,60	      /* Number of bytes until the 1st
				 quadword.  */
	andi.	11,3,15	      /* Check alignment of DST (against
				 quadwords).  */
	srdi	9,5,4	      /* Number of full quadwords remaining.  */

	beq	L(copy_GE_32_unaligned_cont)
	/* DST is not quadword aligned, get it aligned.  */

	/* Vector instructions work best when proper alignment (16 bytes)
	   is present.  Move 0~15 bytes as needed to get DST quadword-aligned.  */
1:	/* Copy 1 byte.  */

2:	/* Copy 2 bytes.  */

4:	/* Copy 4 bytes.  */

8:	/* Copy 8 bytes.  */
	clrldi	10,12,60      /* Check alignment of SRC.  */
	srdi	9,31,4	      /* Number of full quadwords remaining.  */

	/* The proper alignment is present; it is OK to copy the bytes now.  */
L(copy_GE_32_unaligned_cont):
	/* Set up two indexes to speed up the indexed vector operations.  */

	li	6,16	      /* Index for 16-byte offsets.  */
	li	7,32	      /* Index for 32-byte offsets.  */

	srdi	8,31,5	      /* Set up the loop counter.  */
#ifdef __LITTLE_ENDIAN__

	bf	31,L(setup_unaligned_loop)

	/* Copy another 16 bytes to align to 32 bytes due to the loop.  */

#ifdef __LITTLE_ENDIAN__

L(setup_unaligned_loop):

	ble	cr6,L(end_unaligned_loop)
	/* Copy 32 bytes at a time using vector instructions.  */

	/* Note: vr6/vr10 may contain data that was already copied,
	   but in order to get proper alignment, we may have to copy
	   some portions again.  This is faster than having unaligned
	   vector instructions though.  */

	lvx	4,11,6	      /* vr4 = r11+16.  */
#ifdef __LITTLE_ENDIAN__

	lvx	3,11,7	      /* vr3 = r11+32.  */
#ifdef __LITTLE_ENDIAN__

	bdnz	L(unaligned_loop)
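/* Schematically, one iteration of the loop moves 32 bytes with two
   aligned 16-byte loads at the r6/r7 offsets and two permuted stores,
   carrying the last load into the next iteration (pseudo-C; 'vperm'
   stands in for the realigning permute, every other name is
   hypothetical):

     for (; iters--; s16 += 2, d16 += 2)
       {
         v1 = s16[1];			// lvx 4,11,6
         d16[0] = vperm (v0, v1, mask);	// store realigned low 16 bytes
         v2 = s16[2];			// lvx 3,11,7
         d16[1] = vperm (v1, v2, mask);	// store realigned high 16 bytes
         v0 = v2;			// reuse last load next time
       }
*/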
L(end_unaligned_loop):

	/* Check for tail bytes.  */

	/* We have 1~15 tail bytes to copy, and DST is quadword aligned.  */
8:	/* Copy 8 bytes.  */

4:	/* Copy 4 bytes.  */

2:	/* Copy 2~3 bytes.  */

1:	/* Copy 1 byte.  */

0:	/* Return DST + LEN pointer.  */
END_GEN_TB (MEMPCPY,TB_TOCLESS)

libc_hidden_def (__mempcpy)
weak_alias (__mempcpy, mempcpy)
libc_hidden_builtin_def (mempcpy)