/* Optimized memcpy implementation for PowerPC32/POWER7.
   Copyright (C) 2010-2014 Free Software Foundation, Inc.
   Contributed by Luis Machado <luisgpm@br.ibm.com>.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* __ptr_t [r3] memcpy (__ptr_t dst [r3], __ptr_t src [r4], size_t len [r5]);
   Returns 'dst'.  */
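
/* Overview of the paths below: copies of 0~31 bytes take a short-move
   path; copies of 32+ bytes whose SRC and DST alignments match use
   doubleword loads and an unrolled copy loop; copies of 32+ bytes with
   mismatched alignments use aligned vector loads merged with vperm so
   that all stores are aligned.  */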
	cfi_adjust_cfa_offset(32)
	cfi_offset(30,(20-32))
	ble	cr1, L(copy_LT_32)	/* If move < 32 bytes use short move code.  */
	andi.	11,3,7		/* Check alignment of DST.  */
	clrlwi	10,4,29		/* Check alignment of SRC.  */
	cmplw	cr6,10,11	/* SRC and DST alignments match?  */
	bne	cr6,L(copy_GE_32_unaligned)
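
	/* Note: clrlwi rD,rS,29 clears all but the low 3 bits, i.e. it
	   computes rS & 0x7, so the compare above asks whether SRC and
	   DST have the same offset within a doubleword; if so, one
	   leading copy brings both to 8-byte alignment.  */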
	srwi	9,5,3		/* Number of full doublewords remaining.  */

	beq	L(copy_GE_32_aligned_cont)
	/* Get the SRC aligned to 8 bytes.  */

	clrlwi	10,12,29	/* Check alignment of SRC again.  */
	srwi	9,31,3		/* Number of full doublewords remaining.  */

L(copy_GE_32_aligned_cont):
	/* Copy 1~3 doublewords so the main loop starts
	   at a multiple of 32 bytes.  */
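
	/* That is, peel off (doublewords % 4) copies up front so that the
	   unrolled loop below only ever processes whole 32-byte groups.  */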
1:	/* Copy 1 doubleword and set the counter.  */

	/* Main aligned copy loop.  Copies up to 128 bytes at a time.  */
	/* Check for any 32-byte or 64-byte lumps that are outside of a
	   nice 128-byte range.  R8 contains the number of 32-byte
	   lumps, so drop this into the CR, and use the SO/EQ bits to help
	   handle the 32- or 64-byte lumps.  Then handle the rest with an
	   unrolled 128-bytes-at-a-time copy loop.  */
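
	/* Roughly, in C-like pseudocode (a sketch; the helper names are
	   illustrative and do not appear in this file):

	     n32 = bytes / 32;
	     if (n32 & 1)                  -- the SO test below
	       copy_32_bytes ();
	     if (n32 & 2)                  -- the EQ test below
	       copy_64_bytes ();
	     for (ctr = n32 >> 2; ctr != 0; ctr--)
	       copy_128_bytes ();  */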
	/* If the SO bit (indicating a 32-byte lump) is not set, move along.  */
	bns	cr7,L(aligned_64byte)

	/* If the EQ bit (indicating a 64-byte lump) is not set, move along.  */
	bne	cr7,L(aligned_128setup)
	/* Set up for the 128-bytes-at-a-time copy loop.  */
	cmpwi	8,0		/* Any 4x lumps left?  */
	beq	3f		/* If not, move along.  */
	mtctr	8		/* Otherwise, load the ctr and begin.  */
	/* For the 2nd and later iterations of this loop.  */

	bdnz	L(aligned_128head)

	/* Check for tail bytes.  */
	/* At this point we have a tail of 0-7 bytes and we know that the
	   destination is doubleword-aligned.  */
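
	/* The numbered labels below peel the tail in power-of-two chunks,
	   driven by the low bits of the remaining count: bit 2 selects the
	   4-byte copy, bit 1 the 2-byte copy, and bit 0 the final byte.  */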
4:	/* Copy 4 bytes.  */
2:	/* Copy 2 bytes.  */
1:	/* Copy 1 byte.  */
0:	/* Return original DST pointer.  */
	/* Handle copies of 0~31 bytes.  */
L(copy_LT_32):
	/* At least 9 bytes to go.  */
	beq	L(copy_LT_32_aligned)

	/* Force 4-byte alignment for SRC.  */
1:	bf	31,L(end_4bytes_alignment)

L(end_4bytes_alignment):

L(copy_LT_32_aligned):
	/* At least 6 bytes to go, and SRC is word-aligned.  */

8:	/* Copy 8 bytes.  */
4:	/* Copy 4 bytes.  */
2:	/* Copy 2~3 bytes.  */

	/* Return original DST pointer.  */

1:	/* Copy 1 byte.  */
0:	/* Return original DST pointer.  */

	/* Handles copies of 0~8 bytes.  */
	/* Although we could have used lfd/stfd here, they are still
	   slow for unaligned cases.  */
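
	/* (A misaligned lfd/stfd can take an alignment penalty, or even an
	   alignment interrupt, on some implementations, so plain integer
	   word loads and stores are used instead.)  */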
	/* Return original DST pointer.  */

4:	/* Copies 4~7 bytes.  */

	/* Return original DST pointer.  */

5:	/* Copy 1 byte.  */
0:	/* Return original DST pointer.  */
	/* Handle copies of 32+ bytes where DST is aligned (to quadword) but
	   SRC is not.  Use aligned quadword loads from SRC, shifted to realign
	   the data, allowing for aligned DST stores.  */
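
	/* This is the classic Altivec realignment idiom (a sketch; the
	   actual register assignment below differs): a permute-control
	   vector built by lvsl (lvsr on little-endian) selects the bytes
	   spanning two adjacent aligned quadwords:

	     vmask = lvsl (0, src);
	     v0 = lvx (src);       -- aligned load; lvx ignores low 4 bits
	     v1 = lvx (src + 16);
	     stvx (vperm (v0, v1, vmask), 0, dst);

	   yielding 16 realigned bytes per aligned store.  */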
L(copy_GE_32_unaligned):
	andi.	11,3,15		/* Check alignment of DST.  */
	clrlwi	0,0,28		/* Number of bytes until the 1st
				   quadword.  */
	srwi	9,5,4		/* Number of full quadwords remaining.  */

	beq	L(copy_GE_32_unaligned_cont)

	/* DST is not quadword aligned, get it aligned.  */
	/* Vector instructions work best when proper alignment (16 bytes)
	   is present.  Move 0~15 bytes as needed to get DST quadword-aligned.  */
1:	/* Copy 1 byte.  */
2:	/* Copy 2 bytes.  */
4:	/* Copy 4 bytes.  */
8:	/* Copy 8 bytes.  */

	clrlwi	10,12,28	/* Check alignment of SRC.  */
	srwi	9,31,4		/* Number of full quadwords remaining.  */
	/* The proper alignment is present; it is OK to copy the bytes now.  */
L(copy_GE_32_unaligned_cont):
	/* Set up two indexes to speed up the indexed vector operations.  */
	li	6,16		/* Index for 16-byte offsets.  */
	li	7,32		/* Index for 32-byte offsets.  */
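
	/* lvx/stvx form their effective address as (rA|0)+rB, so keeping
	   the constants 16 and 32 in registers lets the loop reach
	   SRC+16 and SRC+32 without extra pointer arithmetic.  */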
	srwi	8,31,5		/* Set up the loop counter.  */
#ifdef __LITTLE_ENDIAN__
	lvsr	5,0,12		/* Permute-control vector for the merges.  */
#else
	lvsl	5,0,12
#endif
	lvx	3,0,12		/* vr3 = first aligned quadword of SRC.  */
	bf	31,L(setup_unaligned_loop)
	/* Copy another 16 bytes to align to 32 bytes due to the loop.  */
	lvx	4,12,6
#ifdef __LITTLE_ENDIAN__
	vperm	6,4,3,5
#else
	vperm	6,3,4,5
#endif
L(setup_unaligned_loop):
	ble	cr6,L(end_unaligned_loop)

	/* Copy 32 bytes at a time using vector instructions.  */
	/* Note: vr6/vr10 may contain data that was already copied,
	   but in order to get proper alignment, we may have to copy
	   some portions again.  This is still faster than resorting
	   to unaligned memory accesses, though.  */
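
	/* (lvx silently ignores the low four address bits, so there is no
	   unaligned vector load to fall back on; merging two aligned loads
	   with vperm is the supported approach, even at the cost of
	   touching a few bytes twice.)  */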
	lvx	4,11,6		/* vr4 = r11+16.  */
#ifdef __LITTLE_ENDIAN__
	vperm	6,4,3,5
#else
	vperm	6,3,4,5
#endif
	lvx	3,11,7		/* vr3 = r11+32.  */
#ifdef __LITTLE_ENDIAN__
	vperm	10,3,4,5
#else
	vperm	10,4,3,5
#endif
	bdnz	L(unaligned_loop)

L(end_unaligned_loop):

	/* Check for tail bytes.  */
	/* We have 1~15 tail bytes to copy, and DST is quadword-aligned.  */
8:	/* Copy 8 bytes.  */
4:	/* Copy 4 bytes.  */
2:	/* Copy 2~3 bytes.  */
1:	/* Copy 1 byte.  */
0:	/* Return original DST pointer.  */
libc_hidden_builtin_def (memcpy)