1 /* Optimized memcpy implementation for PowerPC A2.
2 Copyright (C) 2010-2023 Free Software Foundation, Inc.
3 This file is part of the GNU C Library.
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <https://www.gnu.org/licenses/>. */
20 #include <rtld-global-offsets.h>
22 #define PREFETCH_AHEAD 4 /* no cache lines SRC prefetching ahead */
23 #define ZERO_AHEAD 2 /* no cache lines DST zeroing ahead */
/* memcpy entry path and destination-alignment fixup (excerpt).
   Register contract per the comments below: r3 = dst (also the return
   value), r4 = src, r5 = len, r6 = working dst, r7 = src-dst offset,
   r8 = bytes to the next 16-byte dst boundary.
   NOTE(review): the embedded leading numerals are original-source line
   numbers; intervening lines (including the entry label and the small-copy
   branch taken when cr1 says len < 16) are elided from this excerpt.  */
29 dcbt 0,r4 /* Prefetch ONE SRC cacheline */
30 cmplwi cr1,r5,16 /* is size < 16 ? */
31 mr r6,r3 /* Copy dest reg to r6; */
35 /* Big copy (16 bytes or more)
37 Figure out how far to the nearest quadword boundary, or if we are
40 r3 - return value (always)
41 r4 - current source addr
43 r6 - current dest addr
/* neg+clrlwi below computes (-dst) mod 16, i.e. the byte count needed to
   reach the next 16-byte destination boundary.  */
46 neg r8,r3 /* LS 4 bits = # bytes to 8-byte dest bdry */
47 clrlwi r8,r8,32-4 /* align to 16byte boundary */
48 sub r7,r4,r3 /* compute offset to src from dest */
49 cmplwi cr0,r8,0 /* Were we aligned on a 16 byte bdy? */
54 /* Destination is not aligned on quadword boundary. Get us to one.
56 r3 - return value (always)
57 r4 - current source addr
59 r6 - current dest addr
60 r7 - offset to src from dest
61 r8 - number of bytes to quadword boundary
/* cr7 bits now select which of the 1/2/4/8-byte moves below run; the
   matching stores and the conditional branches between them are in the
   elided lines -- verify against the full source.  */
64 mtcrf 0x01,r8 /* put #bytes to boundary into cr7 */
65 subf r5,r8,r5 /* adjust remaining len */
68 lbzx r0,r7,r6 /* copy 1 byte addr */
73 lhzx r0,r7,r6 /* copy 2 byte addr */
78 lwzx r0,r7,r6 /* copy 4 byte addr */
83 lfdx r0,r7,r6 /* copy 8 byte addr */
/* Loads above address src as (src-dst offset) + current dst, so only r6
   needs bumping as we go; r4 is resynchronized here.  */
87 add r4,r7,r6 /* update src addr */
91 /* Dest is quadword aligned now.
93 Lots of decisions to make. If we are copying less than a cache
94 line we won't be here long. If we are not on a cache line
95 boundary we need to get there. And then we need to figure out
96 how many cache lines ahead to pre-touch.
98 r3 - return value (always)
99 r4 - current source addr
101 r6 - current dest addr
111 /* Establishes GOT addressability so we can load the cache line size
112 from rtld_global_ro. This value was set from the aux vector during
/* SETUP_GOT_ACCESS + the addis/addi pair materialize the GOT base in r9;
   __GLRO then loads _dl_cache_line_size (runtime-detected, per the comment
   above) into r9.  The compare that feeds cr5/cr0 for the "size set?" and
   "128-byte line?" decisions is in elided lines.  */
114 SETUP_GOT_ACCESS(r9,got_label)
115 addis r9,r9,_GLOBAL_OFFSET_TABLE_-got_label@ha
116 addi r9,r9,_GLOBAL_OFFSET_TABLE_-got_label@l
119 __GLRO(r9, r9, _dl_cache_line_size,
120 RTLD_GLOBAL_RO_DL_CACHE_LINE_SIZE_OFFSET)
/* Fallback path: if cr5 says the cache line size was never set (no aux
   vector value), do a simple byte copy with no cache optimization.
   An odd length is handled by one leading single-byte copy; the loop then
   consumes 2 bytes per iteration (addic. r5,-2).  */
123 bne+ cr5,L(cachelineset)
125 /* Cache line size not set: generic byte copy without much optimization */
126 andi. r0,r5,1 /* If length is odd copy one byte. */
127 beq L(cachelinenotset_align)
128 lbz r7,0(r4) /* Read one byte from source. */
129 addi r5,r5,-1 /* Update length. */
130 addi r4,r4,1 /* Update source pointer address. */
131 stb r7,0(r6) /* Store one byte on dest. */
132 addi r6,r6,1 /* Update dest pointer address. */
133 L(cachelinenotset_align):
134 cmpwi cr7,r5,0 /* If length is 0 return. */
136 ori r2,r2,0 /* Force a new dispatch group. */
137 L(cachelinenotset_loop):
138 addic. r5,r5,-2 /* Update length. */
/* NOTE(review): only one lbz/stb pair is visible although the comments say
   "2 bytes" and r4/r6 advance by 2 -- the second lbz/stb pair is presumably
   in the elided original lines 140/143; confirm against the full source.  */
139 lbz r7,0(r4) /* Load 2 bytes from source. */
141 addi r4,r4,2 /* Update source pointer address. */
142 stb r7,0(r6) /* Store 2 bytes on dest. */
144 addi r6,r6,2 /* Update dest pointer address. */
145 bne L(cachelinenotset_loop)
/* 64-byte-cacheline copy path.  Strategy per the comments below:
   (1) copy quadwords up to the next 64-byte boundary, (2) prefetch up to
   PREFETCH_AHEAD source lines, (3) copy whole lines (dcbz distance
   64*ZERO_AHEAD+8 set into r11), (4) finish the sub-line tail at
   L(lessthancacheline)/L(shortcopy).  r10 appears to hold the full-line
   count here and r12 the prefetch distance.  Loop bodies between the
   labels are elided in this excerpt.  */
153 cmpw cr5,r5,r10 /* Less than a cacheline to go? */
155 neg r7,r6 /* How far to next cacheline bdy? */
/* r6/r4 are biased by -8 so the (elided) copy loops can use stdu/ldu
   update-form accesses.  */
157 addi r6,r6,-8 /* prepare for stdu */
159 addi r4,r4,-8 /* prepare for ldu */
162 ble+ cr5,L(lessthancacheline)
164 beq- cr0,L(big_lines) /* 128 byte line code */
169 /* More than a cacheline left to go, and using 64 byte cachelines */
171 clrlwi r7,r7,32-6 /* How far to next cacheline bdy? */
173 cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
175 /* Reduce total len by what it takes to get to the next cache line */
177 srwi r7,r7,4 /* How many qws to get to the line bdy? */
179 /* How many full cache lines to copy after getting to a line bdy? */
182 cmplwi r10,0 /* If no full cache lines to copy ... */
183 li r11,0 /* number cachelines to copy with prefetch */
184 beq L(nocacheprefetch)
187 /* We are here because we have at least one full cache line to copy,
188 and therefore some pre-touching to do. */
190 cmplwi r10,PREFETCH_AHEAD
191 li r12,64+8 /* prefetch distance */
192 ble L(lessthanmaxprefetch)
194 /* We can only do so much pre-fetching. R11 will have the count of
195 lines left to prefetch after the initial batch of prefetches
198 subi r11,r10,PREFETCH_AHEAD
199 li r10,PREFETCH_AHEAD
201 L(lessthanmaxprefetch):
204 /* At this point r10/ctr hold the number of lines to prefetch in this
205 initial batch, and r11 holds any remainder. */
213 /* Prefetching is done, or was not needed.
215 cr6 - are we on a cacheline boundary already?
216 r7 - number of quadwords to the next cacheline boundary
222 cmplwi cr1,r5,64 /* Less than a cache line to copy? */
224 /* How many bytes are left after we copy whatever full
225 cache lines we can get? */
228 beq cr6,L(cachelinealigned)
231 /* Copy quadwords up to the next cacheline boundary */
238 bdnz L(aligntocacheline)
242 L(cachelinealigned): /* copy while cache lines */
244 blt- cr1,L(lessthancacheline) /* size <64 */
251 li r11,64*ZERO_AHEAD +8 /* DCBZ dist */
254 /* Copy whole cachelines, optimized by prefetching SRC cacheline */
255 L(loop): /* Copy aligned body */
256 dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
283 L(loop2): /* Copy aligned body */
/* Tail: copy remaining 16-byte quadwords (count = r5 >> 4 via ctr), then
   the final 0-15 bytes with the single 8/4/2/1-byte moves below.  */
306 L(lessthancacheline): /* Was there less than cache to do ? */
308 srwi r7,r5,4 /* divide size by 16 */
317 bdnz L(copy_remaining)
319 L(do_lt16): /* less than 16 ? */
320 cmplwi cr0,r5,0 /* copy remaining bytes (0-15) */
321 beqlr+ /* no rest to copy */
325 L(shortcopy): /* SIMPLE COPY to handle size =< 15 bytes */
329 lfdx fp9,r7,r6 /* copy 8 byte */
334 lwzx r0,r7,r6 /* copy 4 byte */
339 lhzx r0,r7,r6 /* copy 2 byte */
344 lbzx r0,r7,r6 /* copy 1 byte */
/* 128-byte-cacheline variant of the path above (reached via the
   L(big_lines) branch): same structure, with the boundary mask widened to
   7 bits, prefetch distance 128+8, and dcbz distance 128*ZERO_AHEAD+8.
   It re-joins the common tail at L(lessthancacheline).  Loop bodies and
   the prefetch loop body are elided in this excerpt.  */
353 /* Similar to above, but for use with 128 byte lines. */
358 clrlwi r7,r7,32-7 /* How far to next cacheline bdy? */
360 cmplwi cr6,r7,0 /* Are we on a cacheline bdy already? */
362 /* Reduce total len by what it takes to get to the next cache line */
364 srwi r7,r7,4 /* How many qw to get to the line bdy? */
366 /* How many full cache lines to copy after getting to a line bdy? */
369 cmplwi r10,0 /* If no full cache lines to copy ... */
370 li r11,0 /* number cachelines to copy with prefetch */
371 beq L(nocacheprefetch_128)
374 /* We are here because we have at least one full cache line to copy,
375 and therefore some pre-touching to do. */
377 cmplwi r10,PREFETCH_AHEAD
378 li r12,128+8 /* prefetch distance */
379 ble L(lessthanmaxprefetch_128)
381 /* We can only do so much pre-fetching. R11 will have the count of
382 lines left to prefetch after the initial batch of prefetches
385 subi r11,r10,PREFETCH_AHEAD
386 li r10,PREFETCH_AHEAD
388 L(lessthanmaxprefetch_128):
391 /* At this point r10/ctr hold the number of lines to prefetch in this
392 initial batch, and r11 holds any remainder. */
397 bdnz L(prefetchSRC_128)
400 /* Prefetching is done, or was not needed.
402 cr6 - are we on a cacheline boundary already?
403 r7 - number of quadwords to the next cacheline boundary
406 L(nocacheprefetch_128):
409 cmplwi cr1,r5,128 /* Less than a cache line to copy? */
411 /* How many bytes are left after we copy whatever full
412 cache lines we can get? */
415 beq cr6,L(cachelinealigned_128)
418 /* Copy quadwords up to the next cacheline boundary */
420 L(aligntocacheline_128):
425 bdnz L(aligntocacheline_128)
428 L(cachelinealigned_128): /* copy while cache lines */
430 blt- cr1,L(lessthancacheline) /* size <128 */
437 li r11,128*ZERO_AHEAD +8 /* DCBZ dist */
440 /* Copy whole cachelines, optimized by prefetching SRC cacheline */
441 L(loop_128): /* Copy aligned body */
442 dcbt r12,r4 /* PREFETCH SOURCE some cache lines ahead */
485 L(loop2_128): /* Copy aligned body */
/* Residual bytes are handled by the shared 64-byte-path tail.  */
521 b L(lessthancacheline)
/* Exports the internal symbol per glibc convention.  */
525 libc_hidden_builtin_def (memcpy)