1 /* Optimized memcpy for Qualcomm Falkor processor.
2 Copyright (C) 2017-2018 Free Software Foundation, Inc.
4 This file is part of the GNU C Library.
6 The GNU C Library is free software; you can redistribute it and/or
7 modify it under the terms of the GNU Lesser General Public
8 License as published by the Free Software Foundation; either
9 version 2.1 of the License, or (at your option) any later version.
11 The GNU C Library is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
16 You should have received a copy of the GNU Lesser General Public
17 License along with the GNU C Library. If not, see
18 <http://www.gnu.org/licenses/>. */
24 /* Assumptions: ARMv8-a, AArch64, falkor, unaligned accesses. */
38 /* Copies are split into 3 main cases:
40 1. Small copies of up to 32 bytes
41 2. Medium copies of 33..128 bytes which are fully unrolled
42 3. Large copies of more than 128 bytes.
44 Large copies align the source to a quad word and use an unrolled loop
45 processing 64 bytes per iteration.
47 FALKOR-SPECIFIC DESIGN:
49 The smallest copies (32 bytes or less) focus on optimal pipeline usage,
50 which is why the redundant copies of 0-3 bytes have been replaced with
51 conditionals, since the former would unnecessarily break across multiple
52 issue groups. The medium copy group has been enlarged to 128 bytes since
53 bumping up the small copies up to 32 bytes allows us to do that without
54 cost and also allows us to reduce the size of the prep code before loop64.
56 All copies are done only via two registers r6 and r7. This is to ensure
57 that all loads hit a single hardware prefetcher which can get correctly
58 trained to prefetch a single stream.
60 The non-temporal stores help optimize cache utilization. */
63 ENTRY_ALIGN (__memcpy_falkor, 6)

   /* srcend/dstend point one byte past the end of each buffer; all of
      the tail copies below address backwards from them with negative
      offsets, so the final bytes are covered exactly whatever the
      residual count is.  */
66 add srcend, src, count
67 add dstend, dstin, count

   /* NOTE(review): this extract omits several lines of the original
      file (register alias definitions, the size-dispatch compares and
      branches, and the loop/return labels), so the instructions below
      are NOT a contiguous stream -- confirm against the full file
      before editing.  */

74 /* Medium copies: 33..128 bytes. */
   /* Copy 48 bytes forward from src+16 and 64 bytes backward from the
      end; for counts in 33..128 these two ranges overlap harmlessly so
      no finer size test is needed.  (The offset-0 load/store pair is
      among the lines missing from this extract -- TODO confirm.)  Only
      the single A_l/A_h register pair is used, per the
      single-prefetch-stream design note in the file header.  */
76 ldp A_l, A_h, [src, 16]
77 stp A_l, A_h, [dstin, 16]
79 ldp A_l, A_h, [src, 32]
80 stp A_l, A_h, [dstin, 32]
81 ldp A_l, A_h, [src, 48]
82 stp A_l, A_h, [dstin, 48]
83 ldp A_l, A_h, [srcend, -64]
84 stp A_l, A_h, [dstend, -64]
85 ldp A_l, A_h, [srcend, -48]
86 stp A_l, A_h, [dstend, -48]
88 ldp A_l, A_h, [srcend, -32]
89 stp A_l, A_h, [dstend, -32]
90 ldp A_l, A_h, [srcend, -16]
91 stp A_l, A_h, [dstend, -16]

95 /* Small copies: 0..32 bytes. */
   /* 17..32 bytes: 16 from the start plus 16 from the end; the ldp
      that fills A_l/A_h from [src] is not visible in this extract --
      presumably it sits on the omitted line just above.  */
101 stp A_l, A_h, [dstin]
102 ldp A_l, A_h, [srcend, -16]
103 stp A_l, A_h, [dstend, -16]
   /* Size-dependent tails: 8-, 4- and 2-byte end copies.  The guarding
      compares/branches (and the matching copies from the start of the
      buffer) are among the omitted lines; each pair below only handles
      the trailing bytes via srcend/dstend.  A_lw is the 32-bit view of
      A_l for the sub-word accesses.  */
111 ldr A_l, [srcend, -8]
112 str A_l, [dstend, -8]
120 ldr A_lw, [srcend, -4]
121 str A_lw, [dstend, -4]
129 ldrh A_lw, [srcend, -2]
130 strh A_lw, [dstend, -2]

141 /* Align SRC to 16 bytes and copy; that way at least one of the
142 accesses is aligned throughout the copy sequence.
144 The count is off by 0 to 15 bytes, but this is OK because we trim
145 off the last 64 bytes to copy off from the end. Due to this the
146 loop never runs out of bounds. */
   /* Reserve the final 64 bytes (copied unconditionally after the
      loop) plus up to 16 bytes of alignment slack.  */
149 sub count, count, 64 + 16
   /* tmp1 presumably holds the low src bits consumed by the alignment
      step, restoring the bytes the alignment skipped -- the
      computation of tmp1 is not visible in this extract; confirm
      against the full file.  */
153 add count, count, tmp1

   /* 64-bytes-per-iteration main loop.  Each ldp uses pre-index
      writeback ([src, 16]!), so src advances by 16 before every load
      and the loads stay quad-word aligned after the prologue above.
      Stores use stnp (non-temporal pair) per the cache-utilization
      note in the header.  subs decrements the remaining count and
      sets flags for the loop branch (the branch itself and the dst
      base setup are among the omitted lines).  */
156 ldp A_l, A_h, [src, 16]!
157 stnp A_l, A_h, [dst, 16]
158 ldp A_l, A_h, [src, 16]!
159 subs count, count, 64
160 stnp A_l, A_h, [dst, 32]
161 ldp A_l, A_h, [src, 16]!
162 stnp A_l, A_h, [dst, 48]
163 ldp A_l, A_h, [src, 16]!
164 stnp A_l, A_h, [dst, 64]

168 /* Write the last full set of 64 bytes. The remainder is at most 64
169 bytes, so it is safe to always copy 64 bytes from the end even if
170 there is just 1 byte left. */
   /* Backward tail, mirroring the medium-copy epilogue but with
      non-temporal stores to match the loop above.  */
172 ldp A_l, A_h, [srcend, -64]
173 stnp A_l, A_h, [dstend, -64]
174 ldp A_l, A_h, [srcend, -48]
175 stnp A_l, A_h, [dstend, -48]
176 ldp A_l, A_h, [srcend, -32]
177 stnp A_l, A_h, [dstend, -32]
178 ldp A_l, A_h, [srcend, -16]
179 stnp A_l, A_h, [dstend, -16]

   /* Standard glibc sysdep macros: close the function and emit the
      internal (hidden) alias used for intra-libc calls.  */
182 END (__memcpy_falkor)
183 libc_hidden_builtin_def (__memcpy_falkor)