/* Copy SIZE bytes from SRC to DEST.
   For UltraSPARC-III.
   Copyright (C) 2001, 2003 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller (davem@redhat.com)

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>

#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
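
/* ASI_BLK_P (0xf0) is the primary block-transfer ASI: each
   "stda %f16, [...] ASI_BLK_P" below writes a full 64-byte block from
   %f16-%f30 with a single instruction.  FPRS_FEF is the FPU-enable bit
   in %fprs; VISEntryHalf saves the caller's %fprs in %o5 and enables
   the FPU, and VISExitHalf writes the saved enable state back.  */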

#ifndef XCC
#define USE_BPR
#define XCC xcc
#endif

#if !defined NOT_IN_libc

	.register	%g2,#scratch
	.register	%g3,#scratch
	.register	%g6,#scratch

	.text

	/* Special/non-trivial issues of this code:
	 *
	 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
	 * 2) Only low 32 FPU registers are used so that only the
	 *    lower half of the FPU register set is dirtied by this
	 *    code.  This is especially important in the kernel.
	 * 3) This code never prefetches cachelines past the end
	 *    of the source buffer.
	 *
	 * The cheetah's flexible spine, oversized liver, enlarged heart,
	 * slender muscular body, and claws make it the swiftest hunter
	 * in Africa and the fastest animal on land.  Can reach speeds
	 * of up to 2.4GB per second.
	 */
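
	/* Rough shape of the routine below:
	 *
	 *   len == 0   -> return immediately
	 *   len <= 16  -> small_copy (or small_copy_unaligned)
	 *   len <  256 -> medium_copy
	 *   len >= 256 -> byte-copy until dst is 64-byte aligned, then
	 *                 stream 64-byte blocks with faligndata + block
	 *                 stores, then mop up the tail.
	 */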
	.align	32
ENTRY(__memcpy_ultra3)

100:	/* %o0=dst, %o1=src, %o2=len */
	mov	%o0, %g5
	cmp	%o2, 0
	be,pn	%XCC, out
218:	 or	%o0, %o1, %o3
	cmp	%o2, 16
	bleu,a,pn %XCC, small_copy
	 or	%o3, %o2, %o3
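
	/* %o3 now holds dst | src (and, on the small-copy path, len as
	 * well), so a single andcc can test the alignment of all of them.
	 */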

	cmp	%o2, 256
	blu,pt	%XCC, medium_copy
	 andcc	%o3, 0x7, %g0

	ba,pt	%xcc, enter
	 andcc	%o0, 0x3f, %g2

	/* Here len >= 256 and condition codes reflect execution
	 * of "andcc %o0, 0x3f, %g2", done just above.
	 */
	.align	64
enter:
	/* Is 'dst' already aligned on a 64-byte boundary? */
	be,pt	%XCC, 2f

	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
	 sub	%g2, 0x40, %g2
	sub	%g0, %g2, %g2
	sub	%o2, %g2, %o2

	/* Copy %g2 bytes from src to dst, one byte at a time. */
1:	ldub	[%o1 + 0x00], %o3
	add	%o1, 0x1, %o1
	add	%o0, 0x1, %o0
	subcc	%g2, 0x1, %g2

	bg,pt	%XCC, 1b
	 stb	%o3, [%o0 + -1]
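
	/* 'dst' is now 64-byte aligned; the bytes just copied have
	 * already been subtracted from 'len'.  */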

2:	VISEntryHalf
	and	%o1, 0x7, %g1
	ba,pt	%xcc, begin
	 alignaddr %o1, %g0, %o1
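
	/* %g1 keeps (src & 0x7) for the cleanup code.  alignaddr rounds
	 * src down to an 8-byte boundary and records the offset in %gsr,
	 * so every ldd below is aligned and faligndata re-assembles the
	 * bytes that straddle doubleword boundaries.  */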

	.align	64
begin:
	prefetch [%o1 + 0x000], #one_read
	prefetch [%o1 + 0x040], #one_read
	andn	%o2, (0x40 - 1), %o4
	prefetch [%o1 + 0x080], #one_read
	prefetch [%o1 + 0x0c0], #one_read
	ldd	[%o1 + 0x000], %f0
	prefetch [%o1 + 0x100], #one_read
	ldd	[%o1 + 0x008], %f2
	prefetch [%o1 + 0x140], #one_read
	ldd	[%o1 + 0x010], %f4
	prefetch [%o1 + 0x180], #one_read
	faligndata %f0, %f2, %f16
	ldd	[%o1 + 0x018], %f6
	faligndata %f2, %f4, %f18
	ldd	[%o1 + 0x020], %f8
	faligndata %f4, %f6, %f20
	ldd	[%o1 + 0x028], %f10
	faligndata %f6, %f8, %f22

	ldd	[%o1 + 0x030], %f12
	faligndata %f8, %f10, %f24
	ldd	[%o1 + 0x038], %f14
	faligndata %f10, %f12, %f26
	ldd	[%o1 + 0x040], %f0

	sub	%o4, 0x80, %o4
	add	%o1, 0x40, %o1
	ba,pt	%xcc, loop
	 srl	%o4, 6, %o3
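
	/* %o4 = (len & ~0x3f) - 0x80, so %o3 is the number of full
	 * 64-byte blocks minus two: the loop stores one block per
	 * iteration and loopfini stores the remaining two.  */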

	.align	64
loop:
	ldd	[%o1 + 0x008], %f2
	faligndata %f12, %f14, %f28
	ldd	[%o1 + 0x010], %f4
	faligndata %f14, %f0, %f30
	stda	%f16, [%o0] ASI_BLK_P
	ldd	[%o1 + 0x018], %f6
	faligndata %f0, %f2, %f16

	ldd	[%o1 + 0x020], %f8
	faligndata %f2, %f4, %f18
	ldd	[%o1 + 0x028], %f10
	faligndata %f4, %f6, %f20
	ldd	[%o1 + 0x030], %f12
	faligndata %f6, %f8, %f22
	ldd	[%o1 + 0x038], %f14
	faligndata %f8, %f10, %f24

	ldd	[%o1 + 0x040], %f0
	prefetch [%o1 + 0x180], #one_read
	faligndata %f10, %f12, %f26
	subcc	%o3, 0x01, %o3
	add	%o1, 0x40, %o1
	bg,pt	%XCC, loop
	 add	%o0, 0x40, %o0

	/* Finally we copy the last full 64-byte block. */
loopfini:
	ldd	[%o1 + 0x008], %f2
	faligndata %f12, %f14, %f28
	ldd	[%o1 + 0x010], %f4
	faligndata %f14, %f0, %f30
	stda	%f16, [%o0] ASI_BLK_P
	ldd	[%o1 + 0x018], %f6
	faligndata %f0, %f2, %f16
	ldd	[%o1 + 0x020], %f8
	faligndata %f2, %f4, %f18
	ldd	[%o1 + 0x028], %f10
	faligndata %f4, %f6, %f20
	ldd	[%o1 + 0x030], %f12
	faligndata %f6, %f8, %f22
	ldd	[%o1 + 0x038], %f14
	faligndata %f8, %f10, %f24
	cmp	%g1, 0
	be,pt	%XCC, 1f
	 add	%o0, 0x40, %o0
	ldd	[%o1 + 0x040], %f0
1:	faligndata %f10, %f12, %f26
	faligndata %f12, %f14, %f28
	faligndata %f14, %f0, %f30
	stda	%f16, [%o0] ASI_BLK_P
	add	%o0, 0x40, %o0
	add	%o1, 0x40, %o1
	membar	#Sync
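
	/* The block stores above are weakly ordered; the membar #Sync
	 * makes them globally visible before the ordinary stores in the
	 * tail code below.  */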

	/* Now we copy the (len modulo 64) bytes at the end.
	 * Note how we borrow the %f0 loaded above.
	 *
	 * Also notice how this code is careful not to perform a
	 * load past the end of the src buffer.
	 */
loopend:
	and	%o2, 0x3f, %o2
	andcc	%o2, 0x38, %g2
	be,pn	%XCC, endcruft
	 subcc	%g2, 0x8, %g2
	be,pn	%XCC, endcruft
	 cmp	%g1, 0

	be,a,pt	%XCC, 1f
	 ldd	[%o1 + 0x00], %f0

1:	ldd	[%o1 + 0x08], %f2
	add	%o1, 0x8, %o1
	sub	%o2, 0x8, %o2
	subcc	%g2, 0x8, %g2
	faligndata %f0, %f2, %f8
	std	%f8, [%o0 + 0x00]
	be,pn	%XCC, endcruft
	 add	%o0, 0x8, %o0
	ldd	[%o1 + 0x08], %f0
	add	%o1, 0x8, %o1
	sub	%o2, 0x8, %o2
	subcc	%g2, 0x8, %g2
	faligndata %f2, %f0, %f8
	std	%f8, [%o0 + 0x00]
	bne,pn	%XCC, 1b
	 add	%o0, 0x8, %o0

	/* If anything is left, we copy it one byte at a time.
	 * Note that %g1 is (src & 0x7) saved above before the
	 * alignaddr was performed.
	 */
endcruft:
	cmp	%o2, 0
	add	%o1, %g1, %o1
	VISExitHalf
	be,pn	%XCC, out
	 sub	%o0, %o1, %o3
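
	/* From here on %o3 = dst - src, so a store to [%o1 + %o3] hits
	 * the destination and only %o1 needs to be advanced.  */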

	andcc	%g1, 0x7, %g0
	bne,pn	%icc, small_copy_unaligned
	 andcc	%o2, 0x8, %g0
	be,pt	%icc, 1f
	 nop
	ldx	[%o1], %o5
	stx	%o5, [%o1 + %o3]
	add	%o1, 0x8, %o1

1:	andcc	%o2, 0x4, %g0
	be,pt	%icc, 1f
	 nop
	lduw	[%o1], %o5
	stw	%o5, [%o1 + %o3]
	add	%o1, 0x4, %o1

1:	andcc	%o2, 0x2, %g0
	be,pt	%icc, 1f
	 nop
	lduh	[%o1], %o5
	sth	%o5, [%o1 + %o3]
	add	%o1, 0x2, %o1

1:	andcc	%o2, 0x1, %g0
	be,pt	%icc, out
	 nop
	ldub	[%o1], %o5
	ba,pt	%xcc, out
	 stb	%o5, [%o1 + %o3]

medium_copy: /* 16 < len < 256 */
	bne,pn	%XCC, small_copy_unaligned
	 sub	%o0, %o1, %o3

medium_copy_aligned:
	andn	%o2, 0x7, %o4
	and	%o2, 0x7, %o2
1:	subcc	%o4, 0x8, %o4
	ldx	[%o1], %o5
	stx	%o5, [%o1 + %o3]
	bgu,pt	%XCC, 1b
	 add	%o1, 0x8, %o1
	andcc	%o2, 0x4, %g0
	be,pt	%XCC, 1f
	 nop
	sub	%o2, 0x4, %o2
	lduw	[%o1], %o5
	stw	%o5, [%o1 + %o3]
	add	%o1, 0x4, %o1
1:	cmp	%o2, 0
	be,pt	%XCC, out
	 nop
	ba,pt	%xcc, small_copy_unaligned
	 nop

small_copy: /* 0 < len <= 16 */
	andcc	%o3, 0x3, %g0
	bne,pn	%XCC, small_copy_unaligned
	 sub	%o0, %o1, %o3

small_copy_aligned:
	subcc	%o2, 4, %o2
	lduw	[%o1], %g1
	stw	%g1, [%o1 + %o3]
	bgu,pt	%XCC, small_copy_aligned
	 add	%o1, 4, %o1
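
	/* dst, src and len all had their low two bits clear here, so the
	 * word loop above copies exactly len bytes.  */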

out:	retl
	 mov	%g5, %o0

	.align	32
small_copy_unaligned:
	subcc	%o2, 1, %o2
	ldub	[%o1], %g1
	stb	%g1, [%o1 + %o3]
	bgu,pt	%XCC, small_copy_unaligned
	 add	%o1, 1, %o1
	retl
	 mov	%g5, %o0

END(__memcpy_ultra3)

#endif