/* Copy SIZE bytes from SRC to DEST.  For SUN4V Niagara-2.
   Copyright (C) 2007-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller (davem@davemloft.net)

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>

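/* Address-space identifiers used below.  On sun4v, 0x80 and 0x82 are
   the normal and non-faulting primary-space ASIs, 0xf0 is the 64-byte
   block load/store ASI, and 0xe2 is the block-init variant whose
   stores allocate the destination cache line without first reading
   its old contents from memory.  */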
#define ASI_BLK_INIT_QUAD_LDD_P 0xe2
#define ASI_BLK_P               0xf0
#define ASI_P                   0x80
#define ASI_PNF                 0x82

#define FPRS_FEF                0x04

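/* Enable the FPU by setting FPRS.FEF, remembering the previous %fprs
   in %o5.  VISExitHalf masks the saved value down to the FEF bit and
   writes it back, re-disabling the FPU if the caller had it disabled.
   "Half" because only the lower half of the FP register file is used,
   so no full register save/restore is needed.  */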
#define VISEntryHalf \
        rd      %fprs, %o5; \
        wr      %g0, FPRS_FEF, %fprs

#define VISExitHalf \
        and     %o5, FPRS_FEF, %o5; \
        wr      %o5, 0x0, %fprs

#define STORE_ASI       ASI_BLK_INIT_QUAD_LDD_P

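/* LOAD_BLK/STORE_BLK move a full 64-byte block between memory and a
   group of eight FP registers.  STORE_INIT uses the block-init ASI:
   it claims the destination line without fetching its old contents,
   which is safe here because every line it touches is completely
   overwritten by a following STORE_BLK.  */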
#define LOAD(type,addr,dest)    type [addr], dest
#define LOAD_BLK(addr,dest)     ldda [addr] ASI_BLK_P, dest
#define STORE(type,src,addr)    type src, [addr]
#define STORE_BLK(src,addr)     stda src, [addr] ASI_BLK_P
#define STORE_INIT(src,addr)    stxa src, [addr] STORE_ASI

#ifndef XCC
#define USE_BPR
#define XCC xcc
#endif

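/* Merge nine consecutive source doublewords (x0..x8) into the eight
   destination-aligned doublewords %f0-%f14.  The byte shift applied
   by faligndata comes from %gsr, which the alignaddr instruction
   below sets from the low bits of the source pointer.  */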
#define FREG_FROB(x0, x1, x2, x3, x4, x5, x6, x7, x8) \
        faligndata      %x0, %x1, %f0; \
        faligndata      %x1, %x2, %f2; \
        faligndata      %x2, %x3, %f4; \
        faligndata      %x3, %x4, %f6; \
        faligndata      %x4, %x5, %f8; \
        faligndata      %x5, %x6, %f10; \
        faligndata      %x6, %x7, %f12; \
        faligndata      %x7, %x8, %f14;

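/* FREG_MOVE_n copies the trailing n doublewords of the block just
   loaded into %f16-%f30 down to %f0..., where they become the leading
   inputs of the next iteration's FREG_FROB.  */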
#define FREG_MOVE_1(x0) \
        fsrc2           %x0, %f0;
#define FREG_MOVE_2(x0, x1) \
        fsrc2           %x0, %f0; \
        fsrc2           %x1, %f2;
#define FREG_MOVE_3(x0, x1, x2) \
        fsrc2           %x0, %f0; \
        fsrc2           %x1, %f2; \
        fsrc2           %x2, %f4;
#define FREG_MOVE_4(x0, x1, x2, x3) \
        fsrc2           %x0, %f0; \
        fsrc2           %x1, %f2; \
        fsrc2           %x2, %f4; \
        fsrc2           %x3, %f6;
#define FREG_MOVE_5(x0, x1, x2, x3, x4) \
        fsrc2           %x0, %f0; \
        fsrc2           %x1, %f2; \
        fsrc2           %x2, %f4; \
        fsrc2           %x3, %f6; \
        fsrc2           %x4, %f8;
#define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \
        fsrc2           %x0, %f0; \
        fsrc2           %x1, %f2; \
        fsrc2           %x2, %f4; \
        fsrc2           %x3, %f6; \
        fsrc2           %x4, %f8; \
        fsrc2           %x5, %f10;
#define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \
        fsrc2           %x0, %f0; \
        fsrc2           %x1, %f2; \
        fsrc2           %x2, %f4; \
        fsrc2           %x3, %f6; \
        fsrc2           %x4, %f8; \
        fsrc2           %x5, %f10; \
        fsrc2           %x6, %f12;
#define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \
        fsrc2           %x0, %f0; \
        fsrc2           %x1, %f2; \
        fsrc2           %x2, %f4; \
        fsrc2           %x3, %f6; \
        fsrc2           %x4, %f8; \
        fsrc2           %x5, %f10; \
        fsrc2           %x6, %f12; \
        fsrc2           %x7, %f14;
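/* FREG_LOAD_n fills n doublewords starting at BASE into the named
   registers; used to preload the partial block that precedes the
   first full 64-byte source block.  */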
#define FREG_LOAD_1(base, x0) \
        LOAD(ldd, base + 0x00, %x0)
#define FREG_LOAD_2(base, x0, x1) \
        LOAD(ldd, base + 0x00, %x0); \
        LOAD(ldd, base + 0x08, %x1);
#define FREG_LOAD_3(base, x0, x1, x2) \
        LOAD(ldd, base + 0x00, %x0); \
        LOAD(ldd, base + 0x08, %x1); \
        LOAD(ldd, base + 0x10, %x2);
#define FREG_LOAD_4(base, x0, x1, x2, x3) \
        LOAD(ldd, base + 0x00, %x0); \
        LOAD(ldd, base + 0x08, %x1); \
        LOAD(ldd, base + 0x10, %x2); \
        LOAD(ldd, base + 0x18, %x3);
#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
        LOAD(ldd, base + 0x00, %x0); \
        LOAD(ldd, base + 0x08, %x1); \
        LOAD(ldd, base + 0x10, %x2); \
        LOAD(ldd, base + 0x18, %x3); \
        LOAD(ldd, base + 0x20, %x4);
#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
        LOAD(ldd, base + 0x00, %x0); \
        LOAD(ldd, base + 0x08, %x1); \
        LOAD(ldd, base + 0x10, %x2); \
        LOAD(ldd, base + 0x18, %x3); \
        LOAD(ldd, base + 0x20, %x4); \
        LOAD(ldd, base + 0x28, %x5);
#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
        LOAD(ldd, base + 0x00, %x0); \
        LOAD(ldd, base + 0x08, %x1); \
        LOAD(ldd, base + 0x10, %x2); \
        LOAD(ldd, base + 0x18, %x3); \
        LOAD(ldd, base + 0x20, %x4); \
        LOAD(ldd, base + 0x28, %x5); \
        LOAD(ldd, base + 0x30, %x6);

#if !defined NOT_IN_libc

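/* SPARC V9 requires declaring any use of the reserved globals
   %g2/%g3/%g6; they serve purely as scratch registers here.  */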
        .register       %g2,#scratch
        .register       %g3,#scratch
        .register       %g6,#scratch

        .text

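/* __mempcpy returns DST + LEN rather than DST, so it stashes its
   return value in %g5 up front and shares the rest of the body with
   __memcpy via label 101.  */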
ENTRY(__mempcpy_niagara2)
        ba,pt           %XCC, 101f
         add            %o0, %o2, %g5
END(__mempcpy_niagara2)

        .align          32
ENTRY(__memcpy_niagara2)
100:    /* %o0=dst, %o1=src, %o2=len */
        mov             %o0, %g5
101:
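        /* Without branch-on-register, LEN is treated as a 32-bit
           quantity; zero-extend it so the 64-bit compares below are
           safe.  */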
# ifndef USE_BPR
        srl             %o2, 0, %o2
# endif
        cmp             %o2, 0
        be,pn           %XCC, 85f
218:     or             %o0, %o1, %o3
        cmp             %o2, 16
        blu,a,pn        %XCC, 80f
         or             %o3, %o2, %o3

        /* 2 blocks (128 bytes) is the minimum we can do the block
         * copy with.  We need to ensure that we'll iterate at least
         * once in the block copy loop.  At worst we'll need to align
         * the destination to a 64-byte boundary, which can consume
         * up to (64 - 1) bytes from the length before we perform the
         * block copy loop.
         *
         * However, the cut-off point, performance wise, is around
         * 4 64-byte blocks.
         */
        cmp             %o2, (4 * 64)
        blu,pt          %XCC, 75f
         andcc          %o3, 0x7, %g0

        /* %o0: dst
         * %o1: src
         * %o2: len  (known to be >= 128)
         *
         * The block copy loops can use %o4, %g2, %g3 as
         * temporaries while copying the data.  %o5 must
         * be preserved between VISEntryHalf and VISExitHalf
         */

        LOAD(prefetch, %o1 + 0x000, #one_read)
        LOAD(prefetch, %o1 + 0x040, #one_read)
        LOAD(prefetch, %o1 + 0x080, #one_read)

        /* Align destination on 64-byte boundary.  */
        andcc           %o0, (64 - 1), %o4
        be,pt           %XCC, 2f
         sub            %o4, 64, %o4
        sub             %g0, %o4, %o4   ! bytes to align dst
        sub             %o2, %o4, %o2
1:      subcc           %o4, 1, %o4
        LOAD(ldub, %o1, %g1)
        STORE(stb, %g1, %o0)
        add             %o1, 1, %o1
        bne,pt          %XCC, 1b
         add            %o0, 1, %o0

2:
        /* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
         * o5 from here until we hit VISExitHalf.
         */
        VISEntryHalf

        alignaddr       %o1, %g0, %g0

        add             %o1, (64 - 1), %o4
        andn            %o4, (64 - 1), %o4
        andn            %o2, (64 - 1), %g1
        sub             %o2, %g1, %o2

        and             %o1, (64 - 1), %g2
        add             %o1, %g1, %o1
        sub             %o0, %o4, %g3
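        /* At this point %o4 is the source rounded up to a 64-byte
           boundary, %g1 the number of bytes to move by whole blocks,
           %g2 the source offset within its block, and %g3 = dst - %o4
           so every store can address %o4 + %g3.  The ladder below is
           a binary search on %g2 that picks the loop specialized for
           this source alignment; 190 handles the block-aligned
           case.  */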
        brz,pt          %g2, 190f
         cmp            %g2, 32
        blu,a           5f
         cmp            %g2, 16
        cmp             %g2, 48
        blu,a           4f
         cmp            %g2, 40
        cmp             %g2, 56
        blu             170f
         nop
        ba,a,pt         %xcc, 180f

4:      /* 32 <= low bits < 48 */
        blu             150f
         nop
        ba,a,pt         %xcc, 160f
5:      /* 0 < low bits < 32 */
        blu,a           6f
         cmp            %g2, 8
        cmp             %g2, 24
        blu             130f
         nop
        ba,a,pt         %xcc, 140f
6:      /* 0 < low bits < 16 */
        bgeu            120f
         nop
        /* fall through for 0 < low bits < 8 */
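        /* Each loop below is software-pipelined: STORE_INIT claims
           the destination line, LOAD_BLK pulls the next 64 source
           bytes into %f16-%f30, FREG_FROB shifts the previous and
           current data into place in %f0-%f14, STORE_BLK writes the
           block out, and FREG_MOVE keeps the tail of the new block
           for the next round.  Cases 120-180 differ from 110 only in
           how many doublewords are preloaded and carried across
           iterations.  */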
110:    sub             %o4, 64, %g2
        LOAD_BLK(%g2, %f0)
1:      STORE_INIT(%g0, %o4 + %g3)
        LOAD_BLK(%o4, %f16)
        FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
        STORE_BLK(%f0, %o4 + %g3)
        FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
        subcc           %g1, 64, %g1
        add             %o4, 64, %o4
        bne,pt          %XCC, 1b
         LOAD(prefetch, %o4 + 64, #one_read)
        ba,pt           %xcc, 195f
         nop

120:    sub             %o4, 56, %g2
        FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
1:      STORE_INIT(%g0, %o4 + %g3)
        LOAD_BLK(%o4, %f16)
        FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
        STORE_BLK(%f0, %o4 + %g3)
        FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
        subcc           %g1, 64, %g1
        add             %o4, 64, %o4
        bne,pt          %XCC, 1b
         LOAD(prefetch, %o4 + 64, #one_read)
        ba,pt           %xcc, 195f
         nop

130:    sub             %o4, 48, %g2
        FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
1:      STORE_INIT(%g0, %o4 + %g3)
        LOAD_BLK(%o4, %f16)
        FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
        STORE_BLK(%f0, %o4 + %g3)
        FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
        subcc           %g1, 64, %g1
        add             %o4, 64, %o4
        bne,pt          %XCC, 1b
         LOAD(prefetch, %o4 + 64, #one_read)
        ba,pt           %xcc, 195f
         nop

140:    sub             %o4, 40, %g2
        FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
1:      STORE_INIT(%g0, %o4 + %g3)
        LOAD_BLK(%o4, %f16)
        FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
        STORE_BLK(%f0, %o4 + %g3)
        FREG_MOVE_5(f22, f24, f26, f28, f30)
        subcc           %g1, 64, %g1
        add             %o4, 64, %o4
        bne,pt          %XCC, 1b
         LOAD(prefetch, %o4 + 64, #one_read)
        ba,pt           %xcc, 195f
         nop

150:    sub             %o4, 32, %g2
        FREG_LOAD_4(%g2, f0, f2, f4, f6)
1:      STORE_INIT(%g0, %o4 + %g3)
        LOAD_BLK(%o4, %f16)
        FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
        STORE_BLK(%f0, %o4 + %g3)
        FREG_MOVE_4(f24, f26, f28, f30)
        subcc           %g1, 64, %g1
        add             %o4, 64, %o4
        bne,pt          %XCC, 1b
         LOAD(prefetch, %o4 + 64, #one_read)
        ba,pt           %xcc, 195f
         nop

160:    sub             %o4, 24, %g2
        FREG_LOAD_3(%g2, f0, f2, f4)
1:      STORE_INIT(%g0, %o4 + %g3)
        LOAD_BLK(%o4, %f16)
        FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
        STORE_BLK(%f0, %o4 + %g3)
        FREG_MOVE_3(f26, f28, f30)
        subcc           %g1, 64, %g1
        add             %o4, 64, %o4
        bne,pt          %XCC, 1b
         LOAD(prefetch, %o4 + 64, #one_read)
        ba,pt           %xcc, 195f
         nop

170:    sub             %o4, 16, %g2
        FREG_LOAD_2(%g2, f0, f2)
1:      STORE_INIT(%g0, %o4 + %g3)
        LOAD_BLK(%o4, %f16)
        FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
        STORE_BLK(%f0, %o4 + %g3)
        FREG_MOVE_2(f28, f30)
        subcc           %g1, 64, %g1
        add             %o4, 64, %o4
        bne,pt          %XCC, 1b
         LOAD(prefetch, %o4 + 64, #one_read)
        ba,pt           %xcc, 195f
         nop

180:    sub             %o4, 8, %g2
        FREG_LOAD_1(%g2, f0)
1:      STORE_INIT(%g0, %o4 + %g3)
        LOAD_BLK(%o4, %f16)
        FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
        STORE_BLK(%f0, %o4 + %g3)
        FREG_MOVE_1(f30)
        subcc           %g1, 64, %g1
        add             %o4, 64, %o4
        bne,pt          %XCC, 1b
         LOAD(prefetch, %o4 + 64, #one_read)
        ba,pt           %xcc, 195f
         nop

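        /* Source 64-byte aligned: no faligndata needed, just stream
           whole blocks through %f0-%f14.  */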
190:
1:      STORE_INIT(%g0, %o4 + %g3)
        subcc           %g1, 64, %g1
        LOAD_BLK(%o4, %f0)
        STORE_BLK(%f0, %o4 + %g3)
        add             %o4, 64, %o4
        bne,pt          %XCC, 1b
         LOAD(prefetch, %o4 + 64, #one_read)

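        /* Epilogue: compute the final destination pointer, drain the
           weakly ordered block loads/stores with membar #Sync, and
           leave VIS mode before copying any tail bytes with normal
           stores.  */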
195:
        add             %o4, %g3, %o0
        membar          #Sync

        VISExitHalf

        /* %o2 contains any final bytes still needed to be copied
         * over.  If anything is left, we copy it one byte at a time.
         */
        brz,pt          %o2, 85f
         sub            %o0, %o1, %o3
        ba,a,pt         %XCC, 90f

        .align          64
75:     /* 16 <= len < (4 * 64) */
        bne,pn          %XCC, 75f
         sub            %o0, %o1, %o3

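        /* src and dst are both 8-byte aligned here (the bne above
           sends everything else to the second 75 label): copy 16
           bytes per iteration, addressing each store as src plus
           %o3 = dst - src so that only %o1 needs to advance.  */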
72:
        andn            %o2, 0xf, %o4
        and             %o2, 0xf, %o2
1:      subcc           %o4, 0x10, %o4
        LOAD(ldx, %o1, %o5)
        add             %o1, 0x08, %o1
        LOAD(ldx, %o1, %g1)
        sub             %o1, 0x08, %o1
        STORE(stx, %o5, %o1 + %o3)
        add             %o1, 0x8, %o1
        STORE(stx, %g1, %o1 + %o3)
        bgu,pt          %XCC, 1b
         add            %o1, 0x8, %o1
73:     andcc           %o2, 0x8, %g0
        be,pt           %XCC, 1f
         nop
        sub             %o2, 0x8, %o2
        LOAD(ldx, %o1, %o5)
        STORE(stx, %o5, %o1 + %o3)
        add             %o1, 0x8, %o1
1:      andcc           %o2, 0x4, %g0
        be,pt           %XCC, 1f
         nop
        sub             %o2, 0x4, %o2
        LOAD(lduw, %o1, %o5)
        STORE(stw, %o5, %o1 + %o3)
        add             %o1, 0x4, %o1
1:      cmp             %o2, 0
        be,pt           %XCC, 85f
         nop
        ba,pt           %xcc, 90f
         nop

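        /* Unaligned medium-sized copy: byte-copy until the
           destination is 8-byte aligned, then either rejoin the
           aligned loop at 72b or, if the source is still misaligned,
           fall into the shift-and-merge loop at 8f.  */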
75:
        andcc           %o0, 0x7, %g1
        sub             %g1, 0x8, %g1
        be,pn           %icc, 2f
         sub            %g0, %g1, %g1
        sub             %o2, %g1, %o2

1:      subcc           %g1, 1, %g1
        LOAD(ldub, %o1, %o5)
        STORE(stb, %o5, %o1 + %o3)
        bgu,pt          %icc, 1b
         add            %o1, 1, %o1

2:      add             %o1, %o3, %o0
        andcc           %o1, 0x7, %g1
        bne,pt          %icc, 8f
         sll            %g1, 3, %g1

        cmp             %o2, 16
        bgeu,pt         %icc, 72b
         nop
        ba,a,pt         %xcc, 73b

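        /* Misaligned source: read aligned doublewords and reassemble
           each output word from two neighbours, shifting the earlier
           word left by (src & 7) * 8 bits (%g1) and the later one
           right by the complement (%o3 = 64 - %g1).  */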
8:      mov             64, %o3
        andn            %o1, 0x7, %o1
        LOAD(ldx, %o1, %g2)
        sub             %o3, %g1, %o3
        andn            %o2, 0x7, %o4
        sllx            %g2, %g1, %g2
1:      add             %o1, 0x8, %o1
        LOAD(ldx, %o1, %g3)
        subcc           %o4, 0x8, %o4
        srlx            %g3, %o3, %o5
        or              %o5, %g2, %o5
        STORE(stx, %o5, %o0)
        add             %o0, 0x8, %o0
        bgu,pt          %icc, 1b
         sllx           %g3, %g1, %g2

        srl             %g1, 3, %g1
        andcc           %o2, 0x7, %o2
        be,pn           %icc, 85f
         add            %o1, %g1, %o1
        ba,pt           %xcc, 90f
         sub            %o0, %o1, %o3

        .align          64
80:     /* 0 < len < 16 */
        andcc           %o3, 0x3, %g0
        bne,pn          %XCC, 90f
         sub            %o0, %o1, %o3

1:
        subcc           %o2, 4, %o2
        LOAD(lduw, %o1, %g1)
        STORE(stw, %g1, %o1 + %o3)
        bgu,pt          %XCC, 1b
         add            %o1, 4, %o1

85:     retl
         mov            %g5, %o0

        .align          32
90:
        subcc           %o2, 1, %o2
        LOAD(ldub, %o1, %g1)
        STORE(stb, %g1, %o1 + %o3)
        bgu,pt          %XCC, 90b
         add            %o1, 1, %o1
        retl
         mov            %g5, %o0

END(__memcpy_niagara2)

#endif