/* Copy SIZE bytes from SRC to DEST.
   For SPARC v7.
   Copyright (C) 1996-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller <davem@caip.rutgers.edu>,
                  Eddie C. Dost <ecd@skynet.be> and
                  Jakub Jelinek <jj@ultra.linux.cz>.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>

/* Both of these macros have to start with exactly the same insn: when
   the destination turns out to be doubleword-aligned, the code below
   branches to 82f + 4 with the first ldd of MOVE_BIGCHUNK in the delay
   slot, which stands in for the skipped first insn of
   MOVE_BIGALIGNCHUNK.  Each macro copies 32 bytes; MOVE_BIGCHUNK uses
   word stores, MOVE_BIGALIGNCHUNK doubleword stores.  */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
        ldd     [%src + offset + 0x00], %t0; \
        ldd     [%src + offset + 0x08], %t2; \
        ldd     [%src + offset + 0x10], %t4; \
        ldd     [%src + offset + 0x18], %t6; \
        st      %t0, [%dst + offset + 0x00]; \
        st      %t1, [%dst + offset + 0x04]; \
        st      %t2, [%dst + offset + 0x08]; \
        st      %t3, [%dst + offset + 0x0c]; \
        st      %t4, [%dst + offset + 0x10]; \
        st      %t5, [%dst + offset + 0x14]; \
        st      %t6, [%dst + offset + 0x18]; \
        st      %t7, [%dst + offset + 0x1c];

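/* Like MOVE_BIGCHUNK, but DST is known to be doubleword-aligned, so the
   same 32 bytes can be stored with four std insns.  */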
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
        ldd     [%src + offset + 0x00], %t0; \
        ldd     [%src + offset + 0x08], %t2; \
        ldd     [%src + offset + 0x10], %t4; \
        ldd     [%src + offset + 0x18], %t6; \
        std     %t0, [%dst + offset + 0x00]; \
        std     %t2, [%dst + offset + 0x08]; \
        std     %t4, [%dst + offset + 0x10]; \
        std     %t6, [%dst + offset + 0x18];

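/* Copy the 16 bytes at SRC - offset - 16 with doubleword loads and word
   stores.  Expanded seven times at 79: below to form the jump table for
   the 16..112 byte tail.  */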
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
        ldd     [%src - offset - 0x10], %t0; \
        ldd     [%src - offset - 0x08], %t2; \
        st      %t0, [%dst - offset - 0x10]; \
        st      %t1, [%dst - offset - 0x0c]; \
        st      %t2, [%dst - offset - 0x08]; \
        st      %t3, [%dst - offset - 0x04];

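/* As MOVE_LASTCHUNK, but with doubleword stores; expanded seven times
   at 83: below.  */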
#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
        ldd     [%src - offset - 0x10], %t0; \
        ldd     [%src - offset - 0x08], %t2; \
        std     %t0, [%dst - offset - 0x10]; \
        std     %t2, [%dst - offset - 0x08];

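/* Copy the two bytes at SRC - offset - 2; expanded seven times in the
   jump table before 89: below.  */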
#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
        ldub    [%src - offset - 0x02], %t0; \
        ldub    [%src - offset - 0x01], %t1; \
        stb     %t0, [%dst - offset - 0x02]; \
        stb     %t1, [%dst - offset - 0x01];

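/* Copy 16 bytes when SRC and DST are mutually misaligned: load two
   doublewords from the word-aligned SRC, shift each word left by shil
   and right by shir, and merge the pieces with the carry word kept in
   %prev.  The two std stores land at DST + offset + offset2 -/+ 4;
   %t4 and %prev carry over into the next expansion.  */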
#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
        ldd     [%src + offset + 0x00], %t0; \
        ldd     [%src + offset + 0x08], %t2; \
        srl     %t0, shir, %t5; \
        srl     %t1, shir, %t6; \
        sll     %t0, shil, %t0; \
        or      %t5, %prev, %t5; \
        sll     %t1, shil, %prev; \
        or      %t6, %t0, %t0; \
        srl     %t2, shir, %t1; \
        srl     %t3, shir, %t6; \
        sll     %t2, shil, %t2; \
        or      %t1, %prev, %t1; \
        std     %t4, [%dst + offset + offset2 - 0x04]; \
        std     %t0, [%dst + offset + offset2 + 0x04]; \
        sll     %t3, shil, %prev; \
        or      %t6, %t2, %t4;

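/* As SMOVE_CHUNK, but reassembles the merged words into %t0-%t3 so that
   both std stores hit DST + offset + offset2 directly; used once the
   std destination is doubleword-aligned.  */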
#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
        ldd     [%src + offset + 0x00], %t0; \
        ldd     [%src + offset + 0x08], %t2; \
        srl     %t0, shir, %t4; \
        srl     %t1, shir, %t5; \
        sll     %t0, shil, %t6; \
        or      %t4, %prev, %t0; \
        sll     %t1, shil, %prev; \
        or      %t5, %t6, %t1; \
        srl     %t2, shir, %t4; \
        srl     %t3, shir, %t5; \
        sll     %t2, shil, %t6; \
        or      %t4, %prev, %t2; \
        sll     %t3, shil, %prev; \
        or      %t5, %t6, %t3; \
        std     %t0, [%dst + offset + offset2 + 0x00]; \
        std     %t2, [%dst + offset + offset2 + 0x08];

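/* __mempcpy returns DST + LEN instead of DST.  Both entry points stash
   their return value in the stack slot at %sp + 64 and reload it into
   %o0 at every exit, sharing the whole copy body.  */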
        .text
ENTRY(__mempcpy)
        add     %o0, %o2, %g1
        ba      101f
        st      %g1, [%sp + 64]
END(__mempcpy)

        .align  4
ENTRY(memcpy)           /* %o0=dst %o1=src %o2=len */
        st      %o0, [%sp + 64]
101:
        sub     %o0, %o1, %o4
9:      andcc   %o4, 3, %o5
0:      bne     86f
        cmp     %o2, 15

        bleu    90f
        andcc   %o1, 3, %g0

        be      78f
        andcc   %o1, 4, %g0

        andcc   %o1, 1, %g0
        be      4f
        andcc   %o1, 2, %g0

        ldub    [%o1], %g2
        add     %o1, 1, %o1
        stb     %g2, [%o0]
        sub     %o2, 1, %o2
        bne     77f
        add     %o0, 1, %o0
4:      lduh    [%o1], %g2
        add     %o1, 2, %o1
        sth     %g2, [%o0]
        sub     %o2, 2, %o2
        add     %o0, 2, %o0

77:     andcc   %o1, 4, %g0
78:     be      2f
        mov     %o2, %g1

        ld      [%o1], %o4
        sub     %g1, 4, %g1
        st      %o4, [%o0]
        add     %o1, 4, %o1
        add     %o0, 4, %o0
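
/* Main copy: %g1 holds the remaining count; loop over 128-byte blocks.
   If dst is doubleword-aligned, branch to 82f + 4: the first ldd of
   MOVE_BIGCHUNK in the delay slot replaces the skipped first insn of
   MOVE_BIGALIGNCHUNK (see the comment on the macros above).  */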
2:      andcc   %g1, 0xffffff80, %g6
        be      3f
        andcc   %o0, 4, %g0

        be      82f + 4
5:      MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
        subcc   %g6, 128, %g6
        add     %o1, 128, %o1
        bne     5b
        add     %o0, 128, %o0
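
/* Tail of 16..112 bytes (%g6 = %g1 & 0x70): computed jump into the
   MOVE_LASTCHUNK table at 79:.  Each expansion is six insns (24 bytes)
   per 16 bytes copied, so the target is 80f - %g6 * 3/2, with
   %o4 = %g6 + %g6/2.  The call to 100f returns %o5 = address of the
   call at 104: minus %o4; %o7 is preserved in %g2 around it.  The
   pointers are advanced first because the table copies backwards.  */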
3:      andcc   %g1, 0x70, %g6
        be      80f
        andcc   %g1, 8, %g0

        srl     %g6, 1, %o4
        mov     %o7, %g2
        add     %g6, %o4, %o4
        add     %o1, %g6, %o1
104:    call    100f
        add     %o0, %g6, %o0
        jmpl    %o5 + (80f - 104b), %g0
        mov     %g2, %o7

79:     MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

80:     be      81f
        andcc   %g1, 4, %g0

        ldd     [%o1], %g2
        add     %o0, 8, %o0
        st      %g2, [%o0 - 0x08]
        add     %o1, 8, %o1
        st      %g3, [%o0 - 0x04]

81:     be      1f
        andcc   %g1, 2, %g0

        ld      [%o1], %g2
        add     %o1, 4, %o1
        st      %g2, [%o0]
        add     %o0, 4, %o0
1:      be      1f
        andcc   %g1, 1, %g0

        lduh    [%o1], %g2
        add     %o1, 2, %o1
        sth     %g2, [%o0]
        add     %o0, 2, %o0
1:      be      1f
        nop

        ldub    [%o1], %g2
        stb     %g2, [%o0]
1:      retl
        ld      [%sp + 64], %o0

82:     /* ldd_std */
        MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
        subcc   %g6, 128, %g6
        add     %o1, 128, %o1
        bne     82b
        add     %o0, 128, %o0

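/* Same dispatch for the doubleword-aligned tail: a MOVE_LASTALIGNCHUNK
   expansion is four insns (16 bytes) per 16 bytes copied, so the table
   offset equals %g6 itself; 110: returns %o5 = address of 111: minus
   %g6.  */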
        andcc   %g1, 0x70, %g6
        be      84f
        andcc   %g1, 8, %g0

        mov     %o7, %g2
111:    call    110f
        add     %o1, %g6, %o1
        mov     %g2, %o7
        jmpl    %o5 + (84f - 111b), %g0
        add     %o0, %g6, %o0

83:     MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
        MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

84:     be      85f
        andcc   %g1, 4, %g0

        ldd     [%o1], %g2
        add     %o0, 8, %o0
        std     %g2, [%o0 - 0x08]
        add     %o1, 8, %o1
85:     be      1f
        andcc   %g1, 2, %g0

        ld      [%o1], %g2
        add     %o1, 4, %o1
        st      %g2, [%o0]
        add     %o0, 4, %o0
1:      be      1f
        andcc   %g1, 1, %g0

        lduh    [%o1], %g2
        add     %o1, 2, %o1
        sth     %g2, [%o0]
        add     %o0, 2, %o0
1:      be      1f
        nop

        ldub    [%o1], %g2
        stb     %g2, [%o0]
1:      retl
        ld      [%sp + 64], %o0

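/* 86: src and dst are not mutually word-aligned ((dst - src) & 3 != 0).
   Copies of at most 6 bytes go straight to the byte loop at 88f, and
   copies of 256 bytes or more to the big shift-merge code at 87f.
   Medium copies word-align dst, truncate src to a word boundary, and
   build each output word from two input words in the unrolled loop at
   5: (%g4 = left-shift count, %g6 = 32 - %g4).  */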
86:     cmp     %o2, 6
        bleu    88f

        cmp     %o2, 256
        bcc     87f

        andcc   %o0, 3, %g0
        be      61f
        andcc   %o0, 1, %g0
        be      60f
        andcc   %o0, 2, %g0

        ldub    [%o1], %g5
        add     %o1, 1, %o1
        stb     %g5, [%o0]
        sub     %o2, 1, %o2
        bne     61f
        add     %o0, 1, %o0
60:     ldub    [%o1], %g3
        add     %o1, 2, %o1
        stb     %g3, [%o0]
        sub     %o2, 2, %o2
        ldub    [%o1 - 1], %g3
        add     %o0, 2, %o0
        stb     %g3, [%o0 - 1]
61:     and     %o1, 3, %g2
        and     %o2, 0xc, %g3
        and     %o1, -4, %o1
        cmp     %g3, 4
        sll     %g2, 3, %g4
        mov     32, %g2
        be      4f
        sub     %g2, %g4, %g6

        blu     3f
        cmp     %g3, 0x8

        be      2f
        srl     %o2, 2, %g3

        ld      [%o1], %o3
        add     %o0, -8, %o0
        ld      [%o1 + 4], %o4
        b       8f
        add     %g3, 1, %g3
2:      ld      [%o1], %o4
        add     %o0, -12, %o0
        ld      [%o1 + 4], %o5
        add     %g3, 2, %g3
        b       9f
        add     %o1, -4, %o1
3:      ld      [%o1], %g1
        add     %o0, -4, %o0
        ld      [%o1 + 4], %o3
        srl     %o2, 2, %g3
        b       7f
        add     %o1, 4, %o1
4:      ld      [%o1], %o5
        cmp     %o2, 7
        ld      [%o1 + 4], %g1
        srl     %o2, 2, %g3
        bleu    10f
        add     %o1, 8, %o1

        ld      [%o1], %o3
        add     %g3, -1, %g3
5:      sll     %o5, %g4, %g2
        srl     %g1, %g6, %g5
        or      %g2, %g5, %g2
        st      %g2, [%o0]
7:      ld      [%o1 + 4], %o4
        sll     %g1, %g4, %g2
        srl     %o3, %g6, %g5
        or      %g2, %g5, %g2
        st      %g2, [%o0 + 4]
8:      ld      [%o1 + 8], %o5
        sll     %o3, %g4, %g2
        srl     %o4, %g6, %g5
        or      %g2, %g5, %g2
        st      %g2, [%o0 + 8]
9:      ld      [%o1 + 12], %g1
        sll     %o4, %g4, %g2
        srl     %o5, %g6, %g5
        addcc   %g3, -4, %g3
        or      %g2, %g5, %g2
        add     %o1, 16, %o1
        st      %g2, [%o0 + 12]
        add     %o0, 16, %o0
        bne,a   5b
        ld      [%o1], %o3
10:     sll     %o5, %g4, %g2
        srl     %g1, %g6, %g5
        srl     %g6, 3, %g3
        or      %g2, %g5, %g2
        sub     %o1, %g3, %o1
        andcc   %o2, 2, %g0
        st      %g2, [%o0]
        be      1f
        andcc   %o2, 1, %g0

        ldub    [%o1], %g2
        add     %o1, 2, %o1
        stb     %g2, [%o0 + 4]
        add     %o0, 2, %o0
        ldub    [%o1 - 1], %g2
        stb     %g2, [%o0 + 3]
1:      be      1f
        nop
        ldub    [%o1], %g2
        stb     %g2, [%o0 + 4]
1:      retl
        ld      [%sp + 64], %o0

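/* 87: large (>= 256 byte) copy with mismatched alignment.  Word-align
   src and prime it so the priming load in each case below leaves the
   ldd loads doubleword-aligned, then dispatch on %o5 = (dst - src) & 3:
   %o5 == 1 goes to 33: (shift pair 24/8), %o5 == 2 to 32: (16/16), and
   %o5 == 3 falls into 31: (8/24).  Each case runs SMOVE_CHUNK over
   64-byte blocks, or jumps to its 41:/42:/43: SMOVE_ALIGNCHUNK twin
   when the stores turn out doubleword-aligned.  */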
87:     andcc   %o1, 3, %g0
        be      3f
        andcc   %o1, 1, %g0

        be      4f
        andcc   %o1, 2, %g0

        ldub    [%o1], %g2
        add     %o1, 1, %o1
        stb     %g2, [%o0]
        sub     %o2, 1, %o2
        bne     3f
        add     %o0, 1, %o0
4:      lduh    [%o1], %g2
        add     %o1, 2, %o1
        srl     %g2, 8, %g3
        sub     %o2, 2, %o2
        stb     %g3, [%o0]
        add     %o0, 2, %o0
        stb     %g2, [%o0 - 1]
3:      andcc   %o1, 4, %g0

        bne     2f
        cmp     %o5, 1

        ld      [%o1], %o4
        srl     %o4, 24, %g2
        stb     %g2, [%o0]
        srl     %o4, 16, %g3
        stb     %g3, [%o0 + 1]
        srl     %o4, 8, %g2
        stb     %g2, [%o0 + 2]
        sub     %o2, 4, %o2
        stb     %o4, [%o0 + 3]
        add     %o1, 4, %o1
        add     %o0, 4, %o0
2:      be      33f
        cmp     %o5, 2
        be      32f
        sub     %o2, 4, %o2
31:     ld      [%o1], %g2
        add     %o1, 4, %o1
        srl     %g2, 24, %g3
        and     %o0, 7, %g5
        stb     %g3, [%o0]
        cmp     %g5, 7
        sll     %g2, 8, %g1
        add     %o0, 4, %o0
        be      41f
        and     %o2, 0xffffffc0, %o3
        ld      [%o0 - 7], %o4
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
        SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
        SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
        SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     4b
        add     %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
        srl     %g1, 16, %g2
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
        add     %o0, 16, %o0

        srl     %g1, 16, %g2
1:      st      %o4, [%o0 - 7]
        sth     %g2, [%o0 - 3]
        srl     %g1, 8, %g4
        b       88f
        stb     %g4, [%o0 - 1]
32:     ld      [%o1], %g2
        add     %o1, 4, %o1
        srl     %g2, 16, %g3
        and     %o0, 7, %g5
        sth     %g3, [%o0]
        cmp     %g5, 6
        sll     %g2, 16, %g1
        add     %o0, 4, %o0
        be      42f
        and     %o2, 0xffffffc0, %o3
        ld      [%o0 - 6], %o4
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
        SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
        SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
        SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     4b
        add     %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
        srl     %g1, 16, %g2
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
        add     %o0, 16, %o0

        srl     %g1, 16, %g2
1:      st      %o4, [%o0 - 6]
        b       88f
        sth     %g2, [%o0 - 2]
33:     ld      [%o1], %g2
        sub     %o2, 4, %o2
        srl     %g2, 24, %g3
        and     %o0, 7, %g5
        stb     %g3, [%o0]
        cmp     %g5, 5
        srl     %g2, 8, %g4
        sll     %g2, 24, %g1
        sth     %g4, [%o0 + 1]
        add     %o1, 4, %o1
        be      43f
        and     %o2, 0xffffffc0, %o3

        ld      [%o0 - 1], %o4
        add     %o0, 4, %o0
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
        SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
        SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
        SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     4b
        add     %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
        srl     %g1, 24, %g2
4:      SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, -1)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
        add     %o0, 16, %o0

        srl     %g1, 24, %g2
1:      st      %o4, [%o0 - 5]
        b       88f
        stb     %g2, [%o0 - 1]
41:     SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     41b
        add     %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
        srl     %g1, 16, %g2
4:      SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 8, 24, -3)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
        add     %o0, 16, %o0

        srl     %g1, 16, %g2
1:      sth     %g2, [%o0 - 3]
        srl     %g1, 8, %g4
        b       88f
        stb     %g4, [%o0 - 1]
43:     SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
        SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     43b
        add     %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
        srl     %g1, 24, %g2
4:      SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 24, 8, 3)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
        add     %o0, 16, %o0

        srl     %g1, 24, %g2
1:      stb     %g2, [%o0 + 3]
        b       88f
        add     %o0, 4, %o0
42:     SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
        SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
        SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
        SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
        subcc   %o3, 64, %o3
        add     %o1, 64, %o1
        bne     42b
        add     %o0, 64, %o0

        andcc   %o2, 0x30, %o3
        be,a    1f
        srl     %g1, 16, %g2
4:      SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g6, g1, 16, 16, -2)
        subcc   %o3, 16, %o3
        add     %o1, 16, %o1
        bne     4b
        add     %o0, 16, %o0

        srl     %g1, 16, %g2
1:      sth     %g2, [%o0 - 2]

        /* Fall through */

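/* 88: copy the remaining (length & 0xe) bytes in pairs via a computed
   jump into the MOVE_SHORTCHUNK table: each expansion is four insns
   (16 bytes of code) per 2 bytes copied, hence the table offset
   %o4 = %o3 * 8.  A final odd byte is handled at 89:.  */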
88:     and     %o2, 0xe, %o3
        mov     %o7, %g2
        sll     %o3, 3, %o4
        add     %o0, %o3, %o0
106:    call    100f
        add     %o1, %o3, %o1
        mov     %g2, %o7
        jmpl    %o5 + (89f - 106b), %g0
        andcc   %o2, 1, %g0

        MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)

89:     be      1f
        nop

        ldub    [%o1], %g2
        stb     %g2, [%o0]
1:      retl
        ld      [%sp + 64], %o0

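/* 90: fewer than 16 bytes with src and dst mutually word-aligned.  An
   unaligned src goes back to the byte loop at 88b; otherwise copy an
   8-byte chunk if the length has bit 3 set and finish with the 4/2/1
   tail at 81b.  */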
90:     bne     88b
        andcc   %o2, 8, %g0

        be      1f
        andcc   %o2, 4, %g0

        ld      [%o1 + 0x00], %g2
        ld      [%o1 + 0x04], %g3
        add     %o1, 8, %o1
        st      %g2, [%o0 + 0x00]
        st      %g3, [%o0 + 0x04]
        add     %o0, 8, %o0
1:      b       81b
        mov     %o2, %g1

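/* Helpers for the computed jumps: return to the caller with %o5 set to
   the address of the call insn minus the byte offset precomputed in
   %o4 (for 100:) or %g6 (for 110:).  */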
100:    retl
        sub     %o7, %o4, %o5
110:    retl
        sub     %o7, %g6, %o5
END(memcpy)

libc_hidden_builtin_def (memcpy)

libc_hidden_def (__mempcpy)
weak_alias (__mempcpy, mempcpy)
libc_hidden_builtin_def (mempcpy)