/* Copy SIZE bytes from SRC to DEST.  For SUN4V Niagara-2.
   Copyright (C) 2007-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by David S. Miller (davem@davemloft.net)

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

20 | #include <sysdep.h> | |
21 | ||
22 | #define ASI_BLK_INIT_QUAD_LDD_P 0xe2 | |
23 | #define ASI_BLK_P 0xf0 | |
24 | #define ASI_P 0x80 | |
25 | #define ASI_PNF 0x82 | |
26 | ||
27 | #define FPRS_FEF 0x04 | |
28 | ||
29 | #define VISEntryHalf \ | |
30 | rd %fprs, %o5; \ | |
31 | wr %g0, FPRS_FEF, %fprs | |
32 | ||
33 | #define VISExitHalf \ | |
34 | and %o5, FPRS_FEF, %o5; \ | |
35 | wr %o5, 0x0, %fprs | |
36 | ||
37 | #define STORE_ASI ASI_BLK_INIT_QUAD_LDD_P | |
38 | ||
39 | #define LOAD(type,addr,dest) type [addr], dest | |
40 | #define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_P, dest | |
41 | #define STORE(type,src,addr) type src, [addr] | |
42 | #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P | |
43 | #define STORE_INIT(src,addr) stxa src, [addr] STORE_ASI | |
44 | ||
45 | #ifndef XCC | |
46 | #define USE_BPR | |
47 | #define XCC xcc | |
48 | #endif | |
49 | ||
50 | #define FREG_FROB(x0, x1, x2, x3, x4, x5, x6, x7, x8) \ | |
51 | faligndata %x0, %x1, %f0; \ | |
52 | faligndata %x1, %x2, %f2; \ | |
53 | faligndata %x2, %x3, %f4; \ | |
54 | faligndata %x3, %x4, %f6; \ | |
55 | faligndata %x4, %x5, %f8; \ | |
56 | faligndata %x5, %x6, %f10; \ | |
57 | faligndata %x6, %x7, %f12; \ | |
58 | faligndata %x7, %x8, %f14; | |
59 | ||
60 | #define FREG_MOVE_1(x0) \ | |
f230c29b | 61 | fsrc2 %x0, %f0; |
46ae8850 | 62 | #define FREG_MOVE_2(x0, x1) \ |
f230c29b DM |
63 | fsrc2 %x0, %f0; \ |
64 | fsrc2 %x1, %f2; | |
46ae8850 | 65 | #define FREG_MOVE_3(x0, x1, x2) \ |
f230c29b DM |
66 | fsrc2 %x0, %f0; \ |
67 | fsrc2 %x1, %f2; \ | |
68 | fsrc2 %x2, %f4; | |
46ae8850 | 69 | #define FREG_MOVE_4(x0, x1, x2, x3) \ |
f230c29b DM |
70 | fsrc2 %x0, %f0; \ |
71 | fsrc2 %x1, %f2; \ | |
72 | fsrc2 %x2, %f4; \ | |
73 | fsrc2 %x3, %f6; | |
46ae8850 | 74 | #define FREG_MOVE_5(x0, x1, x2, x3, x4) \ |
f230c29b DM |
75 | fsrc2 %x0, %f0; \ |
76 | fsrc2 %x1, %f2; \ | |
77 | fsrc2 %x2, %f4; \ | |
78 | fsrc2 %x3, %f6; \ | |
79 | fsrc2 %x4, %f8; | |
46ae8850 | 80 | #define FREG_MOVE_6(x0, x1, x2, x3, x4, x5) \ |
f230c29b DM |
81 | fsrc2 %x0, %f0; \ |
82 | fsrc2 %x1, %f2; \ | |
83 | fsrc2 %x2, %f4; \ | |
84 | fsrc2 %x3, %f6; \ | |
85 | fsrc2 %x4, %f8; \ | |
86 | fsrc2 %x5, %f10; | |
46ae8850 | 87 | #define FREG_MOVE_7(x0, x1, x2, x3, x4, x5, x6) \ |
f230c29b DM |
88 | fsrc2 %x0, %f0; \ |
89 | fsrc2 %x1, %f2; \ | |
90 | fsrc2 %x2, %f4; \ | |
91 | fsrc2 %x3, %f6; \ | |
92 | fsrc2 %x4, %f8; \ | |
93 | fsrc2 %x5, %f10; \ | |
94 | fsrc2 %x6, %f12; | |
46ae8850 | 95 | #define FREG_MOVE_8(x0, x1, x2, x3, x4, x5, x6, x7) \ |
f230c29b DM |
96 | fsrc2 %x0, %f0; \ |
97 | fsrc2 %x1, %f2; \ | |
98 | fsrc2 %x2, %f4; \ | |
99 | fsrc2 %x3, %f6; \ | |
100 | fsrc2 %x4, %f8; \ | |
101 | fsrc2 %x5, %f10; \ | |
102 | fsrc2 %x6, %f12; \ | |
103 | fsrc2 %x7, %f14; | |
46ae8850 JJ |
104 | #define FREG_LOAD_1(base, x0) \ |
105 | LOAD(ldd, base + 0x00, %x0) | |
106 | #define FREG_LOAD_2(base, x0, x1) \ | |
107 | LOAD(ldd, base + 0x00, %x0); \ | |
108 | LOAD(ldd, base + 0x08, %x1); | |
109 | #define FREG_LOAD_3(base, x0, x1, x2) \ | |
110 | LOAD(ldd, base + 0x00, %x0); \ | |
111 | LOAD(ldd, base + 0x08, %x1); \ | |
112 | LOAD(ldd, base + 0x10, %x2); | |
113 | #define FREG_LOAD_4(base, x0, x1, x2, x3) \ | |
114 | LOAD(ldd, base + 0x00, %x0); \ | |
115 | LOAD(ldd, base + 0x08, %x1); \ | |
116 | LOAD(ldd, base + 0x10, %x2); \ | |
117 | LOAD(ldd, base + 0x18, %x3); | |
118 | #define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \ | |
119 | LOAD(ldd, base + 0x00, %x0); \ | |
120 | LOAD(ldd, base + 0x08, %x1); \ | |
121 | LOAD(ldd, base + 0x10, %x2); \ | |
122 | LOAD(ldd, base + 0x18, %x3); \ | |
123 | LOAD(ldd, base + 0x20, %x4); | |
124 | #define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \ | |
125 | LOAD(ldd, base + 0x00, %x0); \ | |
126 | LOAD(ldd, base + 0x08, %x1); \ | |
127 | LOAD(ldd, base + 0x10, %x2); \ | |
128 | LOAD(ldd, base + 0x18, %x3); \ | |
129 | LOAD(ldd, base + 0x20, %x4); \ | |
130 | LOAD(ldd, base + 0x28, %x5); | |
131 | #define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \ | |
132 | LOAD(ldd, base + 0x00, %x0); \ | |
133 | LOAD(ldd, base + 0x08, %x1); \ | |
134 | LOAD(ldd, base + 0x10, %x2); \ | |
135 | LOAD(ldd, base + 0x18, %x3); \ | |
136 | LOAD(ldd, base + 0x20, %x4); \ | |
137 | LOAD(ldd, base + 0x28, %x5); \ | |
138 | LOAD(ldd, base + 0x30, %x6); | |
139 | ||
4f41c682 | 140 | #if IS_IN (libc) |
3afd5a3b | 141 | |
46ae8850 JJ |
142 | .register %g2,#scratch |
143 | .register %g3,#scratch | |
144 | .register %g6,#scratch | |
145 | ||
146 | .text | |
46ae8850 | 147 | |
88d85d4f DM |
148 | ENTRY(__mempcpy_niagara2) |
149 | ba,pt %XCC, 101f | |
150 | add %o0, %o2, %g5 | |
151 | END(__mempcpy_niagara2) | |
152 | ||
46ae8850 | 153 | .align 32 |
3afd5a3b | 154 | ENTRY(__memcpy_niagara2) |
88d85d4f DM |
155 | 100: /* %o0=dst, %o1=src, %o2=len */ |
156 | mov %o0, %g5 | |
157 | 101: | |
3afd5a3b | 158 | # ifndef USE_BPR |
19c10a47 | 159 | srl %o2, 0, %o2 |
3afd5a3b | 160 | # endif |
46ae8850 JJ |
161 | cmp %o2, 0 |
162 | be,pn %XCC, 85f | |
163 | 218: or %o0, %o1, %o3 | |
164 | cmp %o2, 16 | |
165 | blu,a,pn %XCC, 80f | |
166 | or %o3, %o2, %o3 | |
167 | ||
168 | /* 2 blocks (128 bytes) is the minimum we can do the block | |
169 | * copy with. We need to ensure that we'll iterate at least | |
170 | * once in the block copy loop. At worst we'll need to align | |
171 | * the destination to a 64-byte boundary which can chew up | |
172 | * to (64 - 1) bytes from the length before we perform the | |
173 | * block copy loop. | |
174 | * | |
175 | * However, the cut-off point, performance wise, is around | |
176 | * 4 64-byte blocks. | |
177 | */ | |
178 | cmp %o2, (4 * 64) | |
179 | blu,pt %XCC, 75f | |
180 | andcc %o3, 0x7, %g0 | |
181 | ||
182 | /* %o0: dst | |
183 | * %o1: src | |
184 | * %o2: len (known to be >= 128) | |
185 | * | |
186 | * The block copy loops can use %o4, %g2, %g3 as | |
187 | * temporaries while copying the data. %o5 must | |
188 | * be preserved between VISEntryHalf and VISExitHalf | |
189 | */ | |
190 | ||
191 | LOAD(prefetch, %o1 + 0x000, #one_read) | |
192 | LOAD(prefetch, %o1 + 0x040, #one_read) | |
193 | LOAD(prefetch, %o1 + 0x080, #one_read) | |
194 | ||
195 | /* Align destination on 64-byte boundary. */ | |
196 | andcc %o0, (64 - 1), %o4 | |
197 | be,pt %XCC, 2f | |
198 | sub %o4, 64, %o4 | |
199 | sub %g0, %o4, %o4 ! bytes to align dst | |
200 | sub %o2, %o4, %o2 | |
201 | 1: subcc %o4, 1, %o4 | |
202 | LOAD(ldub, %o1, %g1) | |
203 | STORE(stb, %g1, %o0) | |
204 | add %o1, 1, %o1 | |
205 | bne,pt %XCC, 1b | |
206 | add %o0, 1, %o0 | |
207 | ||
208 | 2: | |
209 | /* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve | |
210 | * o5 from here until we hit VISExitHalf. | |
211 | */ | |
212 | VISEntryHalf | |
213 | ||
834caf06 | 214 | membar #Sync |
46ae8850 JJ |
215 | alignaddr %o1, %g0, %g0 |
216 | ||
217 | add %o1, (64 - 1), %o4 | |
218 | andn %o4, (64 - 1), %o4 | |
219 | andn %o2, (64 - 1), %g1 | |
220 | sub %o2, %g1, %o2 | |
221 | ||
222 | and %o1, (64 - 1), %g2 | |
223 | add %o1, %g1, %o1 | |
224 | sub %o0, %o4, %g3 | |
225 | brz,pt %g2, 190f | |
226 | cmp %g2, 32 | |
227 | blu,a 5f | |
228 | cmp %g2, 16 | |
229 | cmp %g2, 48 | |
230 | blu,a 4f | |
231 | cmp %g2, 40 | |
232 | cmp %g2, 56 | |
233 | blu 170f | |
234 | nop | |
235 | ba,a,pt %xcc, 180f | |
236 | ||
237 | 4: /* 32 <= low bits < 48 */ | |
238 | blu 150f | |
239 | nop | |
240 | ba,a,pt %xcc, 160f | |
241 | 5: /* 0 < low bits < 32 */ | |
242 | blu,a 6f | |
243 | cmp %g2, 8 | |
244 | cmp %g2, 24 | |
245 | blu 130f | |
246 | nop | |
247 | ba,a,pt %xcc, 140f | |
248 | 6: /* 0 < low bits < 16 */ | |
249 | bgeu 120f | |
250 | nop | |
251 | /* fall through for 0 < low bits < 8 */ | |
252 | 110: sub %o4, 64, %g2 | |
253 | LOAD_BLK(%g2, %f0) | |
254 | 1: STORE_INIT(%g0, %o4 + %g3) | |
255 | LOAD_BLK(%o4, %f16) | |
256 | FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16) | |
257 | STORE_BLK(%f0, %o4 + %g3) | |
258 | FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30) | |
259 | subcc %g1, 64, %g1 | |
260 | add %o4, 64, %o4 | |
261 | bne,pt %XCC, 1b | |
262 | LOAD(prefetch, %o4 + 64, #one_read) | |
263 | ba,pt %xcc, 195f | |
264 | nop | |
265 | ||
266 | 120: sub %o4, 56, %g2 | |
267 | FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12) | |
268 | 1: STORE_INIT(%g0, %o4 + %g3) | |
269 | LOAD_BLK(%o4, %f16) | |
270 | FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18) | |
271 | STORE_BLK(%f0, %o4 + %g3) | |
272 | FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30) | |
273 | subcc %g1, 64, %g1 | |
274 | add %o4, 64, %o4 | |
275 | bne,pt %XCC, 1b | |
276 | LOAD(prefetch, %o4 + 64, #one_read) | |
277 | ba,pt %xcc, 195f | |
278 | nop | |
279 | ||
280 | 130: sub %o4, 48, %g2 | |
281 | FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10) | |
282 | 1: STORE_INIT(%g0, %o4 + %g3) | |
283 | LOAD_BLK(%o4, %f16) | |
284 | FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20) | |
285 | STORE_BLK(%f0, %o4 + %g3) | |
286 | FREG_MOVE_6(f20, f22, f24, f26, f28, f30) | |
287 | subcc %g1, 64, %g1 | |
288 | add %o4, 64, %o4 | |
289 | bne,pt %XCC, 1b | |
290 | LOAD(prefetch, %o4 + 64, #one_read) | |
291 | ba,pt %xcc, 195f | |
292 | nop | |
293 | ||
294 | 140: sub %o4, 40, %g2 | |
295 | FREG_LOAD_5(%g2, f0, f2, f4, f6, f8) | |
296 | 1: STORE_INIT(%g0, %o4 + %g3) | |
297 | LOAD_BLK(%o4, %f16) | |
298 | FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22) | |
299 | STORE_BLK(%f0, %o4 + %g3) | |
300 | FREG_MOVE_5(f22, f24, f26, f28, f30) | |
301 | subcc %g1, 64, %g1 | |
302 | add %o4, 64, %o4 | |
303 | bne,pt %XCC, 1b | |
304 | LOAD(prefetch, %o4 + 64, #one_read) | |
305 | ba,pt %xcc, 195f | |
306 | nop | |
307 | ||
308 | 150: sub %o4, 32, %g2 | |
309 | FREG_LOAD_4(%g2, f0, f2, f4, f6) | |
310 | 1: STORE_INIT(%g0, %o4 + %g3) | |
311 | LOAD_BLK(%o4, %f16) | |
312 | FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24) | |
313 | STORE_BLK(%f0, %o4 + %g3) | |
314 | FREG_MOVE_4(f24, f26, f28, f30) | |
315 | subcc %g1, 64, %g1 | |
316 | add %o4, 64, %o4 | |
317 | bne,pt %XCC, 1b | |
318 | LOAD(prefetch, %o4 + 64, #one_read) | |
319 | ba,pt %xcc, 195f | |
320 | nop | |
321 | ||
322 | 160: sub %o4, 24, %g2 | |
323 | FREG_LOAD_3(%g2, f0, f2, f4) | |
324 | 1: STORE_INIT(%g0, %o4 + %g3) | |
325 | LOAD_BLK(%o4, %f16) | |
326 | FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26) | |
327 | STORE_BLK(%f0, %o4 + %g3) | |
328 | FREG_MOVE_3(f26, f28, f30) | |
329 | subcc %g1, 64, %g1 | |
330 | add %o4, 64, %o4 | |
331 | bne,pt %XCC, 1b | |
332 | LOAD(prefetch, %o4 + 64, #one_read) | |
333 | ba,pt %xcc, 195f | |
334 | nop | |
335 | ||
336 | 170: sub %o4, 16, %g2 | |
337 | FREG_LOAD_2(%g2, f0, f2) | |
338 | 1: STORE_INIT(%g0, %o4 + %g3) | |
339 | LOAD_BLK(%o4, %f16) | |
340 | FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28) | |
341 | STORE_BLK(%f0, %o4 + %g3) | |
342 | FREG_MOVE_2(f28, f30) | |
343 | subcc %g1, 64, %g1 | |
344 | add %o4, 64, %o4 | |
345 | bne,pt %XCC, 1b | |
346 | LOAD(prefetch, %o4 + 64, #one_read) | |
347 | ba,pt %xcc, 195f | |
348 | nop | |
349 | ||
350 | 180: sub %o4, 8, %g2 | |
351 | FREG_LOAD_1(%g2, f0) | |
352 | 1: STORE_INIT(%g0, %o4 + %g3) | |
353 | LOAD_BLK(%o4, %f16) | |
354 | FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30) | |
355 | STORE_BLK(%f0, %o4 + %g3) | |
356 | FREG_MOVE_1(f30) | |
357 | subcc %g1, 64, %g1 | |
358 | add %o4, 64, %o4 | |
359 | bne,pt %XCC, 1b | |
360 | LOAD(prefetch, %o4 + 64, #one_read) | |
361 | ba,pt %xcc, 195f | |
362 | nop | |
363 | ||
364 | 190: | |
365 | 1: STORE_INIT(%g0, %o4 + %g3) | |
366 | subcc %g1, 64, %g1 | |
367 | LOAD_BLK(%o4, %f0) | |
368 | STORE_BLK(%f0, %o4 + %g3) | |
369 | add %o4, 64, %o4 | |
370 | bne,pt %XCC, 1b | |
371 | LOAD(prefetch, %o4 + 64, #one_read) | |
372 | ||
373 | 195: | |
374 | add %o4, %g3, %o0 | |
375 | membar #Sync | |
376 | ||
377 | VISExitHalf | |
378 | ||
379 | /* %o2 contains any final bytes still needed to be copied | |
380 | * over. If anything is left, we copy it one byte at a time. | |
381 | */ | |
382 | brz,pt %o2, 85f | |
383 | sub %o0, %o1, %o3 | |
384 | ba,a,pt %XCC, 90f | |
385 | ||
386 | .align 64 | |
387 | 75: /* 16 < len <= 64 */ | |
388 | bne,pn %XCC, 75f | |
389 | sub %o0, %o1, %o3 | |
390 | ||
391 | 72: | |
392 | andn %o2, 0xf, %o4 | |
393 | and %o2, 0xf, %o2 | |
394 | 1: subcc %o4, 0x10, %o4 | |
395 | LOAD(ldx, %o1, %o5) | |
396 | add %o1, 0x08, %o1 | |
397 | LOAD(ldx, %o1, %g1) | |
398 | sub %o1, 0x08, %o1 | |
399 | STORE(stx, %o5, %o1 + %o3) | |
400 | add %o1, 0x8, %o1 | |
401 | STORE(stx, %g1, %o1 + %o3) | |
402 | bgu,pt %XCC, 1b | |
403 | add %o1, 0x8, %o1 | |
404 | 73: andcc %o2, 0x8, %g0 | |
405 | be,pt %XCC, 1f | |
406 | nop | |
407 | sub %o2, 0x8, %o2 | |
408 | LOAD(ldx, %o1, %o5) | |
409 | STORE(stx, %o5, %o1 + %o3) | |
410 | add %o1, 0x8, %o1 | |
411 | 1: andcc %o2, 0x4, %g0 | |
412 | be,pt %XCC, 1f | |
413 | nop | |
414 | sub %o2, 0x4, %o2 | |
415 | LOAD(lduw, %o1, %o5) | |
416 | STORE(stw, %o5, %o1 + %o3) | |
417 | add %o1, 0x4, %o1 | |
418 | 1: cmp %o2, 0 | |
419 | be,pt %XCC, 85f | |
420 | nop | |
421 | ba,pt %xcc, 90f | |
422 | nop | |
423 | ||
424 | 75: | |
425 | andcc %o0, 0x7, %g1 | |
426 | sub %g1, 0x8, %g1 | |
427 | be,pn %icc, 2f | |
428 | sub %g0, %g1, %g1 | |
429 | sub %o2, %g1, %o2 | |
430 | ||
431 | 1: subcc %g1, 1, %g1 | |
432 | LOAD(ldub, %o1, %o5) | |
433 | STORE(stb, %o5, %o1 + %o3) | |
434 | bgu,pt %icc, 1b | |
435 | add %o1, 1, %o1 | |
436 | ||
437 | 2: add %o1, %o3, %o0 | |
438 | andcc %o1, 0x7, %g1 | |
439 | bne,pt %icc, 8f | |
440 | sll %g1, 3, %g1 | |
441 | ||
442 | cmp %o2, 16 | |
443 | bgeu,pt %icc, 72b | |
444 | nop | |
445 | ba,a,pt %xcc, 73b | |
446 | ||
447 | 8: mov 64, %o3 | |
448 | andn %o1, 0x7, %o1 | |
449 | LOAD(ldx, %o1, %g2) | |
450 | sub %o3, %g1, %o3 | |
451 | andn %o2, 0x7, %o4 | |
452 | sllx %g2, %g1, %g2 | |
453 | 1: add %o1, 0x8, %o1 | |
454 | LOAD(ldx, %o1, %g3) | |
455 | subcc %o4, 0x8, %o4 | |
456 | srlx %g3, %o3, %o5 | |
457 | or %o5, %g2, %o5 | |
458 | STORE(stx, %o5, %o0) | |
459 | add %o0, 0x8, %o0 | |
460 | bgu,pt %icc, 1b | |
461 | sllx %g3, %g1, %g2 | |
462 | ||
463 | srl %g1, 3, %g1 | |
464 | andcc %o2, 0x7, %o2 | |
465 | be,pn %icc, 85f | |
466 | add %o1, %g1, %o1 | |
467 | ba,pt %xcc, 90f | |
468 | sub %o0, %o1, %o3 | |
469 | ||
470 | .align 64 | |
471 | 80: /* 0 < len <= 16 */ | |
472 | andcc %o3, 0x3, %g0 | |
473 | bne,pn %XCC, 90f | |
474 | sub %o0, %o1, %o3 | |
475 | ||
476 | 1: | |
477 | subcc %o2, 4, %o2 | |
478 | LOAD(lduw, %o1, %g1) | |
479 | STORE(stw, %g1, %o1 + %o3) | |
480 | bgu,pt %XCC, 1b | |
481 | add %o1, 4, %o1 | |
482 | ||
483 | 85: retl | |
484 | mov %g5, %o0 | |
485 | ||
486 | .align 32 | |
487 | 90: | |
488 | subcc %o2, 1, %o2 | |
489 | LOAD(ldub, %o1, %g1) | |
490 | STORE(stb, %g1, %o1 + %o3) | |
491 | bgu,pt %XCC, 90b | |
492 | add %o1, 1, %o1 | |
493 | retl | |
494 | mov %g5, %o0 | |
495 | ||
3afd5a3b | 496 | END(__memcpy_niagara2) |
46ae8850 | 497 | |
3afd5a3b | 498 | #endif |