sysdeps/powerpc/powerpc64/memset.S
/* Optimized memset implementation for PowerPC64.
   Copyright (C) 1997, 1999, 2000, 2002, 2003, 2007
   Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <bp-sym.h>
#include <bp-asm.h>

        .section ".toc","aw"
.LC0:
        .tc __cache_line_size[TC],__cache_line_size
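/* .LC0 above is the TOC entry for __cache_line_size; the dcbz path below
   loads it to find the runtime cache line size (normally initialized at
   startup from the AT_DCACHEBSIZE aux-vector entry).  */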
        .section ".text"
        .align 2

/* __ptr_t [r3] memset (__ptr_t s [r3], int c [r4], size_t n [r5]);
   Returns 's'.

   The memset is done in three sizes: byte (8 bits), word (32 bits),
   cache line (256 bits).  There is a special case for setting cache lines
   to 0, to take advantage of the dcbz instruction. */
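
/* Overall flow: store up to 7 bytes to reach a doubleword boundary,
   then up to 24 more bytes to reach a 32-byte boundary, then fill
   32-byte blocks with doubleword stores (or whole cache lines with
   dcbz when the fill value is zero), and finally handle the last
   0-31 bytes in L(medium). */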

EALIGN (BP_SYM (memset), 5, 0)
        CALL_MCOUNT 3

#define rTMP r0
#define rRTN r3 /* Initial value of 1st argument. */
#if __BOUNDED_POINTERS__
# define rMEMP0 r4 /* Original value of 1st arg. */
# define rCHR r5 /* Char to set in each byte. */
# define rLEN r6 /* Length of region to set. */
# define rMEMP r10 /* Address at which we are storing. */
#else
# define rMEMP0 r3 /* Original value of 1st arg. */
# define rCHR r4 /* Char to set in each byte. */
# define rLEN r5 /* Length of region to set. */
# define rMEMP r6 /* Address at which we are storing. */
#endif
#define rALIGN r7 /* Number of bytes we are setting now (when aligning). */
#define rMEMP2 r8

#define rNEG64 r8 /* Constant -64 for clearing with dcbz. */
#define rCLS r8 /* Cache line size obtained from static. */
#define rCLM r9 /* Cache line size mask to check for cache alignment. */
L(_memset):
#if __BOUNDED_POINTERS__
        cmpldi cr1, rRTN, 0
        CHECK_BOUNDS_BOTH_WIDE (rMEMP0, rTMP, rTMP2, rLEN)
        beq cr1, L(b0)
        STORE_RETURN_VALUE (rMEMP0)
        STORE_RETURN_BOUNDS (rTMP, rTMP2)
L(b0):
#endif
        /* Take care of case for size <= 8. */
        cmpldi cr1, rLEN, 8
        andi. rALIGN, rMEMP0, 7
        mr rMEMP, rMEMP0
        ble- cr1, L(small)

        /* Align to doubleword boundary. */
        cmpldi cr5, rLEN, 31
        rlwimi rCHR, rCHR, 8, 16, 23 /* Replicate byte to halfword. */
        beq+ L(aligned2)
        mtcrf 0x01, rMEMP0
        subfic rALIGN, rALIGN, 8
        cror 28,30,31 /* Detect odd word aligned. */
        add rMEMP, rMEMP, rALIGN
        sub rLEN, rLEN, rALIGN
        rlwimi rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word. */
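        /* CR7 now holds the low bits of the original address: bit 29 is
           addr & 4, bit 30 is addr & 2, bit 31 is addr & 1, and bit 28
           (set by the cror above) is set when the address is not word
           aligned.  The stores below fill the 1-7 bytes up to the next
           doubleword boundary. */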
        bt 29, L(g4)
        /* Process the even word of doubleword. */
        bf+ 31, L(g2)
        stb rCHR, 0(rMEMP0)
        bt 30, L(g4x)
L(g2):
        sth rCHR, -6(rMEMP)
L(g4x):
        stw rCHR, -4(rMEMP)
        b L(aligned)
        /* Process the odd word of doubleword. */
L(g4):
        bf 28, L(g4x) /* If false, word aligned on odd word. */
        bf+ 31, L(g0)
        stb rCHR, 0(rMEMP0)
        bt 30, L(aligned)
L(g0):
        sth rCHR, -2(rMEMP)

        /* Handle the case of size <= 31. */
L(aligned2):
        rlwimi rCHR, rCHR, 16, 0, 15 /* Replicate halfword to word. */
L(aligned):
        mtcrf 0x01, rLEN
        ble cr5, L(medium)
        /* Align to 32-byte boundary. */
        andi. rALIGN, rMEMP, 0x18
        subfic rALIGN, rALIGN, 0x20
        insrdi rCHR,rCHR,32,0 /* Replicate word to double word. */
        beq L(caligned)
        mtcrf 0x01, rALIGN
        add rMEMP, rMEMP, rALIGN
        sub rLEN, rLEN, rALIGN
        cmplwi cr1, rALIGN, 0x10
        mr rMEMP2, rMEMP
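        /* rALIGN is 8, 16, or 24 here and rMEMP already points at the
           32-byte boundary.  CR7 bit 28 (rALIGN & 8) selects one
           doubleword store and cr1 (rALIGN >= 16) selects two more, so
           the stores below fill the gap back from the boundary. */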
        bf 28, L(a1)
        stdu rCHR, -8(rMEMP2)
L(a1):  blt cr1, L(a2)
        std rCHR, -8(rMEMP2)
        stdu rCHR, -16(rMEMP2)
L(a2):

        /* Now aligned to a 32-byte boundary. */
L(caligned):
        cmpldi cr1, rCHR, 0
        clrrdi. rALIGN, rLEN, 5
        mtcrf 0x01, rLEN
        beq cr1, L(zloopstart) /* Special case for clearing memory using dcbz. */
L(nondcbz):
        srdi rTMP, rALIGN, 5
        mtctr rTMP
        beq L(medium) /* We may not actually get to do a full line. */
        clrldi. rLEN, rLEN, 59
        add rMEMP, rMEMP, rALIGN
        li rNEG64, -0x40
        bdz L(cloopdone)

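        /* Main store loop: the 32-byte-aligned part of the region is
           filled from the top down, 32 bytes per iteration; the dcbtst
           touches the block 64 bytes below the current pointer so it is
           already in the cache when the next iteration stores it. */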
L(c3):  dcbtst rNEG64, rMEMP
        std rCHR, -8(rMEMP)
        std rCHR, -16(rMEMP)
        std rCHR, -24(rMEMP)
        stdu rCHR, -32(rMEMP)
        bdnz L(c3)
L(cloopdone):
        std rCHR, -8(rMEMP)
        std rCHR, -16(rMEMP)
        cmpldi cr1, rLEN, 16
        std rCHR, -24(rMEMP)
        stdu rCHR, -32(rMEMP)
        beqlr
        add rMEMP, rMEMP, rALIGN
        b L(medium_tail2)

        .align 5
/* Clear lines of memory in cache-line-sized chunks (typically 128 bytes
   on 64-bit PowerPC). */
L(zloopstart):
        /* If the remaining length is less than 32 bytes, don't bother getting
           the cache line size. */
        beq L(medium)
        ld rCLS,.LC0@toc(r2)
        lwz rCLS,0(rCLS)
        /* If the cache line size was not set, just go to L(nondcbz), which is
           safe for any cache line size. */
        cmpldi cr1,rCLS,0
        beq cr1,L(nondcbz)

        /* Now we know the cache line size, and it is not 32 bytes, but
           we may not yet be aligned to the cache line.  We may have a partial
           line to fill, so touch it first. */
        dcbt 0,rMEMP
        addi rCLM,rCLS,-1
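        /* rCLM is the cache-line alignment mask (rCLS - 1).  The loop
           below stores 32 bytes at a time until rMEMP reaches a cache
           line boundary or fewer than 32 bytes remain. */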
L(getCacheAligned):
        cmpldi cr1,rLEN,32
        and. rTMP,rCLM,rMEMP
        blt cr1,L(handletail32)
        beq L(cacheAligned)
        addi rMEMP,rMEMP,32
        addi rLEN,rLEN,-32
        std rCHR,-32(rMEMP)
        std rCHR,-24(rMEMP)
        std rCHR,-16(rMEMP)
        std rCHR,-8(rMEMP)
        b L(getCacheAligned)

        /* Now we are aligned to the cache line and can use dcbz. */
L(cacheAligned):
        cmpld cr1,rLEN,rCLS
        blt cr1,L(handletail32)
        dcbz 0,rMEMP
        subf rLEN,rCLS,rLEN
        add rMEMP,rMEMP,rCLS
        b L(cacheAligned)

        /* We are here because the cache line size was set and was not 32 bytes,
           and the remainder (rLEN) is less than the actual cache line size.
           So set up the preconditions for L(nondcbz) and go there. */
L(handletail32):
        clrrwi. rALIGN, rLEN, 5
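        /* clrrwi. rounds rLEN down to a multiple of 32 and sets cr0,
           which the beq in L(nondcbz) uses to decide whether any full
           32-byte blocks are left. */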
        b L(nondcbz)

        .align 5
L(small):
        /* Memset of 8 bytes or less. */
        cmpldi cr6, rLEN, 4
        cmpldi cr5, rLEN, 1
        ble cr6,L(le4)
        subi rLEN, rLEN, 4
        stb rCHR,0(rMEMP)
        stb rCHR,1(rMEMP)
        stb rCHR,2(rMEMP)
        stb rCHR,3(rMEMP)
        addi rMEMP,rMEMP, 4
        cmpldi cr5, rLEN, 1
L(le4):
        cmpldi cr1, rLEN, 3
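        /* At most 4 bytes remain.  cr5 (rLEN vs. 1) and cr1 (rLEN vs. 3)
           select how many of the byte stores below are executed; each
           bltlr/beqlr returns as soon as the tail is complete. */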
        bltlr cr5
        stb rCHR, 0(rMEMP)
        beqlr cr5
        stb rCHR, 1(rMEMP)
        bltlr cr1
        stb rCHR, 2(rMEMP)
        beqlr cr1
        stb rCHR, 3(rMEMP)
        blr

/* Memset of 0-31 bytes. */
        .align 5
L(medium):
        insrdi rCHR,rCHR,32,0 /* Replicate word to double word. */
        cmpldi cr1, rLEN, 16
L(medium_tail2):
        add rMEMP, rMEMP, rLEN
L(medium_tail):
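        /* CR7 still holds the low bits of rLEN (bit 31 = 1 byte,
           bit 30 = 2, bit 29 = 4, bit 28 = 8) and cr1 holds rLEN vs. 16;
           rMEMP points just past the end, so each store below peels the
           corresponding piece off the tail. */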
        bt- 31, L(medium_31t)
        bt- 30, L(medium_30t)
L(medium_30f):
        bt- 29, L(medium_29t)
L(medium_29f):
        bge- cr1, L(medium_27t)
        bflr- 28
        std rCHR, -8(rMEMP)
        blr

L(medium_31t):
        stbu rCHR, -1(rMEMP)
        bf- 30, L(medium_30f)
L(medium_30t):
        sthu rCHR, -2(rMEMP)
        bf- 29, L(medium_29f)
L(medium_29t):
        stwu rCHR, -4(rMEMP)
        blt- cr1, L(medium_27f)
L(medium_27t):
        std rCHR, -8(rMEMP)
        stdu rCHR, -16(rMEMP)
L(medium_27f):
        bflr- 28
L(medium_28t):
        std rCHR, -8(rMEMP)
        blr
END_GEN_TB (BP_SYM (memset),TB_TOCLESS)
libc_hidden_builtin_def (memset)

/* Copied from bzero.S to prevent the linker from inserting a stub
   between bzero and memset. */
ENTRY (BP_SYM (__bzero))
        CALL_MCOUNT 3
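        /* __bzero (s, n): shuffle the arguments into memset's register
           layout (length into rLEN, a zero fill byte into rCHR) and
           branch into the memset body above. */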
#if __BOUNDED_POINTERS__
        mr r6,r4
        li r5,0
        mr r4,r3
        /* Tell memset that we don't want a return value. */
        li r3,0
        b L(_memset)
#else
        mr r5,r4
        li r4,0
        b L(_memset)
#endif
END_GEN_TB (BP_SYM (__bzero),TB_TOCLESS)

weak_alias (BP_SYM (__bzero), BP_SYM (bzero))