/*
 * U-boot - bitops.h Routines for bit operations
 *
 * Copyright (c) 2005-2007 Analog Devices Inc.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

#ifndef _BLACKFIN_BITOPS_H
#define _BLACKFIN_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <asm/byteorder.h>
#include <asm/system.h>

#ifdef __KERNEL__
/*
 * Function prototypes to keep gcc -Wall happy
 */

/*
 * The __ functions are not atomic
 */

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}
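
/*
 * Illustrative note (not part of the original header): ffz() counts the
 * consecutive 1-bits starting from bit 0, i.e. it returns the index of
 * the lowest clear bit.  For example:
 *
 *	ffz(0x00000000UL) == 0	(bit 0 is already clear)
 *	ffz(0x0000000fUL) == 4	(bits 0-3 are set, bit 4 is clear)
 *
 * As noted above, the result is undefined for ~0UL, so callers are
 * expected to test for that value first.
 */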

static __inline__ void set_bit(int nr, volatile void *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
}

static __inline__ void __set_bit(int nr, volatile void *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}
#define PLATFORM__SET_BIT
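
/*
 * Note (added for clarity, not in the original header): set_bit() and
 * the other non-underscore routines in this file mask interrupts around
 * the read-modify-write, so they are safe against concurrent updates
 * from interrupt context; the __set_bit()-style variants skip that and
 * are only safe when the caller already has exclusive access to the
 * bitmap.  PLATFORM__SET_BIT appears to tell the generic U-Boot bitops
 * code that this architecture supplies its own __set_bit().
 */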

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
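
/*
 * Illustrative usage (not from the original source): when clearing a
 * lock-style bit, callers typically order the store against surrounding
 * memory accesses explicitly, e.g.:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &flags_word);	// LOCK_BIT and flags_word
 *	smp_mb__after_clear_bit();		// are hypothetical names
 */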

static __inline__ void clear_bit(int nr, volatile void *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void change_bit(int nr, volatile void *addr)
{
	int mask;
	unsigned long flags;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	local_irq_save(flags);
	*ADDR ^= mask;
	local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile void *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

static __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

static __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, volatile void *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_bit((nr),(addr)) : \
	 __test_bit((nr),(addr)))
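
/*
 * Illustrative usage (not part of the original header): these helpers
 * treat the bitmap as an array of 32-bit words, with bit 'nr' living in
 * word nr/32.  For example, with a hypothetical bitmap of 64 bits
 * (32-bit longs):
 *
 *	unsigned long bitmap[2] = { 0, 0 };
 *
 *	set_bit(35, bitmap);		// sets bit 3 of bitmap[1]
 *	if (test_bit(35, bitmap))
 *		...
 */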

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *)addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32 - offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;	/* mask off the invalid bits above 'size' */
found_middle:
	return result + ffz(tmp);
}
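
/*
 * Illustrative usage (not part of the original header): scanning a
 * bitmap for a free slot.  The names below are hypothetical.
 *
 *	unsigned long inuse[4] = { ~0UL, 0x7fffffffUL, 0, 0 };
 *	int slot = find_first_zero_bit(inuse, 128);
 *	// slot == 63: word 0 is full, bit 31 of word 1 is the first clear bit
 */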

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
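
/*
 * For example (illustrative, not in the original header):
 *
 *	hweight32(0xf0f00000) == 8
 *	hweight16(0x000f) == 4
 *	hweight8(0x81) == 2
 *
 * The generic_hweightN() helpers are expected to be provided by the
 * common U-Boot bitops code.
 */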

static __inline__ int ext2_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *)addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	local_irq_restore(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *)addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	local_irq_restore(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const volatile void *addr)
{
	int mask;
	const volatile unsigned char *ADDR = (const unsigned char *)addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
							 unsigned long size,
							 unsigned long offset)
{
	unsigned long *p = ((unsigned long *)addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32 - offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;	/* mask off the invalid bits above 'size' */
found_middle:
	return result + ffz(tmp);
}
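
/*
 * Illustrative note (not from the original source): the ext2_ helpers
 * address the bitmap byte by byte in little-endian bit order, so on
 * Blackfin they index bits the same way as the word-based routines
 * above.  For example, with a hypothetical buffer:
 *
 *	unsigned char bitmap[8] = { 0 };
 *
 *	ext2_set_bit(9, bitmap);	// sets bit 1 of bitmap[1]
 *	ext2_test_bit(9, bitmap);	// -> 1
 */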

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _BLACKFIN_BITOPS_H */