/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>

#include <uapi/linux/kernel.h>

/* Mask with the set bits covering the first 'n' bytes of a long as loaded from memory */
#ifdef __LITTLE_ENDIAN
# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif
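
/*
 * For instance, aligned_byte_mask(2) with BITS_PER_LONG == 64 is 0xffff on
 * little-endian and 0xffff000000000000 on big-endian: in both cases the mask
 * covers the two lowest-addressed bytes of the long.
 */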

#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
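
/*
 * These conversions round up: e.g. BITS_TO_LONGS(65) is 2 with
 * BITS_PER_LONG == 64, and BITS_TO_BYTES(9) is 2.
 */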

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Defined here because these may be needed by architecture-specific static
 * inlines.
 */

#include <asm-generic/bitops/generic-non-atomic.h>

/*
 * Many architecture-specific non-atomic bitops contain inline asm code and,
 * because of that, the compiler can't optimize them to compile-time
 * expressions or constants. By contrast, the generic_*() helpers are defined
 * in pure C and the compiler optimizes them well.
 * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
 * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
 * the arguments can be resolved at compile time. That expression is itself a
 * constant and doesn't bring any functional changes to the rest of the cases.
 * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
 * passing a bitmap from .bss or .data (-> `!!addr` is always true).
 */
#define bitop(op, nr, addr)						\
((__builtin_constant_p(nr) &&						\
  __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) &&	\
  (uintptr_t)(addr) != (uintptr_t)NULL &&				\
  __builtin_constant_p(*(const unsigned long *)(addr))) ?		\
 const##op(nr, addr) : op(nr, addr))

#define __set_bit(nr, addr)		bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr)		bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr)		bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr)	bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr)	bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr)	bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr)		bitop(_test_bit, nr, addr)
#define test_bit_acquire(nr, addr)	bitop(_test_bit_acquire, nr, addr)
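
/*
 * A sketch of the intended compile-time folding (not an additional API):
 *
 *	unsigned long foo = 0;
 *	__set_bit(5, &foo);
 *
 * Here both the bit number and *&foo are compile-time constants, so the
 * const___set_bit() variant is picked and the whole sequence can fold to
 * `foo = BIT(5)`.
 */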

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope.
 */
#include <asm/bitops.h>

/* Check that the bitops prototypes are sane */
#define __check_bitop_pr(name)						\
	static_assert(__same_type(arch_##name, generic_##name) &&	\
		      __same_type(const_##name, generic_##name) &&	\
		      __same_type(_##name, generic_##name))

__check_bitop_pr(__set_bit);
__check_bitop_pr(__clear_bit);
__check_bitop_pr(__change_bit);
__check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);

#undef __check_bitop_pr

static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
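
/*
 * hweight_long() is the population count of the word, i.e. the number of
 * set bits: hweight_long(0xf0) == 4.
 */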

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << (shift & 63)) | (word >> ((-shift) & 63));
}

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
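
/*
 * For example, rol32(0x12345678, 8) == 0x34567812 and
 * ror32(0x12345678, 8) == 0x78123456. The `& 31` masking (and its
 * width-specific counterparts below) keeps a shift of 0 well defined
 * instead of producing an undefined shift by the full word width.
 */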

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
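
/*
 * For example, sign_extend32(0x80, 7) treats bit 7 as the sign bit of an
 * 8-bit quantity and yields -128, while sign_extend32(0x7f, 7) yields 127.
 */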

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}

static inline unsigned fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
	if (count == 0)
		return -1;

	return fls(--count);
}
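
/*
 * I.e. the order of the smallest power of two >= @count:
 * get_count_order(4) is 2 and get_count_order(5) is 3.
 */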

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * It is the same as get_count_order(), but with a long type parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	return (int)fls_long(--l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64-bit arches this is a synonym for __ffs().
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
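
/*
 * E.g. __ffs64(0x40) == 6: bit 6 is the least significant set bit. Note
 * that __ffs()/__ffs64() number bits from 0, unlike ffs(), which returns
 * 1 + the bit index and reserves 0 for "no bits set".
 */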

/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
				       bool value)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

/* Non-atomic variant of assign_bit(), built on the non-atomic bitops. */
static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
					 bool value)
{
	if (value)
		__set_bit(nr, addr);
	else
		__clear_bit(nr, addr);
}

/**
 * __ptr_set_bit - Set bit in a pointer's value
 * @nr: the bit to set
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_set_bit(bit, &p);
 */
#define __ptr_set_bit(nr, addr)					\
	({							\
		typecheck_pointer(*(addr));			\
		__set_bit(nr, (unsigned long *)(addr));		\
	})

/**
 * __ptr_clear_bit - Clear bit in a pointer's value
 * @nr: the bit to clear
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_clear_bit(bit, &p);
 */
#define __ptr_clear_bit(nr, addr)				\
	({							\
		typecheck_pointer(*(addr));			\
		__clear_bit(nr, (unsigned long *)(addr));	\
	})

/**
 * __ptr_test_bit - Test bit in a pointer's value
 * @nr: the bit to test
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	if (__ptr_test_bit(bit, &p)) {
 *		...
 *	} else {
 *		...
 *	}
 */
#define __ptr_test_bit(nr, addr)				\
	({							\
		typecheck_pointer(*(addr));			\
		test_bit(nr, (unsigned long *)(addr));		\
	})
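
/*
 * A typical use of these helpers is stashing a tag in the low bits of an
 * aligned pointer, e.g. (a sketch; assumes bit 0 is free because the
 * pointee's alignment is at least 2, and some_object() is hypothetical):
 *
 *	struct object *p = some_object();
 *	__ptr_set_bit(0, &p);
 *	...
 *	if (__ptr_test_bit(0, &p))
 *		__ptr_clear_bit(0, &p);
 */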

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)					\
({									\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);		\
	typeof(*(ptr)) old__, new__;					\
									\
	do {								\
		old__ = READ_ONCE(*(ptr));				\
		new__ = (old__ & ~mask__) | bits__;			\
	} while (cmpxchg(ptr, old__, new__) != old__);			\
									\
	old__;								\
})
#endif
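
/*
 * For example (illustrative; 'flags' is an assumed unsigned long),
 * atomically replacing the 4-bit field at bits 7:4 with the value 0x5
 * and getting the previous word back:
 *
 *	old = set_mask_bits(&flags, GENMASK(7, 4), 0x5 << 4);
 */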

#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)				\
({									\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);	\
	typeof(*(ptr)) old__, new__;					\
									\
	do {								\
		old__ = READ_ONCE(*(ptr));				\
		new__ = old__ & ~clear__;				\
	} while (!(old__ & test__) &&					\
		 cmpxchg(ptr, old__, new__) != old__);			\
									\
	!(old__ & test__);						\
})
#endif
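
/*
 * bit_clear_unless() clears @clear in *@ptr unless one of the @test bits
 * is already set, returning true iff the clear was performed. A sketch
 * ('state' is an assumed unsigned long):
 *
 *	if (bit_clear_unless(&state, BIT(0), BIT(1)))
 *		pr_debug("BIT(1) was clear, BIT(0) is now cleared\n");
 */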

#endif /* __KERNEL__ */
#endif