/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_ARCH_H__
#define __XFS_ARCH_H__

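/*
 * XFS stores all on-disk metadata in big-endian byte order.
 * XFS_NATIVE_HOST is defined when the host CPU is big-endian as well,
 * so on-disk values need no byte swapping.
 */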
#if __BYTE_ORDER == __BIG_ENDIAN
#define XFS_NATIVE_HOST 1
#else
#undef XFS_NATIVE_HOST
#endif

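/*
 * The __bitwise and __force annotations are only meaningful to sparse
 * (__CHECKER__), which uses them to type-check endian conversions;
 * they compile away to nothing in a normal build.
 */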
#ifdef __CHECKER__
# ifndef __bitwise
#  define __bitwise	__attribute__((bitwise))
# endif
#define __force		__attribute__((force))
#else
# ifndef __bitwise
#  define __bitwise
# endif
#define __force
#endif

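/*
 * Endianness-annotated scalar types: a __leNN/__beNN holds a value in
 * little-/big-endian byte order and should only be converted through
 * the helpers below (cpu_to_beNN(), beNN_to_cpu(), and so on).
 */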
typedef __u16	__bitwise	__le16;
typedef __u32	__bitwise	__le32;
typedef __u64	__bitwise	__le64;

typedef __u16	__bitwise	__be16;
typedef __u32	__bitwise	__be32;
typedef __u64	__bitwise	__be64;

/*
 * Casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64.  At least not in a
 * portable way.
 */
#define ___swab16(x) \
({ \
	__u16 __x = (x); \
	((__u16)( \
		(((__u16)(__x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
})

#define ___swab32(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		(((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(__x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(__x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
})

#define ___swab64(x) \
({ \
	__u64 __x = (x); \
	((__u64)( \
		(__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
})

#define ___constant_swab16(x) \
	((__u16)( \
		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___constant_swab32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
#define ___constant_swab64(x) \
	((__u64)( \
		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))

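/*
 * For example, ___swab32(0x12345678) evaluates to 0x78563412.  The
 * ___constant_* variants expand to plain expressions rather than
 * statement expressions, so they can also appear in static
 * initializers.
 */
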
/*
 * Provide defaults when no architecture-specific optimization is detected.
 */
#ifndef __arch__swab16
#  define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
#endif
#ifndef __arch__swab32
#  define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
#endif
#ifndef __arch__swab64
#  define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
#endif

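/*
 * Pointer variants: __arch__swabNNp(p) returns the byte-swapped value
 * of *p; __arch__swabNNs(p) swaps *p in place.
 */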
#ifndef __arch__swab16p
#  define __arch__swab16p(x) __arch__swab16(*(x))
#endif
#ifndef __arch__swab32p
#  define __arch__swab32p(x) __arch__swab32(*(x))
#endif
#ifndef __arch__swab64p
#  define __arch__swab64p(x) __arch__swab64(*(x))
#endif

#ifndef __arch__swab16s
#  define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
#endif
#ifndef __arch__swab32s
#  define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
#endif
#ifndef __arch__swab64s
#  define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
#endif

/*
 * Allow constant folding: if the argument is a compile-time constant,
 * __builtin_constant_p() selects the pure-expression ___constant_*
 * form, which the compiler can fold; runtime values go through the
 * inline __fswabNN() helpers.
 */
#define __swab16(x) \
	(__builtin_constant_p((__u16)(x)) ? \
	 ___constant_swab16((x)) : \
	 __fswab16((x)))
#define __swab32(x) \
	(__builtin_constant_p((__u32)(x)) ? \
	 ___constant_swab32((x)) : \
	 __fswab32((x)))
#define __swab64(x) \
	(__builtin_constant_p((__u64)(x)) ? \
	 ___constant_swab64((x)) : \
	 __fswab64((x)))

static __inline__ __u16 __fswab16(__u16 x)
{
	return (__extension__ __arch__swab16(x));
}
static __inline__ __u16 __swab16p(__u16 *x)
{
	return (__extension__ __arch__swab16p(x));
}
static __inline__ void __swab16s(__u16 *addr)
{
	(__extension__ ({__arch__swab16s(addr);}));
}

static __inline__ __u32 __fswab32(__u32 x)
{
	return (__extension__ __arch__swab32(x));
}
static __inline__ __u32 __swab32p(__u32 *x)
{
	return (__extension__ __arch__swab32p(x));
}
static __inline__ void __swab32s(__u32 *addr)
{
	(__extension__ ({__arch__swab32s(addr);}));
}

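/*
 * When __SWAB_64_THRU_32__ is defined (platforms with no native 64-bit
 * swap), a 64-bit swap is composed from two 32-bit swaps with the high
 * and low halves exchanged.
 */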
static __inline__ __u64 __fswab64(__u64 x)
{
# ifdef __SWAB_64_THRU_32__
	__u32 h = x >> 32;
	__u32 l = x & ((1ULL << 32) - 1);
	return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
# else
	return (__extension__ __arch__swab64(x));
# endif
}
static __inline__ __u64 __swab64p(__u64 *x)
{
	return (__extension__ __arch__swab64p(x));
}
static __inline__ void __swab64s(__u64 *addr)
{
	(__extension__ ({__arch__swab64s(addr);}));
}

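/*
 * cpu <-> disk conversions.  On big-endian hosts (XFS_NATIVE_HOST) the
 * beNN conversions are no-ops and only the le32 helpers swap bytes; on
 * little-endian hosts the situation is reversed.
 */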
#ifdef XFS_NATIVE_HOST
#define cpu_to_be16(val)	((__force __be16)(__u16)(val))
#define cpu_to_be32(val)	((__force __be32)(__u32)(val))
#define cpu_to_be64(val)	((__force __be64)(__u64)(val))
#define be16_to_cpu(val)	((__force __u16)(__be16)(val))
#define be32_to_cpu(val)	((__force __u32)(__be32)(val))
#define be64_to_cpu(val)	((__force __u64)(__be64)(val))

#define cpu_to_le32(val)	((__force __le32)__swab32((__u32)(val)))
#define le32_to_cpu(val)	(__swab32((__force __u32)(__le32)(val)))

#define __constant_cpu_to_le32(val)	\
	((__force __le32)___constant_swab32((__u32)(val)))
#define __constant_cpu_to_be32(val)	\
	((__force __be32)(__u32)(val))
#else
#define cpu_to_be16(val)	((__force __be16)__swab16((__u16)(val)))
#define cpu_to_be32(val)	((__force __be32)__swab32((__u32)(val)))
#define cpu_to_be64(val)	((__force __be64)__swab64((__u64)(val)))
#define be16_to_cpu(val)	(__swab16((__force __u16)(__be16)(val)))
#define be32_to_cpu(val)	(__swab32((__force __u32)(__be32)(val)))
#define be64_to_cpu(val)	(__swab64((__force __u64)(__be64)(val)))

#define cpu_to_le32(val)	((__force __le32)(__u32)(val))
#define le32_to_cpu(val)	((__force __u32)(__le32)(val))

#define __constant_cpu_to_le32(val)	\
	((__force __le32)(__u32)(val))
#define __constant_cpu_to_be32(val)	\
	((__force __be32)___constant_swab32((__u32)(val)))
#endif

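/*
 * Add a native-endian value to a big-endian on-disk field in place.
 */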
static inline void be16_add_cpu(__be16 *a, __s16 b)
{
	*a = cpu_to_be16(be16_to_cpu(*a) + b);
}

static inline void be32_add_cpu(__be32 *a, __s32 b)
{
	*a = cpu_to_be32(be32_to_cpu(*a) + b);
}

static inline void be64_add_cpu(__be64 *a, __s64 b)
{
	*a = cpu_to_be64(be64_to_cpu(*a) + b);
}

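/*
 * Byte-wise accessors for big-endian fields that may not be naturally
 * aligned; these are safe on architectures that trap on unaligned
 * loads and stores.
 */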
static inline uint16_t get_unaligned_be16(void *p)
{
	uint8_t *__p = p;
	return __p[0] << 8 | __p[1];
}

static inline uint32_t get_unaligned_be32(void *p)
{
	uint8_t *__p = p;
	return (uint32_t)__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3];
}

static inline uint64_t get_unaligned_be64(void *p)
{
	return (uint64_t)get_unaligned_be32(p) << 32 |
	       get_unaligned_be32(p + 4);
}

static inline void put_unaligned_be16(uint16_t val, void *p)
{
	uint8_t *__p = p;
	*__p++ = val >> 8;
	*__p++ = val;
}

static inline void put_unaligned_be32(uint32_t val, void *p)
{
	uint8_t *__p = p;
	put_unaligned_be16(val >> 16, __p);
	put_unaligned_be16(val, __p + 2);
}

static inline void put_unaligned_be64(uint64_t val, void *p)
{
	put_unaligned_be32(val >> 32, p);
	put_unaligned_be32(val, p + 4);
}

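/*
 * For example, put_unaligned_be32(0x12345678, p) stores the bytes
 * 0x12 0x34 0x56 0x78 at p[0]..p[3] regardless of host byte order.
 */
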
#endif	/* __XFS_ARCH_H__ */