/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_ARCH_H__
#define __XFS_ARCH_H__

#if __BYTE_ORDER == __BIG_ENDIAN
#define XFS_NATIVE_HOST 1
#else
#undef XFS_NATIVE_HOST
#endif
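
/*
 * Note: XFS stores all on-disk metadata in big-endian byte order, so
 * XFS_NATIVE_HOST is set exactly when the host byte order matches the
 * on-disk order and the cpu_to_be*()/be*_to_cpu() conversions below
 * collapse into plain casts.
 */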

#ifdef __CHECKER__
# ifndef __bitwise
#  define __bitwise	__attribute__((bitwise))
# endif
#define __force		__attribute__((force))
#else
# ifndef __bitwise
#  define __bitwise
# endif
#define __force
#endif

typedef __u16 __bitwise __le16;
typedef __u32 __bitwise __le32;
typedef __u64 __bitwise __le64;

typedef __u16 __bitwise __be16;
typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __be64;

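/*
 * Under sparse ("cc -D__CHECKER__"), the __bitwise annotation makes
 * endianness part of the type: mixing a __be32 with a plain __u32
 * without an explicit conversion draws a warning, and __force is the
 * escape hatch used by the conversion helpers below. With a normal
 * compiler both annotations compile away to nothing.
 */
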
/*
 * Casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a
 * portable way.
 */
#define ___swab16(x) \
({ \
	__u16 __x = (x); \
	((__u16)( \
		(((__u16)(__x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
})

#define ___swab32(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		(((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(__x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(__x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
})

#define ___swab64(x) \
({ \
	__u64 __x = (x); \
	((__u64)( \
		(__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
})
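
/*
 * For illustration: ___swab16(0x1234) evaluates to 0x3412,
 * ___swab32(0x12345678) to 0x78563412, and
 * ___swab64(0x0123456789abcdefULL) to 0xefcdab8967452301.
 */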

#define ___constant_swab16(x) \
	((__u16)( \
		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___constant_swab32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
#define ___constant_swab64(x) \
	((__u64)( \
		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))

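/*
 * Unlike the ___swab*() statement expressions above, the
 * ___constant_swab*() forms expand to plain constant expressions, so
 * they remain usable where a GNU statement expression is not allowed,
 * e.g. in file-scope initializers or case labels.
 */
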
/*
 * provide defaults when no architecture-specific optimization is detected
 */
#ifndef __arch__swab16
#  define __arch__swab16(x) ({ __u16 __tmp = (x); ___swab16(__tmp); })
#endif
#ifndef __arch__swab32
#  define __arch__swab32(x) ({ __u32 __tmp = (x); ___swab32(__tmp); })
#endif
#ifndef __arch__swab64
#  define __arch__swab64(x) ({ __u64 __tmp = (x); ___swab64(__tmp); })
#endif

#ifndef __arch__swab16p
#  define __arch__swab16p(x) __arch__swab16(*(x))
#endif
#ifndef __arch__swab32p
#  define __arch__swab32p(x) __arch__swab32(*(x))
#endif
#ifndef __arch__swab64p
#  define __arch__swab64p(x) __arch__swab64(*(x))
#endif

#ifndef __arch__swab16s
#  define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
#endif
#ifndef __arch__swab32s
#  define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
#endif
#ifndef __arch__swab64s
#  define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
#endif

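/*
 * Naming convention, as in the kernel: the plain form swaps a value,
 * the "p" form swaps the value read through a pointer, and the "s"
 * form swaps a value in place through the pointer.
 */
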
/*
 * Allow constant folding
 */
#define __swab16(x) \
	(__builtin_constant_p((__u16)(x)) ? \
	 ___constant_swab16((x)) : \
	 __fswab16((x)))
#define __swab32(x) \
	(__builtin_constant_p((__u32)(x)) ? \
	 ___constant_swab32((x)) : \
	 __fswab32((x)))
#define __swab64(x) \
	(__builtin_constant_p((__u64)(x)) ? \
	 ___constant_swab64((x)) : \
	 __fswab64((x)))

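/*
 * __builtin_constant_p() lets the compiler choose at compile time:
 * __swab32(0x12345678) folds to the constant 0x78563412, while
 * __swab32(runtime_val) goes through __fswab32() and the (possibly
 * arch-optimized) __arch__swab32().
 */
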

static __inline__ __u16 __fswab16(__u16 x)
{
	return (__extension__ __arch__swab16(x));
}
static __inline__ __u16 __swab16p(__u16 *x)
{
	return (__extension__ __arch__swab16p(x));
}
static __inline__ void __swab16s(__u16 *addr)
{
	(__extension__ ({__arch__swab16s(addr);}));
}

static __inline__ __u32 __fswab32(__u32 x)
{
	return (__extension__ __arch__swab32(x));
}
static __inline__ __u32 __swab32p(__u32 *x)
{
	return (__extension__ __arch__swab32p(x));
}
static __inline__ void __swab32s(__u32 *addr)
{
	(__extension__ ({__arch__swab32s(addr);}));
}

static __inline__ __u64 __fswab64(__u64 x)
{
# ifdef __SWAB_64_THRU_32__
	__u32 h = x >> 32;
	__u32 l = x & ((1ULL << 32) - 1);
	return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
# else
	return (__extension__ __arch__swab64(x));
# endif
}
static __inline__ __u64 __swab64p(__u64 *x)
{
	return (__extension__ __arch__swab64p(x));
}
static __inline__ void __swab64s(__u64 *addr)
{
	(__extension__ ({__arch__swab64s(addr);}));
}

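/*
 * __SWAB_64_THRU_32__ follows the kernel convention for (typically
 * 32-bit) architectures where swapping a 64-bit value is cheapest as
 * two 32-bit swaps of the exchanged halves, as __fswab64() does above.
 */
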
#ifdef XFS_NATIVE_HOST
#define cpu_to_be16(val)	((__force __be16)(__u16)(val))
#define cpu_to_be32(val)	((__force __be32)(__u32)(val))
#define cpu_to_be64(val)	((__force __be64)(__u64)(val))
#define be16_to_cpu(val)	((__force __u16)(__be16)(val))
#define be32_to_cpu(val)	((__force __u32)(__be32)(val))
#define be64_to_cpu(val)	((__force __u64)(__be64)(val))

#define cpu_to_le32(val)	((__force __le32)__swab32((__u32)(val)))
#define le32_to_cpu(val)	(__swab32((__force __u32)(__le32)(val)))

#define __constant_cpu_to_le32(val) \
	((__force __le32)___constant_swab32((__u32)(val)))
#define __constant_cpu_to_be32(val) \
	((__force __be32)(__u32)(val))
#else
#define cpu_to_be16(val)	((__force __be16)__swab16((__u16)(val)))
#define cpu_to_be32(val)	((__force __be32)__swab32((__u32)(val)))
#define cpu_to_be64(val)	((__force __be64)__swab64((__u64)(val)))
#define be16_to_cpu(val)	(__swab16((__force __u16)(__be16)(val)))
#define be32_to_cpu(val)	(__swab32((__force __u32)(__be32)(val)))
#define be64_to_cpu(val)	(__swab64((__force __u64)(__be64)(val)))

#define cpu_to_le32(val)	((__force __le32)(__u32)(val))
#define le32_to_cpu(val)	((__force __u32)(__le32)(val))

#define __constant_cpu_to_le32(val) \
	((__force __le32)(__u32)(val))
#define __constant_cpu_to_be32(val) \
	((__force __be32)___constant_swab32((__u32)(val)))
#endif
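
/*
 * For illustration: on a little-endian host cpu_to_be32(0x12345678)
 * yields the bit pattern 0x78563412, i.e. the bytes 12 34 56 78 in
 * memory order; on a big-endian host it is only a type-annotating
 * cast and the bit pattern is unchanged.
 */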

static inline void be16_add_cpu(__be16 *a, __s16 b)
{
	*a = cpu_to_be16(be16_to_cpu(*a) + b);
}

static inline void be32_add_cpu(__be32 *a, __s32 b)
{
	*a = cpu_to_be32(be32_to_cpu(*a) + b);
}

static inline void be64_add_cpu(__be64 *a, __s64 b)
{
	*a = cpu_to_be64(be64_to_cpu(*a) + b);
}
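
/*
 * These add a CPU-order (possibly negative) increment to a big-endian
 * field in place, e.g. be32_add_cpu(&counter, -1) to decrement a
 * hypothetical __be32 on-disk counter.
 */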

static inline uint16_t get_unaligned_be16(void *p)
{
	uint8_t *__p = p;
	return __p[0] << 8 | __p[1];
}

static inline uint32_t get_unaligned_be32(void *p)
{
	uint8_t *__p = p;
	return (uint32_t)__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3];
}

static inline uint64_t get_unaligned_be64(void *p)
{
	return (uint64_t)get_unaligned_be32(p) << 32 |
	       get_unaligned_be32(p + 4);
}

static inline void put_unaligned_be16(uint16_t val, void *p)
{
	uint8_t *__p = p;
	*__p++ = val >> 8;
	*__p++ = val;
}

static inline void put_unaligned_be32(uint32_t val, void *p)
{
	uint8_t *__p = p;
	put_unaligned_be16(val >> 16, __p);
	put_unaligned_be16(val, __p + 2);
}

static inline void put_unaligned_be64(uint64_t val, void *p)
{
	put_unaligned_be32(val >> 32, p);
	put_unaligned_be32(val, p + 4);
}
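
/*
 * These read and write big-endian values byte by byte, so the pointer
 * need not be naturally aligned. For illustration: given uint8_t
 * buf[] = { 0x12, 0x34 }, get_unaligned_be16(buf) returns 0x1234, and
 * put_unaligned_be16(0x1234, buf) stores exactly those two bytes.
 */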

#endif	/* __XFS_ARCH_H__ */