/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_ARCH_H__
#define __XFS_ARCH_H__

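/*
 * The XFS on-disk format is big-endian, so on a big-endian host
 * (XFS_NATIVE_HOST) the cpu_to_be*()/be*_to_cpu() conversions below
 * reduce to plain casts and no byte swapping is needed.
 */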
#if __BYTE_ORDER == __BIG_ENDIAN
#define XFS_NATIVE_HOST 1
#else
#undef XFS_NATIVE_HOST
#endif

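/*
 * Under sparse (__CHECKER__), the __bitwise attribute makes the __be* and
 * __le* typedefs distinct from plain integers, so mixing them without a
 * conversion (or a __force cast) is flagged.  Illustrative only:
 *
 *	__be32 x = 1;			sparse: different base types
 *	__be32 y = cpu_to_be32(1);	clean
 */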
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef __u16 __bitwise __le16;
typedef __u32 __bitwise __le32;
typedef __u64 __bitwise __le64;

typedef __u16 __bitwise __be16;
typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __be64;

/*
 * Casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */
#define ___swab16(x) \
({ \
	__u16 __x = (x); \
	((__u16)( \
		(((__u16)(__x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
})

#define ___swab32(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		(((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
		(((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
		(((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
})

#define ___swab64(x) \
({ \
	__u64 __x = (x); \
	((__u64)( \
		(__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
})

#define ___constant_swab16(x) \
	((__u16)( \
		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___constant_swab32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
		(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
#define ___constant_swab64(x) \
	((__u64)( \
		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
		(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))

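/*
 * For example, the swaps above simply reverse byte order:
 *
 *	___constant_swab16(0x1234)     == 0x3412
 *	___constant_swab32(0x12345678) == 0x78563412
 */
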
/*
 * provide defaults when no architecture-specific optimization is detected
 */
#ifndef __arch__swab16
# define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
#endif
#ifndef __arch__swab32
# define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
#endif
#ifndef __arch__swab64
# define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
#endif

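/*
 * The *p variants swap the value a pointer refers to, and the *s variants
 * swap it in place through the pointer.
 */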
#ifndef __arch__swab16p
# define __arch__swab16p(x) __arch__swab16(*(x))
#endif
#ifndef __arch__swab32p
# define __arch__swab32p(x) __arch__swab32(*(x))
#endif
#ifndef __arch__swab64p
# define __arch__swab64p(x) __arch__swab64(*(x))
#endif

#ifndef __arch__swab16s
# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
#endif
#ifndef __arch__swab32s
# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
#endif
#ifndef __arch__swab64s
# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
#endif

/*
 * Allow constant folding
 */
# define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
 ___constant_swab16((x)) : \
 __fswab16((x)))
# define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___constant_swab32((x)) : \
 __fswab32((x)))
# define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
 ___constant_swab64((x)) : \
 __fswab64((x)))

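/*
 * With __builtin_constant_p(), constant arguments are byte-swapped via
 * ___constant_swab*() and folded by the compiler; run-time values go
 * through the __fswab*() inlines below.  Illustrative only (val is a
 * hypothetical run-time variable):
 *
 *	__swab32(0x12345678)	folds to 0x78563412 at compile time
 *	__swab32(val)		expands to __fswab32(val)
 */
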
static __inline__ __u16 __fswab16(__u16 x)
{
	return (__extension__ __arch__swab16(x));
}
static __inline__ __u16 __swab16p(__u16 *x)
{
	return (__extension__ __arch__swab16p(x));
}
static __inline__ void __swab16s(__u16 *addr)
{
	(__extension__ ({__arch__swab16s(addr);}));
}

static __inline__ __u32 __fswab32(__u32 x)
{
	return (__extension__ __arch__swab32(x));
}
static __inline__ __u32 __swab32p(__u32 *x)
{
	return (__extension__ __arch__swab32p(x));
}
static __inline__ void __swab32s(__u32 *addr)
{
	(__extension__ ({__arch__swab32s(addr);}));
}

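/*
 * When __SWAB_64_THRU_32__ is defined, __fswab64() composes the 64-bit
 * swap from two 32-bit swaps of the high and low halves, which may be
 * preferable on hosts without an efficient 64-bit swap.
 */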
static __inline__ __u64 __fswab64(__u64 x)
{
# ifdef __SWAB_64_THRU_32__
	__u32 h = x >> 32;
	__u32 l = x & ((1ULL<<32)-1);
	return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
# else
	return (__extension__ __arch__swab64(x));
# endif
}
static __inline__ __u64 __swab64p(__u64 *x)
{
	return (__extension__ __arch__swab64p(x));
}
static __inline__ void __swab64s(__u64 *addr)
{
	(__extension__ ({__arch__swab64s(addr);}));
}

#ifdef XFS_NATIVE_HOST
#define cpu_to_be16(val) ((__force __be16)(__u16)(val))
#define cpu_to_be32(val) ((__force __be32)(__u32)(val))
#define cpu_to_be64(val) ((__force __be64)(__u64)(val))
#define be16_to_cpu(val) ((__force __u16)(__be16)(val))
#define be32_to_cpu(val) ((__force __u32)(__be32)(val))
#define be64_to_cpu(val) ((__force __u64)(__be64)(val))

#define cpu_to_le32(val) ((__force __le32)__swab32((__u32)(val)))
#define le32_to_cpu(val) (__swab32((__force __u32)(__le32)(val)))

#define __constant_cpu_to_le32(val) \
	((__force __le32)___constant_swab32((__u32)(val)))
#define __constant_cpu_to_be32(val) \
	((__force __be32)(__u32)(val))
#else
#define cpu_to_be16(val) ((__force __be16)__swab16((__u16)(val)))
#define cpu_to_be32(val) ((__force __be32)__swab32((__u32)(val)))
#define cpu_to_be64(val) ((__force __be64)__swab64((__u64)(val)))
#define be16_to_cpu(val) (__swab16((__force __u16)(__be16)(val)))
#define be32_to_cpu(val) (__swab32((__force __u32)(__be32)(val)))
#define be64_to_cpu(val) (__swab64((__force __u64)(__be64)(val)))

#define cpu_to_le32(val) ((__force __le32)(__u32)(val))
#define le32_to_cpu(val) ((__force __u32)(__le32)(val))

#define __constant_cpu_to_le32(val) \
	((__force __le32)(__u32)(val))
#define __constant_cpu_to_be32(val) \
	((__force __be32)___constant_swab32((__u32)(val)))
#endif
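
/*
 * Typical use (illustrative values only): on-disk fields are kept
 * big-endian, so they are converted on their way to and from disk:
 *
 *	__be32 disk = cpu_to_be32(0x12345678);
 *	__u32 host = be32_to_cpu(disk);		yields 0x12345678 again
 */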

static inline void be16_add_cpu(__be16 *a, __s16 b)
{
	*a = cpu_to_be16(be16_to_cpu(*a) + b);
}

static inline void be32_add_cpu(__be32 *a, __s32 b)
{
	*a = cpu_to_be32(be32_to_cpu(*a) + b);
}

static inline void be64_add_cpu(__be64 *a, __s64 b)
{
	*a = cpu_to_be64(be64_to_cpu(*a) + b);
}
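
/*
 * Example (illustrative values only): adjust a counter kept in big-endian
 * byte order without converting it permanently:
 *
 *	__be32 counter = cpu_to_be32(41);
 *	be32_add_cpu(&counter, 1);	counter now equals cpu_to_be32(42)
 */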

static inline __uint16_t get_unaligned_be16(void *p)
{
	__uint8_t *__p = p;
	return __p[0] << 8 | __p[1];
}

static inline __uint32_t get_unaligned_be32(void *p)
{
	__uint8_t *__p = p;
	return __p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3];
}

static inline __uint64_t get_unaligned_be64(void *p)
{
	return (__uint64_t)get_unaligned_be32(p) << 32 |
	       get_unaligned_be32(p + 4);
}

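/*
 * The get/put_unaligned_be* helpers access big-endian values one byte at
 * a time, so the pointer needs no particular alignment.  Illustrative
 * values only:
 *
 *	__uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *	get_unaligned_be32(buf) returns 0x12345678 on any host.
 */
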
static inline void put_unaligned_be16(__uint16_t val, void *p)
{
	__uint8_t *__p = p;
	*__p++ = val >> 8;
	*__p++ = val;
}

static inline void put_unaligned_be32(__uint32_t val, void *p)
{
	__uint8_t *__p = p;
	put_unaligned_be16(val >> 16, __p);
	put_unaligned_be16(val, __p + 2);
}

static inline void put_unaligned_be64(__uint64_t val, void *p)
{
	put_unaligned_be32(val >> 32, p);
	put_unaligned_be32(val, p + 4);
}
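
/*
 * Example (illustrative values only): store 0x1234 most-significant
 * byte first:
 *
 *	__uint8_t buf[2];
 *	put_unaligned_be16(0x1234, buf);	buf[0] == 0x12, buf[1] == 0x34
 */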

#endif /* __XFS_ARCH_H__ */