include/xfs_arch.h
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_ARCH_H__
#define __XFS_ARCH_H__

#if __BYTE_ORDER == __BIG_ENDIAN
#define XFS_NATIVE_HOST	1
#else
#undef XFS_NATIVE_HOST
#endif
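
/*
 * Illustration (editor's sketch; example_is_native() is hypothetical, not
 * part of this header): XFS on-disk metadata is big-endian, so
 * XFS_NATIVE_HOST is defined only when the CPU byte order already matches
 * the disk format:
 *
 *	static inline int example_is_native(void)
 *	{
 *	#ifdef XFS_NATIVE_HOST
 *		return 1;	// big-endian host: no swabbing needed
 *	#else
 *		return 0;	// little-endian host: conversions swap bytes
 *	#endif
 *	}
 */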
#ifdef __CHECKER__
# define __bitwise	__attribute__((bitwise))
# define __force	__attribute__((force))
#else
# define __bitwise
# define __force
#endif
typedef __u16	__bitwise	__le16;
typedef __u32	__bitwise	__le32;
typedef __u64	__bitwise	__le64;

typedef __u16	__bitwise	__be16;
typedef __u32	__bitwise	__be32;
typedef __u64	__bitwise	__be64;
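
/*
 * Illustration (editor's addition): under sparse (__CHECKER__) these
 * annotated types flag byte-order mixups that plain integers would hide:
 *
 *	__be32 disk = cpu_to_be32(7);		// OK: explicit conversion
 *	__u32 bad  = disk + 1;			// sparse warns: restricted type
 *	__u32 good = be32_to_cpu(disk) + 1;	// OK
 */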
/*
 * Casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */
#define ___swab16(x) \
({ \
	__u16 __x = (x); \
	((__u16)( \
		(((__u16)(__x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
})

#define ___swab32(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		(((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(__x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(__x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
})

#define ___swab64(x) \
({ \
	__u64 __x = (x); \
	((__u64)( \
		(__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
})
#define ___constant_swab16(x) \
	((__u16)( \
		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___constant_swab32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
#define ___constant_swab64(x) \
	((__u64)( \
		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
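
/*
 * Worked example (editor's addition): ___constant_swab32(0x12345678UL)
 * evaluates entirely at compile time:
 *
 *	(0x00000078UL << 24) -> 0x78000000
 *	(0x00005600UL <<  8) -> 0x00560000
 *	(0x00340000UL >>  8) -> 0x00003400
 *	(0x12000000UL >> 24) -> 0x00000012
 *	OR'ed together       -> 0x78563412
 */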
/*
 * Provide defaults when no architecture-specific optimization is detected.
 */
#ifndef __arch__swab16
#  define __arch__swab16(x) ({ __u16 __tmp = (x) ; ___swab16(__tmp); })
#endif
#ifndef __arch__swab32
#  define __arch__swab32(x) ({ __u32 __tmp = (x) ; ___swab32(__tmp); })
#endif
#ifndef __arch__swab64
#  define __arch__swab64(x) ({ __u64 __tmp = (x) ; ___swab64(__tmp); })
#endif

#ifndef __arch__swab16p
#  define __arch__swab16p(x) __arch__swab16(*(x))
#endif
#ifndef __arch__swab32p
#  define __arch__swab32p(x) __arch__swab32(*(x))
#endif
#ifndef __arch__swab64p
#  define __arch__swab64p(x) __arch__swab64(*(x))
#endif

#ifndef __arch__swab16s
#  define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
#endif
#ifndef __arch__swab32s
#  define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
#endif
#ifndef __arch__swab64s
#  define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
#endif
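
/*
 * Illustration (editor's sketch): the three families differ only in how
 * the operand is passed: by value, by pointer, or swapped in place.
 *
 *	__u32 v = 0x11223344;
 *	__u32 a = __arch__swab32(v);	// by value   -> 0x44332211
 *	__u32 b = __arch__swab32p(&v);	// by pointer -> 0x44332211, v unchanged
 *	__arch__swab32s(&v);		// in place   -> v == 0x44332211
 */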
/*
 * Allow constant folding
 */
# define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
 ___constant_swab16((x)) : \
 __fswab16((x)))
# define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___constant_swab32((x)) : \
 __fswab32((x)))
# define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
 ___constant_swab64((x)) : \
 __fswab64((x)))
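
/*
 * Illustration (editor's addition; some_runtime_u32 is a hypothetical
 * variable): __builtin_constant_p selects the pure constant-expression
 * form, so literals fold at compile time while runtime values go through
 * the inline helpers:
 *
 *	__u32 magic = __swab32(0x58465342UL);	// folded to 0x42534658
 *	__u32 live  = __swab32(some_runtime_u32);	// calls __fswab32()
 */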
static __inline__ __u16 __fswab16(__u16 x)
{
	return (__extension__ __arch__swab16(x));
}
static __inline__ __u16 __swab16p(__u16 *x)
{
	return (__extension__ __arch__swab16p(x));
}
static __inline__ void __swab16s(__u16 *addr)
{
	(__extension__ ({__arch__swab16s(addr);}));
}

static __inline__ __u32 __fswab32(__u32 x)
{
	return (__extension__ __arch__swab32(x));
}
static __inline__ __u32 __swab32p(__u32 *x)
{
	return (__extension__ __arch__swab32p(x));
}
static __inline__ void __swab32s(__u32 *addr)
{
	(__extension__ ({__arch__swab32s(addr);}));
}

static __inline__ __u64 __fswab64(__u64 x)
{
#ifdef __SWAB_64_THRU_32__
	/* Swap a 64-bit value as two independently swapped 32-bit halves. */
	__u32 h = x >> 32;
	__u32 l = x & ((1ULL << 32) - 1);
	return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
#else
	return (__extension__ __arch__swab64(x));
#endif
}
static __inline__ __u64 __swab64p(__u64 *x)
{
	return (__extension__ __arch__swab64p(x));
}
static __inline__ void __swab64s(__u64 *addr)
{
	(__extension__ ({__arch__swab64s(addr);}));
}
#ifdef XFS_NATIVE_HOST
#define cpu_to_be16(val)	((__force __be16)(__u16)(val))
#define cpu_to_be32(val)	((__force __be32)(__u32)(val))
#define cpu_to_be64(val)	((__force __be64)(__u64)(val))
#define be16_to_cpu(val)	((__force __u16)(__be16)(val))
#define be32_to_cpu(val)	((__force __u32)(__be32)(val))
#define be64_to_cpu(val)	((__force __u64)(__be64)(val))

#define cpu_to_le32(val)	((__force __le32)__swab32((__u32)(val)))
#define le32_to_cpu(val)	(__swab32((__force __u32)(__le32)(val)))

#define __constant_cpu_to_le32(val)	\
	((__force __le32)___constant_swab32((__u32)(val)))
#define __constant_cpu_to_be32(val)	\
	((__force __be32)(__u32)(val))
#else
#define cpu_to_be16(val)	((__force __be16)__swab16((__u16)(val)))
#define cpu_to_be32(val)	((__force __be32)__swab32((__u32)(val)))
#define cpu_to_be64(val)	((__force __be64)__swab64((__u64)(val)))
#define be16_to_cpu(val)	(__swab16((__force __u16)(__be16)(val)))
#define be32_to_cpu(val)	(__swab32((__force __u32)(__be32)(val)))
#define be64_to_cpu(val)	(__swab64((__force __u64)(__be64)(val)))

#define cpu_to_le32(val)	((__force __le32)(__u32)(val))
#define le32_to_cpu(val)	((__force __u32)(__le32)(val))

#define __constant_cpu_to_le32(val)	\
	((__force __le32)(__u32)(val))
#define __constant_cpu_to_be32(val)	\
	((__force __be32)___constant_swab32((__u32)(val)))
#endif
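
/*
 * Illustration (editor's sketch; xfs_example_sb is a hypothetical struct,
 * not from this header): converting a big-endian on-disk field to and
 * from CPU order:
 *
 *	struct xfs_example_sb { __be32 sb_blocksize; };
 *
 *	__u32 bsize = be32_to_cpu(sb->sb_blocksize);	// disk -> CPU
 *	sb->sb_blocksize = cpu_to_be32(bsize * 2);	// CPU  -> disk
 */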
static inline void be16_add_cpu(__be16 *a, __s16 b)
{
	*a = cpu_to_be16(be16_to_cpu(*a) + b);
}

static inline void be32_add_cpu(__be32 *a, __s32 b)
{
	*a = cpu_to_be32(be32_to_cpu(*a) + b);
}

static inline void be64_add_cpu(__be64 *a, __s64 b)
{
	*a = cpu_to_be64(be64_to_cpu(*a) + b);
}
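
/*
 * Illustration (editor's addition): the add helpers take a signed delta,
 * so they decrement as well as increment an on-disk counter in place:
 *
 *	__be32 count = cpu_to_be32(10);
 *	be32_add_cpu(&count, -3);	// count now holds 7, big-endian
 */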
static inline uint16_t get_unaligned_be16(void *p)
{
	uint8_t *__p = p;
	return __p[0] << 8 | __p[1];
}

static inline uint32_t get_unaligned_be32(void *p)
{
	uint8_t *__p = p;
	return (uint32_t)__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3];
}

static inline uint64_t get_unaligned_be64(void *p)
{
	return (uint64_t)get_unaligned_be32(p) << 32 |
			 get_unaligned_be32(p + 4);
}
static inline void put_unaligned_be16(uint16_t val, void *p)
{
	uint8_t *__p = p;

	/* Store most-significant byte first (big-endian on-disk order). */
	*__p++ = val >> 8;
	*__p = val & 0xff;
}

static inline void put_unaligned_be32(uint32_t val, void *p)
{
	uint8_t *__p = p;

	put_unaligned_be16(val >> 16, __p);
	put_unaligned_be16(val, __p + 2);
}

static inline void put_unaligned_be64(uint64_t val, void *p)
{
	put_unaligned_be32(val >> 32, p);
	put_unaligned_be32(val, p + 4);
}
#endif	/* __XFS_ARCH_H__ */