/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
#ifndef __XFS_ARCH_H__
#define __XFS_ARCH_H__

#ifndef XFS_BIG_FILESYSTEMS
#error XFS_BIG_FILESYSTEMS must be defined true or false
#endif

#ifdef __KERNEL__

#include <asm/byteorder.h>

#ifdef __LITTLE_ENDIAN
# define __BYTE_ORDER __LITTLE_ENDIAN
#endif
#ifdef __BIG_ENDIAN
# define __BYTE_ORDER __BIG_ENDIAN
#endif

#endif	/* __KERNEL__ */

/* do we need conversion? */

#define ARCH_NOCONVERT 1
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define ARCH_CONVERT	0
#else
#define ARCH_CONVERT	ARCH_NOCONVERT
#endif
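
/*
 * Usage sketch (illustrative): the XFS on-disk format is big-endian, so
 * ARCH_CONVERT differs from ARCH_NOCONVERT only on little-endian hosts,
 * where the conversion macros defined below must byte-swap.  With a
 * hypothetical big-endian on-disk field "ondisk_val", a caller would
 * write something like
 *
 *	__u32 native_val = INT_GET(ondisk_val, ARCH_CONVERT);
 *
 * On a big-endian host ARCH_CONVERT == ARCH_NOCONVERT and this collapses
 * to a plain read.
 */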

/* generic swapping macros */

#define INT_SWAP16(type,var) ((typeof(type))(__swab16((__u16)(var))))
#define INT_SWAP32(type,var) ((typeof(type))(__swab32((__u32)(var))))
#define INT_SWAP64(type,var) ((typeof(type))(__swab64((__u64)(var))))

#define INT_SWAP(type, var) \
	((sizeof(type) == 8) ? INT_SWAP64(type,var) : \
	((sizeof(type) == 4) ? INT_SWAP32(type,var) : \
	((sizeof(type) == 2) ? INT_SWAP16(type,var) : \
	(var))))

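/*
 * Usage sketch (illustrative): INT_SWAP() dispatches to the swab routine
 * matching the width of its first argument; both arguments are normally
 * the same lvalue, the first being used only for sizeof()/typeof().
 * For a hypothetical 32-bit value:
 *
 *	__u32 val = 0x12345678;
 *	__u32 swapped = INT_SWAP(val, val);	// yields 0x78563412
 */
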
#define INT_SWAP_UNALIGNED_32(from,to) \
    { \
	((__u8*)(to))[0] = ((__u8*)(from))[3]; \
	((__u8*)(to))[1] = ((__u8*)(from))[2]; \
	((__u8*)(to))[2] = ((__u8*)(from))[1]; \
	((__u8*)(to))[3] = ((__u8*)(from))[0]; \
    }

#define INT_SWAP_UNALIGNED_64(from,to) \
    { \
	INT_SWAP_UNALIGNED_32( ((__u8*)(from)) + 4, ((__u8*)(to))); \
	INT_SWAP_UNALIGNED_32( ((__u8*)(from)), ((__u8*)(to)) + 4); \
    }

/*
 * get and set integers from potentially unaligned locations
 */

#define INT_GET_UNALIGNED_16_LE(pointer) \
    ((__u16)((((__u8*)(pointer))[0]     ) | (((__u8*)(pointer))[1] << 8 )))
#define INT_GET_UNALIGNED_16_BE(pointer) \
    ((__u16)((((__u8*)(pointer))[0] << 8) | (((__u8*)(pointer))[1])))
#define INT_SET_UNALIGNED_16_LE(pointer,value) \
    { \
	((__u8*)(pointer))[0] = (((value)     ) & 0xff); \
	((__u8*)(pointer))[1] = (((value) >> 8) & 0xff); \
    }
#define INT_SET_UNALIGNED_16_BE(pointer,value) \
    { \
	((__u8*)(pointer))[0] = (((value) >> 8) & 0xff); \
	((__u8*)(pointer))[1] = (((value)     ) & 0xff); \
    }

#define INT_GET_UNALIGNED_32_LE(pointer) \
    ((__u32)((((__u8*)(pointer))[0]      ) | (((__u8*)(pointer))[1] << 8 ) \
	    |(((__u8*)(pointer))[2] << 16) | (((__u8*)(pointer))[3] << 24)))
#define INT_GET_UNALIGNED_32_BE(pointer) \
    ((__u32)((((__u8*)(pointer))[0] << 24) | (((__u8*)(pointer))[1] << 16) \
	    |(((__u8*)(pointer))[2] << 8 ) | (((__u8*)(pointer))[3]      )))

#define INT_GET_UNALIGNED_64_LE(pointer) \
    (((__u64)(INT_GET_UNALIGNED_32_LE(((__u8*)(pointer))+4)) << 32 ) \
    |((__u64)(INT_GET_UNALIGNED_32_LE(((__u8*)(pointer))  ))        ))
#define INT_GET_UNALIGNED_64_BE(pointer) \
    (((__u64)(INT_GET_UNALIGNED_32_BE(((__u8*)(pointer))  )) << 32 ) \
    |((__u64)(INT_GET_UNALIGNED_32_BE(((__u8*)(pointer))+4))        ))
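
/*
 * Usage sketch (illustrative): these helpers go through a byte pointer,
 * so they are safe on fields that are not naturally aligned.  With a
 * hypothetical on-disk buffer pointer "bp":
 *
 *	__u8  *bp = ...;				// may be unaligned
 *	__u32  v  = INT_GET_UNALIGNED_32_BE(bp);	// assemble from 4 bytes
 *	INT_SET_UNALIGNED_16_LE(bp + 4, 0x1234);	// stores low byte first
 */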

/*
 * now pick the right ones for our MACHINE ARCHITECTURE
 */

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define INT_GET_UNALIGNED_16(pointer)		INT_GET_UNALIGNED_16_LE(pointer)
#define INT_SET_UNALIGNED_16(pointer,value)	INT_SET_UNALIGNED_16_LE(pointer,value)
#define INT_GET_UNALIGNED_32(pointer)		INT_GET_UNALIGNED_32_LE(pointer)
#define INT_GET_UNALIGNED_64(pointer)		INT_GET_UNALIGNED_64_LE(pointer)
#else
#define INT_GET_UNALIGNED_16(pointer)		INT_GET_UNALIGNED_16_BE(pointer)
#define INT_SET_UNALIGNED_16(pointer,value)	INT_SET_UNALIGNED_16_BE(pointer,value)
#define INT_GET_UNALIGNED_32(pointer)		INT_GET_UNALIGNED_32_BE(pointer)
#define INT_GET_UNALIGNED_64(pointer)		INT_GET_UNALIGNED_64_BE(pointer)
#endif

/* define generic INT_ macros */

#define INT_GET(reference,arch) \
    (((arch) == ARCH_NOCONVERT) \
	? \
	    (reference) \
	: \
	    INT_SWAP((reference),(reference)) \
    )

/* does not return a value */
#define INT_SET(reference,arch,valueref) \
    (__builtin_constant_p(valueref) ? \
	(void)( (reference) = ( ((arch) != ARCH_NOCONVERT) ? (INT_SWAP((reference),(valueref))) : (valueref)) ) : \
	(void)( \
	    ((reference) = (valueref)), \
	    ( ((arch) != ARCH_NOCONVERT) ? (reference) = INT_SWAP((reference),(reference)) : 0 ) \
	) \
    )
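
/*
 * Usage sketch (illustrative): INT_GET()/INT_SET() convert a whole field
 * between disk and native byte order.  With a hypothetical big-endian
 * on-disk counter "ondisk_count":
 *
 *	__u32 count = INT_GET(ondisk_count, ARCH_CONVERT);	// disk -> native
 *	INT_SET(ondisk_count, ARCH_CONVERT, count + 1);		// native -> disk
 *
 * Passing ARCH_NOCONVERT reduces both to a straight access.
 */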

/* does not return a value */
#define INT_MOD_EXPR(reference,arch,code) \
    (void)(((arch) == ARCH_NOCONVERT) \
	? \
	    ((reference) code) \
	: \
	    ( \
		(reference) = INT_GET((reference),arch) , \
		((reference) code), \
		INT_SET(reference, arch, reference) \
	    ) \
    )

/* does not return a value */
#define INT_MOD(reference,arch,delta) \
    (void)( \
	INT_MOD_EXPR(reference,arch,+=(delta)) \
    )
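
/*
 * Usage sketch (illustrative): INT_MOD_EXPR() applies an arbitrary
 * operator to a field while keeping it stored in disk byte order, and
 * INT_MOD() is the common add/subtract case.  With a hypothetical
 * on-disk field "ondisk_nblocks":
 *
 *	INT_MOD(ondisk_nblocks, ARCH_CONVERT, 1);		// increment
 *	INT_MOD_EXPR(ondisk_nblocks, ARCH_CONVERT, |= 1);	// set a flag bit
 */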

/*
 * INT_COPY - copy a value between two locations with the
 *	      _same architecture_ but _potentially different sizes_
 *
 *	    if the types of the two parameters are equal or they are
 *		in native architecture, a simple copy is done
 *
 *	    otherwise, architecture conversions are done
 *
 */

/* does not return a value */
#define INT_COPY(dst,src,arch) \
    (void)( \
	((sizeof(dst) == sizeof(src)) || ((arch) == ARCH_NOCONVERT)) \
	? \
	    ((dst) = (src)) \
	: \
	    INT_SET(dst, arch, INT_GET(src, arch)) \
    )
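
/*
 * Usage sketch (illustrative): widening a hypothetical 32-bit on-disk
 * field into a 64-bit one of the same architecture:
 *
 *	__u32 small;	// big-endian on disk
 *	__u64 large;	// big-endian on disk
 *	INT_COPY(large, small, ARCH_CONVERT);
 *
 * The value is converted to native order, widened by the assignment and
 * converted back; equal-sized copies reduce to a plain assignment.
 */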

/*
 * INT_XLATE - copy a value in either direction between two locations
 *	       with different architectures
 *
 *		    dir < 0	- copy from memory to buffer (native to arch)
 *		    dir > 0	- copy from buffer to memory (arch to native)
 */

/* does not return a value */
#define INT_XLATE(buf,mem,dir,arch) {\
    ASSERT(dir); \
    if (dir>0) { \
	(mem)=INT_GET(buf, arch); \
    } else { \
	INT_SET(buf, arch, mem); \
    } \
}
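
/*
 * Usage sketch (illustrative): the copy direction is chosen at run time,
 * so one call site can handle both translations.  For hypothetical
 * fields "buf_field" (on-disk) and "mem_field" (in-core, native):
 *
 *	INT_XLATE(buf_field, mem_field,  1, ARCH_CONVERT);	// disk -> core
 *	INT_XLATE(buf_field, mem_field, -1, ARCH_CONVERT);	// core -> disk
 */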

/* zero test and clear - a zero field is zero in any byte order */
#define INT_ISZERO(reference,arch) \
    ((reference) == 0)

#define INT_ZERO(reference,arch) \
    ((reference) = 0)

#define INT_GET_UNALIGNED_16_ARCH(pointer,arch) \
    ( ((arch) == ARCH_NOCONVERT) \
	? \
	    (INT_GET_UNALIGNED_16(pointer)) \
	: \
	    (INT_GET_UNALIGNED_16_BE(pointer)) \
    )
#define INT_SET_UNALIGNED_16_ARCH(pointer,value,arch) \
    if ((arch) == ARCH_NOCONVERT) { \
	INT_SET_UNALIGNED_16(pointer,value); \
    } else { \
	INT_SET_UNALIGNED_16_BE(pointer,value); \
    }

/*
 * directory inode numbers are stored unaligned on disk; fetch and copy
 * them through byte accesses, converting when the architectures differ
 */
#define DIRINO4_GET_ARCH(pointer,arch) \
    ( ((arch) == ARCH_NOCONVERT) \
	? \
	    (INT_GET_UNALIGNED_32(pointer)) \
	: \
	    (INT_GET_UNALIGNED_32_BE(pointer)) \
    )

#if XFS_BIG_FILESYSTEMS
#define DIRINO_GET_ARCH(pointer,arch) \
    ( ((arch) == ARCH_NOCONVERT) \
	? \
	    (INT_GET_UNALIGNED_64(pointer)) \
	: \
	    (INT_GET_UNALIGNED_64_BE(pointer)) \
    )
#else
/* MACHINE ARCHITECTURE dependent */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define DIRINO_GET_ARCH(pointer,arch) \
    DIRINO4_GET_ARCH((((__u8*)pointer)+4),arch)
#else
#define DIRINO_GET_ARCH(pointer,arch) \
    DIRINO4_GET_ARCH(pointer,arch)
#endif
#endif

#define DIRINO_COPY_ARCH(from,to,arch) \
    if ((arch) == ARCH_NOCONVERT) { \
	bcopy(from,to,sizeof(xfs_ino_t)); \
    } else { \
	INT_SWAP_UNALIGNED_64(from,to); \
    }
#define DIRINO4_COPY_ARCH(from,to,arch) \
    if ((arch) == ARCH_NOCONVERT) { \
	bcopy((((__u8*)from+4)),to,sizeof(xfs_dir2_ino4_t)); \
    } else { \
	INT_SWAP_UNALIGNED_32(from,to); \
    }
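
/*
 * Usage sketch (illustrative): callers fetch and store directory inode
 * numbers through these helpers rather than dereferencing the on-disk
 * location directly.  For a hypothetical on-disk location "dep_inumber"
 * and a native in-core inode number "ino":
 *
 *	xfs_ino_t ino = DIRINO_GET_ARCH(&dep_inumber, ARCH_CONVERT);
 *	DIRINO_COPY_ARCH(&ino, &dep_inumber, ARCH_CONVERT);	// write back
 */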

#endif	/* __XFS_ARCH_H__ */