+++ /dev/null
-#ifndef _UNALIGNED_H_
-#define _UNALIGNED_H_
-
-/*
- * For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents.
- *
- * This is based almost entirely upon Richard Henderson's
- * asm-alpha/unaligned.h implementation. Some comments were
- * taken from David Mosberger's asm-ia64/unaligned.h header.
- */
-
-/*
- * The main single-value unaligned transfer routines.
- */
-#define get_unaligned(ptr) \
- __get_unaligned((ptr), sizeof(*(ptr)))
-#define put_unaligned(x,ptr) \
- __put_unaligned((__u64)(x), (ptr), sizeof(*(ptr)))
-
-/*
- * This function doesn't actually exist. The idea is that when
- * someone uses the macros below with an unsupported size (datatype),
- * the linker will alert us to the problem via an unresolved reference
- * error.
- */
-extern void bad_unaligned_access_length(void) __attribute__((noreturn));
-
-struct __una_u64 { __u64 x __attribute__((packed)); };
-struct __una_u32 { __u32 x __attribute__((packed)); };
-struct __una_u16 { __u16 x __attribute__((packed)); };
-
-/*
- * Elemental unaligned loads
- */
-
-static inline __u64 __uldq(const __u64 *addr)
-{
- const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
- return ptr->x;
-}
-
-static inline __u32 __uldl(const __u32 *addr)
-{
- const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
- return ptr->x;
-}
-
-static inline __u16 __uldw(const __u16 *addr)
-{
- const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
- return ptr->x;
-}
-
-/*
- * Elemental unaligned stores
- */
-
-static inline void __ustq(__u64 val, __u64 *addr)
-{
- struct __una_u64 *ptr = (struct __una_u64 *) addr;
- ptr->x = val;
-}
-
-static inline void __ustl(__u32 val, __u32 *addr)
-{
- struct __una_u32 *ptr = (struct __una_u32 *) addr;
- ptr->x = val;
-}
-
-static inline void __ustw(__u16 val, __u16 *addr)
-{
- struct __una_u16 *ptr = (struct __una_u16 *) addr;
- ptr->x = val;
-}
-
-#define __get_unaligned(ptr, size) ({ \
- const void *__gu_p = ptr; \
- __u64 val; \
- switch (size) { \
- case 1: \
- val = *(const __u8 *)__gu_p; \
- break; \
- case 2: \
- val = __uldw(__gu_p); \
- break; \
- case 4: \
- val = __uldl(__gu_p); \
- break; \
- case 8: \
- val = __uldq(__gu_p); \
- break; \
- default: \
- bad_unaligned_access_length(); \
- }; \
- (__typeof__(*(ptr)))val; \
-})
-
-#define __put_unaligned(val, ptr, size) \
-do { \
- void *__gu_p = ptr; \
- switch (size) { \
- case 1: \
- *(__u8 *)__gu_p = val; \
- break; \
- case 2: \
- __ustw(val, __gu_p); \
- break; \
- case 4: \
- __ustl(val, __gu_p); \
- break; \
- case 8: \
- __ustq(val, __gu_p); \
- break; \
- default: \
- bad_unaligned_access_length(); \
- }; \
-} while(0)
-
-#endif /* _UNALIGNED_H */
#define max_t(type,x,y) \
({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })
-/* only 64 bit accesses used in xfs kernel code */
-static inline __u64 get_unaligned_be64(void *ptr)
+
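+/*
+ * Open-coded big-endian loads.  Unlike the packed-struct
+ * get_unaligned() machinery removed above, these assemble the value
+ * from individual byte loads, so they make no alignment assumptions
+ * and need no separate byte-swap step.
+ */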
+static inline __uint32_t __get_unaligned_be32(const __uint8_t *p)
+{
+	/* cast p[0] so the shift cannot overflow a signed int */
+	return (__uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+static inline __uint64_t get_unaligned_be64(const void *p)
{
- __be64 __tmp;
- memmove(&__tmp, ptr, 8);
- return be64_to_cpu(__tmp);
+ return (__uint64_t)__get_unaligned_be32(p) << 32 |
+ __get_unaligned_be32(p + 4);
}
-static inline void put_unaligned(__be64 val, void *ptr)
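+
+/*
+ * Big-endian stores, mirroring the loads above: each wider store is
+ * decomposed into byte stores, most significant byte first.
+ */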
+static inline void __put_unaligned_be16(__uint16_t val, __uint8_t *p)
{
- memmove(ptr, &val, 8);
+ *p++ = val >> 8;
+ *p++ = val;
}
+static inline void __put_unaligned_be32(__uint32_t val, __uint8_t *p)
+{
+ __put_unaligned_be16(val >> 16, p);
+ __put_unaligned_be16(val, p + 2);
+}
+
+static inline void put_unaligned_be64(__uint64_t val, void *p)
+{
+ __put_unaligned_be32(val >> 32, p);
+ __put_unaligned_be32(val, p + 4);
+}
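+
+/*
+ * Rough usage sketch, with hypothetical names (buf, off): round-trip
+ * a big-endian 64-bit value at an arbitrary, possibly misaligned,
+ * byte offset:
+ *
+ *	__uint64_t v = get_unaligned_be64(buf + off);
+ *	put_unaligned_be64(v, buf + off);
+ *
+ * Each access degrades to byte loads/stores, so this is safe even on
+ * strict-alignment architectures (e.g. sparc) where a direct 64-bit
+ * access could fault.
+ */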
+
static inline __attribute__((const))
int is_power_of_2(unsigned long n)
{