/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"xchgl %0, %1"
-# if VM_ASM_PLUS
: "=r" (val),
"+m" (var->value)
: "0" (val)
-# else
- : "=r" (val),
- "=m" (var->value)
- : "0" (val),
- "1" (var->value)
-# endif
);
AtomicEpilogue();
return val;
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; cmpxchgl %2, %1"
-# if VM_ASM_PLUS
: "=a" (val),
"+m" (var->value)
: "r" (newVal),
"0" (oldVal)
-# else
- : "=a" (val),
- "=m" (var->value)
- : "r" (newVal),
- "0" (oldVal)
- /*
- * "1" (var->value): results in inconsistent constraints on gcc 2.7.2.3
- * when compiling enterprise-2.2.17-14-RH7.0-update.
- * The constraint has been commented out for now. We may consider doing
- * this systematically, but we need to be sure it is the right thing to
- * do. However, it is also possible that the offending use of this asm
- * function will be removed in the near future in which case we may
- * decide to reintroduce the constraint instead. hpreg & agesen.
- */
-# endif
: "cc"
);
AtomicEpilogue();
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; andl %1, %0"
-# if VM_ASM_PLUS
: "+m" (var->value)
: "ri" (val)
-# else
- : "=m" (var->value)
- : "ri" (val),
- "0" (var->value)
-# endif
: "cc"
);
AtomicEpilogue();
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; orl %1, %0"
-# if VM_ASM_PLUS
: "+m" (var->value)
: "ri" (val)
-# else
- : "=m" (var->value)
- : "ri" (val),
- "0" (var->value)
-# endif
: "cc"
);
AtomicEpilogue();
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; xorl %1, %0"
-# if VM_ASM_PLUS
: "+m" (var->value)
: "ri" (val)
-# else
- : "=m" (var->value)
- : "ri" (val),
- "0" (var->value)
-# endif
: "cc"
);
AtomicEpilogue();
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; addl %1, %0"
-# if VM_ASM_PLUS
: "+m" (var->value)
: "ri" (val)
-# else
- : "=m" (var->value)
- : "ri" (val),
- "0" (var->value)
-# endif
: "cc"
);
AtomicEpilogue();
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; subl %1, %0"
-# if VM_ASM_PLUS
: "+m" (var->value)
: "ri" (val)
-# else
- : "=m" (var->value)
- : "ri" (val),
- "0" (var->value)
-# endif
: "cc"
);
AtomicEpilogue();
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; incl %0"
-# if VM_ASM_PLUS
: "+m" (var->value)
:
-# else
- : "=m" (var->value)
- : "0" (var->value)
-# endif
: "cc"
);
AtomicEpilogue();
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
"lock; decl %0"
-# if VM_ASM_PLUS
: "+m" (var->value)
:
-# else
- : "=m" (var->value)
- : "0" (var->value)
-# endif
: "cc"
);
AtomicEpilogue();
#else // __arm__
/* Checked against the Intel manual and GCC --walken */
__asm__ __volatile__(
-# if VM_ASM_PLUS
"lock; xaddl %0, %1"
: "=r" (val),
"+m" (var->value)
: "0" (val)
: "cc"
-# else
- "lock; xaddl %0, (%1)"
- : "=r" (val)
- : "r" (&var->value),
- "0" (val)
- : "cc", "memory"
-# endif
);
return val;
#endif // __arm__
__asm__ __volatile__(
"lock; cmpxchg8b %0" "\n\t"
"sete %1"
-# if VM_ASM_PLUS
: "+m" (*var),
-# else
- : "=m" (*var),
-# endif
"=qm" (equal),
"=a" (dummy1),
"=d" (dummy2)
__asm__ __volatile__(
"lock; cmpxchgl %3, %0" "\n\t"
"sete %1"
-# if VM_ASM_PLUS
: "+m" (*var),
"=qm" (equal),
"=a" (dummy)
: "r" (newVal),
"2" (oldVal)
-# else
- : "=m" (*var),
- "=qm" (equal),
- "=a" (dummy)
- : /*"0" (*var), */
- "r" (newVal),
- "2" (oldVal)
-# endif
: "cc"
);
AtomicEpilogue();
#endif
+/*
+ * At present, we effectively require a compiler that is at least
+ * gcc-3.3 (circa 2003). Enforce this here; various things below
+ * this line depend upon it.
+ *
+ * In practice, most things presently compile with gcc-4.1 or gcc-4.4.
+ * The various linux kernel modules may use older (gcc-3.3) compilers.
+ */
+#if defined __GNUC__ && __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
+#error "gcc version is too old to compile assembly, need gcc-3.3 or better"
+#endif
+
+
/*
* Consider the following reasons functions are inlined:
*
* are added the inline-ness should be removed.
*/
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3)
+#if defined __GNUC__
/*
* Starting at version 3.3, gcc does not always inline functions marked
* 'inline' (it depends on their size). To force gcc to do so, one must use the
#ifdef _MSC_VER
#define NORETURN __declspec(noreturn)
-#elif __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 9)
+#elif defined __GNUC__
#define NORETURN __attribute__((__noreturn__))
#else
#define NORETURN
#endif
-/*
- * GCC 3.2 inline asm needs the + constraint for input/ouput memory operands.
- * Older GCCs don't know about it --hpreg
- */
-
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 2)
-# define VM_ASM_PLUS 1
-#else
-# define VM_ASM_PLUS 0
-#endif
-
/*
* Branch prediction hints:
* LIKELY(exp) - Expression exp is likely TRUE.
* all others we don't so we do nothing.
*/
-#if (__GNUC__ >= 3)
+#if defined __GNUC__
/*
* gcc3 uses __builtin_expect() to inform the compiler of an expected value.
* We use this to inform the static branch predictor. The '!!' in LIKELY
#define ALIGNED(n)
#endif
-/*
- * __func__ is a stringified function name that is part of the C99 standard. The block
- * below defines __func__ on older systems where the compiler does not support that
- * macro.
- */
-#if defined(__GNUC__) \
- && ((__GNUC__ == 2 && __GNUC_MINOR < 96) \
- || (__GNUC__ < 2))
-# define __func__ __FUNCTION__
-#endif
-
/*
* Once upon a time, this was used to silence compiler warnings that
* get generated when the compiler thinks that a function returns