From: VMware, Inc <> Date: Mon, 20 Dec 2010 21:41:42 +0000 (-0800) Subject: Remove pre-gcc-3.3 crud from our headers X-Git-Tag: 2010.12.19-339835~64 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=0a4eedccda657bfd8f2210487afca3d08429e47f;p=thirdparty%2Fopen-vm-tools.git Remove pre-gcc-3.3 crud from our headers gcc-3.3 itself is ancient, but some linux driver builds use it. Everything else is even more modern. This change strips out a bunch of the pre-gcc-3.3 preprocessor magic from some of the very common header files. Most notable: VM_ASM_PLUS is vacuously true. So it's gone. Signed-off-by: Marcelo Vanzin --- diff --git a/open-vm-tools/lib/include/vm_atomic.h b/open-vm-tools/lib/include/vm_atomic.h index a8615fa9f..8d680b28b 100644 --- a/open-vm-tools/lib/include/vm_atomic.h +++ b/open-vm-tools/lib/include/vm_atomic.h @@ -384,16 +384,9 @@ Atomic_ReadWrite(Atomic_uint32 *var, // IN /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "xchgl %0, %1" -# if VM_ASM_PLUS : "=r" (val), "+m" (var->value) : "0" (val) -# else - : "=r" (val), - "=m" (var->value) - : "0" (val), - "1" (var->value) -# endif ); AtomicEpilogue(); return val; @@ -466,26 +459,10 @@ Atomic_ReadIfEqualWrite(Atomic_uint32 *var, // IN /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; cmpxchgl %2, %1" -# if VM_ASM_PLUS : "=a" (val), "+m" (var->value) : "r" (newVal), "0" (oldVal) -# else - : "=a" (val), - "=m" (var->value) - : "r" (newVal), - "0" (oldVal) - /* - * "1" (var->value): results in inconsistent constraints on gcc 2.7.2.3 - * when compiling enterprise-2.2.17-14-RH7.0-update. - * The constraint has been commented out for now. We may consider doing - * this systematically, but we need to be sure it is the right thing to - * do. However, it is also possible that the offending use of this asm - * function will be removed in the near future in which case we may - * decide to reintroduce the constraint instead. 
hpreg & agesen. - */ -# endif : "cc" ); AtomicEpilogue(); @@ -597,14 +574,8 @@ Atomic_And(Atomic_uint32 *var, // IN /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; andl %1, %0" -# if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) -# else - : "=m" (var->value) - : "ri" (val), - "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); @@ -659,14 +630,8 @@ Atomic_Or(Atomic_uint32 *var, // IN /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; orl %1, %0" -# if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) -# else - : "=m" (var->value) - : "ri" (val), - "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); @@ -720,14 +685,8 @@ Atomic_Xor(Atomic_uint32 *var, // IN /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; xorl %1, %0" -# if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) -# else - : "=m" (var->value) - : "ri" (val), - "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); @@ -821,14 +780,8 @@ Atomic_Add(Atomic_uint32 *var, // IN /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; addl %1, %0" -# if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) -# else - : "=m" (var->value) - : "ri" (val), - "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); @@ -921,14 +874,8 @@ Atomic_Sub(Atomic_uint32 *var, // IN /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; subl %1, %0" -# if VM_ASM_PLUS : "+m" (var->value) : "ri" (val) -# else - : "=m" (var->value) - : "ri" (val), - "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); @@ -1014,13 +961,8 @@ Atomic_Inc(Atomic_uint32 *var) // IN /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; incl %0" -# if VM_ASM_PLUS : "+m" (var->value) : -# else - : "=m" (var->value) - : "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); @@ -1066,13 +1008,8 @@ Atomic_Dec(Atomic_uint32 *var) // IN /* Checked against the Intel manual and GCC --walken */ 
__asm__ __volatile__( "lock; decl %0" -# if VM_ASM_PLUS : "+m" (var->value) : -# else - : "=m" (var->value) - : "0" (var->value) -# endif : "cc" ); AtomicEpilogue(); @@ -1246,19 +1183,11 @@ Atomic_FetchAndAddUnfenced(Atomic_uint32 *var, // IN #else // __arm__ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( -# if VM_ASM_PLUS "lock; xaddl %0, %1" : "=r" (val), "+m" (var->value) : "0" (val) : "cc" -# else - "lock; xaddl %0, (%1)" - : "=r" (val) - : "r" (&var->value), - "0" (val) - : "cc", "memory" -# endif ); return val; #endif // __arm__ @@ -1628,11 +1557,7 @@ Atomic_CMPXCHG64(Atomic_uint64 *var, // IN/OUT __asm__ __volatile__( "lock; cmpxchg8b %0" "\n\t" "sete %1" -# if VM_ASM_PLUS : "+m" (*var), -# else - : "=m" (*var), -# endif "=qm" (equal), "=a" (dummy1), "=d" (dummy2) @@ -1716,20 +1641,11 @@ Atomic_CMPXCHG32(Atomic_uint32 *var, // IN/OUT __asm__ __volatile__( "lock; cmpxchgl %3, %0" "\n\t" "sete %1" -# if VM_ASM_PLUS : "+m" (*var), "=qm" (equal), "=a" (dummy) : "r" (newVal), "2" (oldVal) -# else - : "=m" (*var), - "=qm" (equal), - "=a" (dummy) - : /*"0" (*var), */ - "r" (newVal), - "2" (oldVal) -# endif : "cc" ); AtomicEpilogue(); diff --git a/open-vm-tools/lib/include/vm_basic_asm.h b/open-vm-tools/lib/include/vm_basic_asm.h index 94354878a..0d3e822fb 100644 --- a/open-vm-tools/lib/include/vm_basic_asm.h +++ b/open-vm-tools/lib/include/vm_basic_asm.h @@ -801,14 +801,8 @@ SetBit32(uint32 *var, unsigned index) #ifdef __GNUC__ __asm__ ( "bts %1, %0" -# if VM_ASM_PLUS : "+mr" (*var) : "ri" (index) -# else - : "=mr" (*var) - : "ri" (index), - "0" (*var) -# endif : "cc" ); #elif defined(_MSC_VER) @@ -822,14 +816,8 @@ ClearBit32(uint32 *var, unsigned index) #ifdef __GNUC__ __asm__ ( "btr %1, %0" -# if VM_ASM_PLUS : "+mr" (*var) : "ri" (index) -# else - : "=mr" (*var) - : "ri" (index), - "0" (*var) -# endif : "cc" ); #elif defined(_MSC_VER) diff --git a/open-vm-tools/lib/include/vm_basic_types.h 
b/open-vm-tools/lib/include/vm_basic_types.h index be15f292f..fbb2f03a4 100644 --- a/open-vm-tools/lib/include/vm_basic_types.h +++ b/open-vm-tools/lib/include/vm_basic_types.h @@ -647,6 +647,19 @@ typedef void * UserVA; #endif +/* + * At present, we effectively require a compiler that is at least + * gcc-3.3 (circa 2003). Enforce this here; various things below + * this line depend upon it. + * + * In practice, most things presently compile with gcc-4.1 or gcc-4.4. + * The various linux kernel modules may use older (gcc-3.3) compilers. + */ +#if defined __GNUC__ && __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3) +#error "gcc version is too old to compile assembly, need gcc-3.3 or better" +#endif + + /* * Consider the following reasons functions are inlined: * @@ -659,7 +672,7 @@ typedef void * UserVA; * are added the inline-ness should be removed. */ -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 3) +#if defined __GNUC__ /* * Starting at version 3.3, gcc does not always inline functions marked * 'inline' (it depends on their size). To force gcc to do so, one must use the @@ -687,23 +700,12 @@ typedef void * UserVA; #ifdef _MSC_VER #define NORETURN __declspec(noreturn) -#elif __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 9) +#elif defined __GNUC__ #define NORETURN __attribute__((__noreturn__)) #else #define NORETURN #endif -/* - * GCC 3.2 inline asm needs the + constraint for input/ouput memory operands. - * Older GCCs don't know about it --hpreg - */ - -#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 2) -# define VM_ASM_PLUS 1 -#else -# define VM_ASM_PLUS 0 -#endif - /* * Branch prediction hints: * LIKELY(exp) - Expression exp is likely TRUE. @@ -717,7 +719,7 @@ typedef void * UserVA; * all others we don't so we do nothing. */ -#if (__GNUC__ >= 3) +#if defined __GNUC__ /* * gcc3 uses __builtin_expect() to inform the compiler of an expected value. * We use this to inform the static branch predictor. The '!!' 
in LIKELY @@ -801,17 +803,6 @@ typedef void * UserVA; #define ALIGNED(n) #endif -/* - * __func__ is a stringified function name that is part of the C99 standard. The block - * below defines __func__ on older systems where the compiler does not support that - * macro. - */ -#if defined(__GNUC__) \ - && ((__GNUC__ == 2 && __GNUC_MINOR < 96) \ - || (__GNUC__ < 2)) -# define __func__ __FUNCTION__ -#endif - /* * Once upon a time, this was used to silence compiler warnings that * get generated when the compiler thinks that a function returns