++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
++{
++ v->counter = i;
++}
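
With CONFIG_PAX_REFCOUNT enabled, the regular atomic_* operations below trap on signed overflow, while the *_unchecked family keeps the traditional wrapping behaviour for counters that may legitimately wrap. A minimal user-space sketch of that distinction, assuming GCC/Clang's __builtin_add_overflow (the function names are hypothetical, not part of the patch):

#include <limits.h>
#include <stdio.h>

/* add_checked() mirrors the trapping MIPS "add" plus the PaX fixup:
 * overflow is detected and the old value is preserved.
 * add_unchecked() mirrors "addu": the result silently wraps. */
static int add_checked(int a, int b)
{
	int r;

	if (__builtin_add_overflow(a, b, &r))
		return a;	/* overflow: leave the counter unchanged */
	return r;
}

static int add_unchecked(int a, int b)
{
	return (int)((unsigned int)a + (unsigned int)b);	/* wraps */
}

int main(void)
{
	printf("%d\n", add_checked(INT_MAX, 1));	/* stays INT_MAX */
	printf("%d\n", add_unchecked(INT_MAX, 1));	/* wraps to INT_MIN */
	return 0;
}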
+
+ /*
+ * atomic_add - add integer to atomic variable
+@@ -47,7 +79,67 @@
+ *
+ * Atomically adds @i to @v.
+ */
+-static __inline__ void atomic_add(int i, atomic_t * v)
++static __inline__ void atomic_add(int i, atomic_t *v)
++{
++ int temp;
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %0, %1 # atomic_add \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: add %0, %2 \n"
++#else
++ " addu %0, %2 \n"
++#endif
++ " sc %0, %1 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %0, %1 # atomic_add \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: add %0, %2 \n"
++#else
++ " addu %0, %2 \n"
++#endif
++ " sc %0, %1 \n"
++ " beqz %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: add %0, %1 \n"
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#else
++ " addu %0, %1 \n"
++#endif
++ : "+r" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++}
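
The ll/sc retry loop above is the MIPS idiom for lock-free read-modify-write: sc stores only if nothing has written the location since the ll, and the beqz/beqzl branch retries on failure. A portable C11 analogue of the same loop, as a sketch (plain atomic_add() gives no ordering guarantees, hence relaxed):

#include <stdatomic.h>

static void atomic_add_cas(atomic_int *v, int i)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	/* compare_exchange_weak may fail spuriously like sc; on failure it
	 * reloads *v into old, matching the branch back to the ll. */
	while (!atomic_compare_exchange_weak_explicit(v, &old, old + i,
			memory_order_relaxed, memory_order_relaxed))
		;
}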
++
++static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+ {
+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
+ int temp;
+@@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
+ *
+ * Atomically subtracts @i from @v.
+ */
+-static __inline__ void atomic_sub(int i, atomic_t * v)
++static __inline__ void atomic_sub(int i, atomic_t *v)
++{
++ int temp;
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++		"1:	ll	%0, %1		# atomic_sub	\n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: sub %0, %2 \n"
++#else
++ " subu %0, %2 \n"
++#endif
++ " sc %0, %1 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++		"1:	ll	%0, %1		# atomic_sub	\n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: sub %0, %2 \n"
++#else
++ " subu %0, %2 \n"
++#endif
++ " sc %0, %1 \n"
++ " beqz %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: sub %0, %1 \n"
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#else
++ " subu %0, %1 \n"
++#endif
++ : "+r" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++}
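
_ASM_EXTABLE(2b, 3b) in the PAX_REFCOUNT paths records a fixup entry so that the overflow trap raised by the checked add/sub resumes at local label 3 instead of taking down the kernel. The entries have this shape on MIPS (field names as in kernels of this era; the handler snippet is a schematic sketch, not the exact code):

struct exception_table_entry {
	unsigned long insn;	/* address of the trapping instruction (2b) */
	unsigned long nextinsn;	/* where execution resumes (3b) */
};

/* Trap-handler side, schematically:
 *
 *	fixup = search_exception_tables(exception_epc(regs));
 *	if (fixup)
 *		regs->cp0_epc = fixup->nextinsn;
 */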
++
++static __inline__ void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+ {
+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
+ int temp;
+@@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
+ /*
+ * Same as above, but return the result value
+ */
+-static __inline__ int atomic_add_return(int i, atomic_t * v)
++static __inline__ int atomic_add_return(int i, atomic_t *v)
++{
++ int result;
++ int temp;
++
++ smp_mb__before_llsc();
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %1, %2 # atomic_add_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: add %0, %1, %3 \n"
++#else
++ " addu %0, %1, %3 \n"
++#endif
++ " sc %0, %2 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " b 4f \n"
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: addu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %1, %2 # atomic_add_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: add %0, %1, %3 \n"
++#else
++ " addu %0, %1, %3 \n"
++#endif
++ " sc %0, %2 \n"
++ " bnez %0, 4f \n"
++ " b 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: addu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++ " lw %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: add %0, %2 \n"
++#else
++ " addu %0, %2 \n"
++#endif
++ " sw %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Note: Dest reg is not modified on overflow */
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#endif
++ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++
++ smp_llsc_mb();
++
++ return result;
++}
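
Unlike atomic_add(), the value-returning variant is bracketed by smp_mb__before_llsc() and smp_llsc_mb(), making it fully ordered. A minimal C11 sketch of the fast path, assuming that barrier pair amounts to sequentially consistent ordering:

#include <stdatomic.h>

static int add_return_c11(atomic_int *v, int i)
{
	/* fetch_add returns the old value; the kernel API returns the new one */
	return atomic_fetch_add_explicit(v, i, memory_order_seq_cst) + i;
}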
++
++static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+ {
+ int result;
+
+@@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
+ return result;
+ }
+
+-static __inline__ int atomic_sub_return(int i, atomic_t * v)
++static __inline__ int atomic_sub_return(int i, atomic_t *v)
++{
++ int result;
++ int temp;
++
++ smp_mb__before_llsc();
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %1, %2 # atomic_sub_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: sub %0, %1, %3 \n"
++#else
++ " subu %0, %1, %3 \n"
++#endif
++ " sc %0, %2 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " b 4f \n"
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: subu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
++ : "Ir" (i), "m" (v->counter)
++ : "memory");
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: ll %1, %2 # atomic_sub_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: sub %0, %1, %3 \n"
++#else
++ " subu %0, %1, %3 \n"
++#endif
++ " sc %0, %2 \n"
++ " bnez %0, 4f \n"
++ " b 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: subu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++ " lw %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: sub %0, %2 \n"
++#else
++ " subu %0, %2 \n"
++#endif
++ " sw %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Note: Dest reg is not modified on overflow */
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#endif
++ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++
++ smp_llsc_mb();
++
++ return result;
++}
++
++static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
+ {
+ int result;
+
+@@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
+ */
+-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
++static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
+ {
+ int result;
+
+@@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
+ return result;
+ }
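
A typical use of atomic_sub_if_positive() is taking a unit from a bounded resource only when one is available; a negative return means the counter was left untouched. Illustrative sketch (take_credit() is hypothetical):

static int take_credit(atomic_t *pool)
{
	/* Returns old - 1, and stores it only when old >= 1. */
	return atomic_sub_if_positive(1, pool) >= 0;
}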
+
+-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
++ int new)
++{
++	return cmpxchg(&v->counter, old, new);
++}
++
++static inline int atomic_xchg(atomic_t *v, int new)
++{
++ return xchg(&v->counter, new);
++}
++
++static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
++{
++	return xchg(&v->counter, new);
++}
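
atomic_cmpxchg() is the building block for conditional updates such as __atomic_add_unless(), documented just below. A sketch of the kernel's generic CAS-loop pattern using the accessors defined in this file:

static inline int add_unless_sketch(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	while (c != u) {
		int old = atomic_cmpxchg(v, c, c + a);

		if (old == c)
			break;		/* CAS won: the add happened */
		c = old;		/* lost a race: retry with the fresh value */
	}
	return c;			/* old value; callers compare against @u */
}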
+
+ /**
+ * __atomic_add_unless - add unless the number is a given value
+@@ -324,6 +666,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+
+ #define atomic_dec_return(v) atomic_sub_return(1, (v))
+ #define atomic_inc_return(v) atomic_add_return(1, (v))
++#define atomic_inc_return_unchecked(v) atomic_add_return_unchecked(1, (v))
+
+ /*
+ * atomic_sub_and_test - subtract value from variable and test result
+@@ -345,6 +688,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * other cases.
+ */
+ #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
++#define atomic_inc_and_test_unchecked(v) (atomic_add_return_unchecked(1, (v)) == 0)
+
+ /*
+ * atomic_dec_and_test - decrement by 1 and test
+@@ -369,6 +713,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically increments @v by 1.
+ */
+ #define atomic_inc(v) atomic_add(1, (v))
++#define atomic_inc_unchecked(v) atomic_add_unchecked(1, (v))
+
+ /*
+ * atomic_dec - decrement and test
+@@ -377,6 +722,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * Atomically decrements @v by 1.
+ */
+ #define atomic_dec(v) atomic_sub(1, (v))
++#define atomic_dec_unchecked(v) atomic_sub_unchecked(1, (v))
+
+ /*
+ * atomic_add_negative - add and test if negative
+@@ -398,14 +744,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ * @v: pointer of type atomic64_t
+ *
+ */
+-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
++static inline long atomic64_read(const atomic64_t *v)
++{
++ return (*(volatile const long *) &v->counter);
++}
++
++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
++{
++ return (*(volatile const long *) &v->counter);
++}
+
+ /*
+ * atomic64_set - set atomic variable
+ * @v: pointer of type atomic64_t
+ * @i: required value
+ */
+-#define atomic64_set(v, i) ((v)->counter = (i))
++static inline void atomic64_set(atomic64_t *v, long i)
++{
++ v->counter = i;
++}
++
++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
++{
++ v->counter = i;
++}
+
+ /*
+ * atomic64_add - add integer to atomic variable
+@@ -414,7 +776,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
+ *
+ * Atomically adds @i to @v.
+ */
+-static __inline__ void atomic64_add(long i, atomic64_t * v)
++static __inline__ void atomic64_add(long i, atomic64_t *v)
++{
++ long temp;
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %0, %1 # atomic64_add \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: dadd %0, %2 \n"
++#else
++ " daddu %0, %2 \n"
++#endif
++ " scd %0, %1 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %0, %1 # atomic64_add \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: dadd %0, %2 \n"
++#else
++ " daddu %0, %2 \n"
++#endif
++ " scd %0, %1 \n"
++ " beqz %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: dadd %0, %1 \n"
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#else
++ " daddu %0, %1 \n"
++#endif
++ : "+r" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++}
++
++static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
+ {
+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
+ long temp;
+@@ -457,7 +878,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
+ *
+ * Atomically subtracts @i from @v.
+ */
+-static __inline__ void atomic64_sub(long i, atomic64_t * v)
++static __inline__ void atomic64_sub(long i, atomic64_t *v)
++{
++ long temp;
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %0, %1 # atomic64_sub \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: dsub %0, %2 \n"
++#else
++ " dsubu %0, %2 \n"
++#endif
++ " scd %0, %1 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %0, %1 # atomic64_sub \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "2: dsub %0, %2 \n"
++#else
++ " dsubu %0, %2 \n"
++#endif
++ " scd %0, %1 \n"
++ " beqz %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "3: \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ " .set mips0 \n"
++ : "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: dsub %0, %1 \n"
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#else
++ " dsubu %0, %1 \n"
++#endif
++ : "+r" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++}
++
++static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
+ {
+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
+ long temp;
+@@ -496,7 +977,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
+ /*
+ * Same as above, but return the result value
+ */
+-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
++static __inline__ long atomic64_add_return(long i, atomic64_t *v)
++{
++ long result;
++ long temp;
++
++ smp_mb__before_llsc();
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %1, %2 # atomic64_add_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: dadd %0, %1, %3 \n"
++#else
++ " daddu %0, %1, %3 \n"
++#endif
++ " scd %0, %2 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " b 4f \n"
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: daddu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "+m" (v->counter)
++ : "Ir" (i));
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %1, %2 # atomic64_add_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: dadd %0, %1, %3 \n"
++#else
++ " daddu %0, %1, %3 \n"
++#endif
++ " scd %0, %2 \n"
++ " bnez %0, 4f \n"
++ " b 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: daddu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
++ : "Ir" (i), "m" (v->counter)
++ : "memory");
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++ " ld %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: dadd %0, %2 \n"
++#else
++ " daddu %0, %2 \n"
++#endif
++ " sd %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Note: Dest reg is not modified on overflow */
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#endif
++ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++
++ smp_llsc_mb();
++
++ return result;
++}
++
++static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
+ {
+ long result;
+
+@@ -546,7 +1113,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
+ return result;
+ }
+
+-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
++static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
++{
++ long result;
++ long temp;
++
++ smp_mb__before_llsc();
++
++ if (kernel_uses_llsc && R10000_LLSC_WAR) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %1, %2 # atomic64_sub_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: dsub %0, %1, %3 \n"
++#else
++ " dsubu %0, %1, %3 \n"
++#endif
++ " scd %0, %2 \n"
++ " beqzl %0, 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " b 4f \n"
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: dsubu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
++ : "Ir" (i), "m" (v->counter)
++ : "memory");
++ } else if (kernel_uses_llsc) {
++ __asm__ __volatile__(
++ " .set mips3 \n"
++ "1: lld %1, %2 # atomic64_sub_return \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "2: dsub %0, %1, %3 \n"
++#else
++ " dsubu %0, %1, %3 \n"
++#endif
++ " scd %0, %2 \n"
++ " bnez %0, 4f \n"
++ " b 1b \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ " .set noreorder \n"
++ "3: b 5f \n"
++ " move %0, %1 \n"
++ " .set reorder \n"
++ _ASM_EXTABLE(2b, 3b)
++#endif
++ "4: dsubu %0, %1, %3 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ "5: \n"
++#endif
++ " .set mips0 \n"
++ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
++ : "Ir" (i), "m" (v->counter)
++ : "memory");
++ } else {
++ unsigned long flags;
++
++ raw_local_irq_save(flags);
++ __asm__ __volatile__(
++ " ld %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Exception on overflow. */
++ "1: dsub %0, %2 \n"
++#else
++ " dsubu %0, %2 \n"
++#endif
++ " sd %0, %1 \n"
++#ifdef CONFIG_PAX_REFCOUNT
++ /* Note: Dest reg is not modified on overflow */
++ "2: \n"
++ _ASM_EXTABLE(1b, 2b)
++#endif
++ : "=&r" (result), "+m" (v->counter) : "Ir" (i));
++ raw_local_irq_restore(flags);
++ }
++
++ smp_llsc_mb();
++
++ return result;
++}
++
++static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
+ {
+ long result;
+
+@@ -605,7 +1262,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
+ * Atomically test @v and subtract @i if @v is greater or equal than @i.
+ * The function returns the old value of @v minus @i.
+ */
+-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
++static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
+ {
+ long result;
+
+@@ -662,9 +1319,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
+ return result;
+ }
+
+-#define atomic64_cmpxchg(v, o, n) \
+- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+-#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
++static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
++{
++ return cmpxchg(&v->counter, old, new);
++}
++
++static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
++ long new)
++{
++	return cmpxchg(&v->counter, old, new);
++}
++
++static inline long atomic64_xchg(atomic64_t *v, long new)
++{
++ return xchg(&v->counter, new);
++}
++
++static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
++{
++	return xchg(&v->counter, new);
++}
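
atomic64_xchg() is handy when a 64-bit value must be read and replaced in one indivisible step, e.g. draining a statistics counter. Illustrative sketch (drain_stat() is hypothetical):

static long drain_stat(atomic64_t *stat)
{
	/* Atomically read the accumulated total and reset it to zero. */
	return atomic64_xchg(stat, 0);
}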
+
+ /**
+ * atomic64_add_unless - add unless the number is a given value
+@@ -694,6 +1368,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+
+ #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+ #define atomic64_inc_return(v) atomic64_add_return(1, (v))
++#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
+
+ /*
+ * atomic64_sub_and_test - subtract value from variable and test result
+@@ -715,6 +1390,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * other cases.
+ */
+ #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
++#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
+
+ /*
+ * atomic64_dec_and_test - decrement by 1 and test
+@@ -739,6 +1415,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * Atomically increments @v by 1.
+ */
+ #define atomic64_inc(v) atomic64_add(1, (v))
++#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
+
+ /*
+ * atomic64_dec - decrement and test
+@@ -747,6 +1424,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+ * Atomically decrements @v by 1.
+ */
+ #define atomic64_dec(v) atomic64_sub(1, (v))
++#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))