-/* Copyright (C) 2002 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, write to the Free
- Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- 02111-1307 USA. */
+ License along with the GNU C Library; if not, see
+ <http://www.gnu.org/licenses/>. */
#include <sysdep.h>
-
-#define SYS_futex 240
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-#ifndef UP
-# define LOCK lock
-#else
-# define LOCK
-#endif
-
-#define CURR_EVENT 0
-#define MUTEX 4
-#define LEFT 8
-#define INIT_COUNT 12
-
+#include <lowlevellock.h>
+#include <lowlevelbarrier.h>
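+
+/* lowlevelbarrier.h provides the pthread_barrier_t field offsets used
+   below (CURR_EVENT, MUTEX, LEFT, INIT_COUNT and the new PRIVATE field),
+   replacing the values that were hard-coded in this file.  */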
.text
.type pthread_barrier_wait,@function
.align 16
pthread_barrier_wait:
- pushl %esi
+ cfi_startproc
pushl %ebx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
- movl 12(%esp), %ebx
- xorl %esi, %esi
+ movl 8(%esp), %ebx
/* Get the mutex. */
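+	/* Fast path: atomically replace a 0 (unlocked) MUTEX with 1.  If the
+	   compare-and-exchange fails, the mutex is already held and we fall
+	   back to __lll_lock_wait at 1.  */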
- orl $-1, %eax
+ movl $1, %edx
+ xorl %eax, %eax
LOCK
- xaddl %eax, MUTEX(%ebx)
- jne 1f
+ cmpxchgl %edx, MUTEX(%ebx)
+ jnz 1f
	/* One less waiter.  If this was the last one needed, wake
	   everybody.  */
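+	/* LEFT counts the arrivals still outstanding; the thread that takes
+	   it to zero becomes the serial thread and branches to 3 to wake the
+	   others.  */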
-2: decl LEFT(%ebx)
+2: subl $1, LEFT(%ebx)
je 3f
/* There are more threads to come. */
+ pushl %esi
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -12)
+
+#if CURR_EVENT == 0
+ movl (%ebx), %edx
+#else
movl CURR_EVENT(%ebx), %edx
+#endif
/* Release the mutex. */
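+	/* MUTEX is 1 while uncontended; if the decrement leaves a nonzero
+	   value, other threads are blocked on it and 6 wakes them.  */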
LOCK
- incl MUTEX(%ebx)
- jng 6f
+ subl $1, MUTEX(%ebx)
+ jne 6f
/* Wait for the remaining threads. The call will return immediately
if the CURR_EVENT memory has meanwhile been changed. */
-7: movl %esi, %ecx /* movl $FUTEX_WAIT, %ecx */
+7:
+#if FUTEX_WAIT == 0
+ movl PRIVATE(%ebx), %ecx
+#else
+ movl $FUTEX_WAIT, %ecx
+ orl PRIVATE(%ebx), %ecx
+#endif
+ xorl %esi, %esi
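+	/* Futex syscall arguments: %ebx = futex address (CURR_EVENT is the
+	   first word of the barrier), %ecx = FUTEX_WAIT combined with the
+	   barrier's private-futex flag, %edx = the event count read above,
+	   %esi = NULL timeout.  */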
8: movl $SYS_futex, %eax
- int $0x80
+ ENTER_KERNEL
/* Don't return on spurious wakeups. The syscall does not change
any register except %eax so there is no need to reload any of
them. */
+#if CURR_EVENT == 0
+ cmpl %edx, (%ebx)
+#else
cmpl %edx, CURR_EVENT(%ebx)
- je,pn 8b
+#endif
+ je 8b
+
+	/* Increment LEFT.  If this brings the count back to the
+	   initial count, unlock the object.  */
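+	/* The xadd leaves the previous value of LEFT in %edx; if it equals
+	   INIT_COUNT - 1, this thread is the last to leave the barrier and
+	   must drop the mutex held since the serial thread acquired it.  */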
+ movl $1, %edx
+ movl INIT_COUNT(%ebx), %ecx
+ LOCK
+ xaddl %edx, LEFT(%ebx)
+ subl $1, %ecx
+ cmpl %ecx, %edx
+ jne 10f
+
+	/* Release the mutex.  We cannot release the lock before
+	   waking the waiting threads since otherwise a new thread might
+	   arrive and get woken up, too.  */
+ LOCK
+ subl $1, MUTEX(%ebx)
+ jne 9f
/* Note: %esi is still zero. */
- movl %esi, %eax /* != PTHREAD_BARRIER_SERIAL_THREAD */
+10: movl %esi, %eax /* != PTHREAD_BARRIER_SERIAL_THREAD */
- popl %ebx
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%esi)
+ popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
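+	/* Re-establish the CFI state for the code below: it is reached by
+	   the branch to 3 taken while only %ebx was saved on the stack.  */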
+
/* The necessary number of threads arrived. */
-3: movl INIT_COUNT(%ebx), %eax
- movl %eax, LEFT(%ebx)
- incl CURR_EVENT(%ebx)
+3:
+#if CURR_EVENT == 0
+ addl $1, (%ebx)
+#else
+ addl $1, CURR_EVENT(%ebx)
+#endif
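+	/* CURR_EVENT now differs from the value the waiters read before
+	   blocking, so their FUTEX_WAIT calls return (or never block).  */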
/* Wake up all waiters. The count is a signed number in the kernel
so 0x7fffffff is the highest value. */
movl $0x7fffffff, %edx
movl $FUTEX_WAKE, %ecx
+ orl PRIVATE(%ebx), %ecx
movl $SYS_futex, %eax
- int $0x80
+ ENTER_KERNEL
- /* Release the mutex. */
+	/* Increment LEFT.  If this brings the count back to the
+	   initial count, unlock the object.  */
+ movl $1, %edx
+ movl INIT_COUNT(%ebx), %ecx
+ LOCK
+ xaddl %edx, LEFT(%ebx)
+ subl $1, %ecx
+ cmpl %ecx, %edx
+ jne 5f
+
+	/* Release the mutex.  We cannot release the lock before
+	   waking the waiting threads since otherwise a new thread might
+	   arrive and get woken up, too.  */
LOCK
- incl MUTEX(%ebx)
- jng 4f
+ subl $1, MUTEX(%ebx)
+ jne 4f
5: orl $-1, %eax /* == PTHREAD_BARRIER_SERIAL_THREAD */
popl %ebx
- popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(%ebx)
ret
-1: leal MUTEX(%ebx), %ecx
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%ebx, -8)
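+	/* MUTEX was already held: block in __lll_lock_wait.  PRIVATE(%ebx)
+	   holds the futex flag chosen at barrier initialization; xoring it
+	   with LLL_SHARED converts it into the private/shared argument the
+	   low-level lock helpers expect.  */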
+1: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %edx
+ xorl $LLL_SHARED, %ecx
call __lll_lock_wait
jmp 2b
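+
+	/* Mutex-release slow paths: __lll_unlock_wake clears MUTEX and wakes
+	   the threads blocked on it, then control rejoins the fast path.  */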
-4: leal MUTEX(%ebx), %eax
+4: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
call __lll_unlock_wake
jmp 5b
-6: leal MUTEX(%ebx), %eax
+ cfi_adjust_cfa_offset(4)
+ cfi_offset(%esi, -12)
+6: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
call __lll_unlock_wake
jmp 7b
+
+9: movl PRIVATE(%ebx), %ecx
+ leal MUTEX(%ebx), %eax
+ xorl $LLL_SHARED, %ecx
+ call __lll_unlock_wake
+ jmp 10b
+ cfi_endproc
.size pthread_barrier_wait,.-pthread_barrier_wait