+2003-03-11 Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+ * sysdeps/unix/sysv/linux/s390/s390-64/sysdep.h (__NR_pread64): Define
+ to __NR_pread if not defined.
+ (__NR_pwrite64): Define to __NR_pwrite if not defined.
+
2003-03-11 Jakub Jelinek <jakub@redhat.com>

* sysdeps/unix/sysv/linux/ia64/system.c: New file.
+2003-03-11 Jakub Jelinek <jakub@redhat.com>
+
+ * sysdeps/ia64/bits/atomic.h (atomic_exchange_and_add): Swap 2nd and
+ 3rd argument of __arch_compare_and_exchange_{32,64}_val_acq.
+
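A hedged sketch of the argument-order fix above, assuming a
compare-and-exchange primitive that, like the ia64
__arch_compare_and_exchange_{32,64}_val_acq macros, takes
(mem, newval, oldval) and returns the old value.  The helper names
are illustrative, not glibc's; GCC's __sync builtin stands in for
the real ia64 cmpxchg:

#include <stdint.h>

/* Stand-in for __arch_compare_and_exchange_32_val_acq: the new
   value is the 2nd argument, the expected old value the 3rd.  */
static inline uint32_t
cas32_val_acq (volatile uint32_t *mem, uint32_t newval, uint32_t oldval)
{
  return __sync_val_compare_and_swap (mem, oldval, newval);
}

/* atomic_exchange_and_add in terms of the primitive above; the bug
   fixed by the entry above was passing the last two arguments
   swapped.  */
static inline uint32_t
exchange_and_add_32 (volatile uint32_t *mem, uint32_t value)
{
  uint32_t old;
  do
    old = *mem;
  while (cas32_val_acq (mem, old + value, old) != old);
  return old;
}
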
+ * sysdeps/unix/sysv/linux/ia64/sem_post.c: Include semaphore.h.
+ * sysdeps/unix/sysv/linux/ia64/sem_timedwait.c: Likewise.
+ * sysdeps/unix/sysv/linux/ia64/sem_trywait.c: Likewise.
+ * sysdeps/unix/sysv/linux/ia64/sem_wait.c: Likewise.
+ * sysdeps/unix/sysv/linux/s390/sem_post.c: Likewise.
+ * sysdeps/unix/sysv/linux/s390/sem_timedwait.c: Likewise.
+ * sysdeps/unix/sysv/linux/s390/sem_trywait.c: Likewise.
+ * sysdeps/unix/sysv/linux/s390/sem_wait.c: Likewise.
+
2003-03-11 Ulrich Drepper <drepper@redhat.com>
+
+ * sysdeps/pthread/pthread_cond_timedwait.c
+ (__pthread_cond_timedwait): Return the result of the final
+ locking; if it succeeds, return the regular function return value.
+
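An illustrative sketch (not the actual NPTL source) of the
return-value rule described above: an error from re-acquiring the
mutex takes precedence, and only when that final locking succeeds is
the regular result, 0 or ETIMEDOUT, returned.

#include <errno.h>
#include <pthread.h>

static int
cond_timedwait_tail (pthread_mutex_t *mutex, int result)
{
  /* pthread_mutex_lock stands in for the internal relocking step.  */
  int err = pthread_mutex_lock (mutex);
  return err != 0 ? err : result;
}
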
+ * sysdeps/pthread/pthread_cond_wait.c (__pthread_cond_wait):
+ Return result of the final locking.
+ * version.c (__nptl_main): Work around problems with the strange
+ INTERNAL_SYSCALL macro on ppc32.
+ * init.c (__pthread_initialize_minimal_internal): Unblock
+ SIGCANCEL in case the parent blocked it.
+ Reported by Paul Mackerras <paulus@samba.org>.
+
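A minimal sketch of the SIGCANCEL unblocking described in the init.c
item above.  SIGCANCEL's definition is NPTL-internal; SIGRTMIN is
used here only as a stand-in, and the real code performs the mask
change via a direct syscall rather than the libc wrapper:

#include <signal.h>

static void
unblock_sigcancel (void)
{
  sigset_t ss;
  sigemptyset (&ss);
  sigaddset (&ss, SIGRTMIN);             /* stand-in for SIGCANCEL */
  sigprocmask (SIG_UNBLOCK, &ss, NULL);  /* in case the parent blocked it */
}
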
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.S: New file.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_signal.S: New file.
* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: New file.
retq
1: addq $MUTEX, %rdi
- call __lll_lock_wait
+ callq __lll_lock_wait
subq $MUTEX, %rdi
jmp 2b
4: addq $MUTEX, %rdi
- call __lll_unlock_wake
+ callq __lll_unlock_wake
subq $MUTEX, %rdi
jmp 5b
6: addq $MUTEX, %rdi
- call __lll_unlock_wake
+ callq __lll_unlock_wake
subq $MUTEX, %rdi
jmp 7b
.size pthread_barrier_wait,.-pthread_barrier_wait
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- call __lll_mutex_lock_wait
+ callq __lll_mutex_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
/* Unlock in loop requires wakeup. */
5: addq $cond_lock-wakeup_seq, %rdi
- call __lll_mutex_unlock_wake
+ callq __lll_mutex_unlock_wake
jmp 6b
/* Unlock in loop requires wakeup. */
7: addq $cond_lock-wakeup_seq, %rdi
- call __lll_mutex_unlock_wake
+ callq __lll_mutex_unlock_wake
subq $cond_lock-wakeup_seq, %rdi
jmp 8b
.size __pthread_cond_broadcast, .-__pthread_cond_broadcast
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- call __lll_mutex_lock_wait
+ callq __lll_mutex_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
jmp 2b
/* Unlock in loop requires wakeup. */
5: addq $cond_lock-wakeup_seq, %rdi
- call __lll_mutex_unlock_wake
+ callq __lll_mutex_unlock_wake
jmp 6b
.size __pthread_cond_signal, .-__pthread_cond_signal
versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal,
#if cond_lock != 0
addq $cond_lock, %rdi
#endif
- call __lll_mutex_lock_wait
+ callq __lll_mutex_lock_wait
#if cond_lock != 0
subq $cond_lock, %rdi
#endif
leaq clear_once_control(%rip), %rsi
movq %rdi, %rdx
movq %rsp, %rdi
- call __pthread_cleanup_push /* Note: no @PLT. */
+ callq __pthread_cleanup_push /* Note: no @PLT. */
- call *40(%rsp)
+ callq *40(%rsp)
/* Pop the cleanup handler. */
movq %rsp, %rdi
xorq %rsi, %rsi
- call __pthread_cleanup_pop /* Note: no @PLT. */
+ callq __pthread_cleanup_pop /* Note: no @PLT. */
addq $32, %rsp
movq errno@gottpoff(%rip), %rdx
movl $EINVAL, %fs:(%rdx)
#else
- call __errno_location@plt
+ callq __errno_location@plt
movl $EINVAL, (%rax)
#endif
jne 2b
xorl %eax, %eax
- ret
+ retq
/* Check whether the timeout value is valid. */
1: subq $16, %rsp
movl %eax, %fs:(%rdx)
#else
movl %eax, %edx
- call __errno_location@plt
+ callq __errno_location@plt
movl %edx, (%rax)
#endif
movq errno@gottpoff(%rip), %rdx
movl $EAGAIN, %fs:(%rdx)
#else
- call __errno_location@plt
+ callq __errno_location@plt
movl $EAGAIN, (%rax)
#endif
orl $-1, %eax
movl %eax, %fs:(%rdx)
#else
movl %eax, %edx
- call __errno_location@plt
+ callq __errno_location@plt
movl %edx, (%rax)
#endif
orl $-1, %eax
/* In newer 2.1 kernels __NR_syscall is missing, so we define it here. */
#define __NR_syscall 0
+/*
+ * Newer kernel versions renamed __NR_pread and __NR_pwrite to
+ * __NR_pread64 and __NR_pwrite64.  We use the new names but have
+ * to define them ourselves when compiling against older kernel
+ * headers.
+ */
+#ifndef __NR_pread64
+# define __NR_pread64 __NR_pread
+#endif
+#ifndef __NR_pwrite64
+# define __NR_pwrite64 __NR_pwrite
+#endif
+
#undef SYS_ify
#define SYS_ify(syscall_name) __NR_##syscall_name