From d767ca43b8c6b28efb5ad12051f6629e4ceae642 Mon Sep 17 00:00:00 2001
From: Florian Weimer
Date: Sun, 9 Feb 2020 16:38:33 +0100
Subject: [PATCH] Linux: set_robust_list syscall number is always available

Due to the built-in tables, __NR_set_robust_list is always defined
(although it may not be available at run time).
---
 nptl/nptl-init.c      |  4 ----
 nptl/pthread_create.c |  6 ++----
 sysdeps/nptl/fork.c   | 10 ++++------
 3 files changed, 6 insertions(+), 14 deletions(-)

diff --git a/nptl/nptl-init.c b/nptl/nptl-init.c
index 18772480146..373be89c95e 100644
--- a/nptl/nptl-init.c
+++ b/nptl/nptl-init.c
@@ -129,11 +129,9 @@
 static void
 __nptl_set_robust (struct pthread *self)
 {
-#ifdef __NR_set_robust_list
   INTERNAL_SYSCALL_DECL (err);
   INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                     sizeof (struct robust_list_head));
-#endif
 }
 
 
@@ -254,7 +252,6 @@ __pthread_initialize_minimal_internal (void)
     pd->robust_prev = &pd->robust_head;
 #endif
     pd->robust_head.list = &pd->robust_head;
-#ifdef __NR_set_robust_list
     pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                     - offsetof (pthread_mutex_t,
                                                 __data.__list.__next));
@@ -262,7 +259,6 @@
     int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                                 sizeof (struct robust_list_head));
     if (INTERNAL_SYSCALL_ERROR_P (res, err))
-#endif
       set_robust_list_not_avail ();
   }
 
diff --git a/nptl/pthread_create.c b/nptl/pthread_create.c
index d3fd58730c3..58706b4160d 100644
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -389,10 +389,9 @@ START_THREAD_DEFN
   if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
     futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);
 
-#ifdef __NR_set_robust_list
-# ifndef __ASSUME_SET_ROBUST_LIST
+#ifndef __ASSUME_SET_ROBUST_LIST
   if (__set_robust_list_avail >= 0)
-# endif
+#endif
     {
       INTERNAL_SYSCALL_DECL (err);
       /* This call should never fail because the initial call in init.c
@@ -400,7 +399,6 @@ START_THREAD_DEFN
       INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                         sizeof (struct robust_list_head));
     }
-#endif
 
   /* If the parent was running cancellation handlers while creating
      the thread the new thread inherited the signal mask.  Reset the
diff --git a/sysdeps/nptl/fork.c b/sysdeps/nptl/fork.c
index f5cf88d68c7..5091a000e38 100644
--- a/sysdeps/nptl/fork.c
+++ b/sysdeps/nptl/fork.c
@@ -83,7 +83,6 @@ __libc_fork (void)
   if (__fork_generation_pointer != NULL)
     *__fork_generation_pointer += __PTHREAD_ONCE_FORK_GEN_INCR;
 
-#ifdef __NR_set_robust_list
   /* Initialize the robust mutex list setting in the kernel which has
      been reset during the fork.  We do not check for errors because if
      it fails here, it must have failed at process startup as well and
@@ -94,19 +93,18 @@
      inherit the correct value from the parent.  We do not need to
      clear the pending operation because it must have been zero when
      fork was called.  */
-# if __PTHREAD_MUTEX_HAVE_PREV
+#if __PTHREAD_MUTEX_HAVE_PREV
   self->robust_prev = &self->robust_head;
-# endif
+#endif
   self->robust_head.list = &self->robust_head;
-# ifdef SHARED
+#ifdef SHARED
   if (__builtin_expect (__libc_pthread_functions_init, 0))
     PTHFCT_CALL (ptr_set_robust, (self));
-# else
+#else
   extern __typeof (__nptl_set_robust) __nptl_set_robust
     __attribute__((weak));
   if (__builtin_expect (__nptl_set_robust != NULL, 0))
     __nptl_set_robust (self);
-# endif
 #endif
 
   /* Reset the lock state in the multi-threaded case.  */
-- 
2.47.2
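
Note (not part of the patch): the commit removes only the compile-time
#ifdef __NR_set_robust_list guards; the run-time check stays, because on an
old kernel the set_robust_list call itself fails with ENOSYS and glibc then
falls back via set_robust_list_not_avail ().  The following is a minimal,
hypothetical standalone sketch of that run-time probe.  It uses the public
syscall () wrapper instead of glibc's internal INTERNAL_SYSCALL macros, which
are not available outside glibc.

/* Sketch only: probe whether set_robust_list works on the running kernel.
   Assumes a Linux system with <linux/futex.h> providing
   struct robust_list_head.  */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>        /* struct robust_list, struct robust_list_head */

int
main (void)
{
  /* An empty robust list: the head points back at itself, mirroring what
     the patched code sets up in pd->robust_head / self->robust_head.  */
  static struct robust_list_head head;
  head.list.next = &head.list;
  head.futex_offset = 0;
  head.list_op_pending = NULL;

  /* NOTE: this replaces the robust list glibc already registered for this
     thread, so it is a demonstration of the probe, not something a real
     program should do.  */
  if (syscall (SYS_set_robust_list, &head, sizeof (head)) == 0)
    puts ("set_robust_list available at run time");
  else if (errno == ENOSYS)
    puts ("kernel lacks set_robust_list; glibc would take the"
          " set_robust_list_not_avail () path");
  else
    printf ("set_robust_list failed: %s\n", strerror (errno));
  return 0;
}

The probe mirrors the design of the patched code: the syscall number is
assumed to exist at build time, and only the syscall's result decides whether
robust-mutex kernel support is usable.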