git.ipfire.org Git - thirdparty/glibc.git/commitdiff
Linux: set_robust_list syscall number is always available
author: Florian Weimer <fweimer@redhat.com>
Sun, 9 Feb 2020 15:38:33 +0000 (16:38 +0100)
committer: Florian Weimer <fweimer@redhat.com>
Sun, 9 Feb 2020 16:39:40 +0000 (17:39 +0100)
Due to the built-in tables, __NR_set_robust_list is always defined
(although it may not be available at run time).

nptl/nptl-init.c
nptl/pthread_create.c
sysdeps/nptl/fork.c

index 18772480146ca0e90407b3749d7a7b80b4ee41c5..373be89c95e846d8a05f89f87bd36e20ce69d909 100644 (file)
@@ -129,11 +129,9 @@ static
 void
 __nptl_set_robust (struct pthread *self)
 {
-#ifdef __NR_set_robust_list
   INTERNAL_SYSCALL_DECL (err);
   INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
                    sizeof (struct robust_list_head));
-#endif
 }
 
 
@@ -254,7 +252,6 @@ __pthread_initialize_minimal_internal (void)
     pd->robust_prev = &pd->robust_head;
 #endif
     pd->robust_head.list = &pd->robust_head;
-#ifdef __NR_set_robust_list
     pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
                                    - offsetof (pthread_mutex_t,
                                                __data.__list.__next));
@@ -262,7 +259,6 @@ __pthread_initialize_minimal_internal (void)
     int res = INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                                sizeof (struct robust_list_head));
     if (INTERNAL_SYSCALL_ERROR_P (res, err))
-#endif
       set_robust_list_not_avail ();
   }
 
index d3fd58730c3e2f496072c28b3f0b3ab7982f04fb..58706b4160d74e816366760289eb278ba50fca3a 100644 (file)
@@ -389,10 +389,9 @@ START_THREAD_DEFN
   if (__glibc_unlikely (atomic_exchange_acq (&pd->setxid_futex, 0) == -2))
     futex_wake (&pd->setxid_futex, 1, FUTEX_PRIVATE);
 
-#ifdef __NR_set_robust_list
-# ifndef __ASSUME_SET_ROBUST_LIST
+#ifndef __ASSUME_SET_ROBUST_LIST
   if (__set_robust_list_avail >= 0)
-# endif
+#endif
     {
       INTERNAL_SYSCALL_DECL (err);
       /* This call should never fail because the initial call in init.c
@@ -400,7 +399,6 @@ START_THREAD_DEFN
       INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
     }
-#endif
 
   /* If the parent was running cancellation handlers while creating
      the thread the new thread inherited the signal mask.  Reset the
index f5cf88d68c7af36dbf8f80b1226158554acd4f99..5091a000e3854637327a00d47e600f7d08988694 100644 (file)
@@ -83,7 +83,6 @@ __libc_fork (void)
       if (__fork_generation_pointer != NULL)
        *__fork_generation_pointer += __PTHREAD_ONCE_FORK_GEN_INCR;
 
-#ifdef __NR_set_robust_list
       /* Initialize the robust mutex list setting in the kernel which has
         been reset during the fork.  We do not check for errors because if
         it fails here, it must have failed at process startup as well and
@@ -94,19 +93,18 @@ __libc_fork (void)
         inherit the correct value from the parent.  We do not need to clear
         the pending operation because it must have been zero when fork was
         called.  */
-# if __PTHREAD_MUTEX_HAVE_PREV
+#if __PTHREAD_MUTEX_HAVE_PREV
       self->robust_prev = &self->robust_head;
-# endif
+#endif
       self->robust_head.list = &self->robust_head;
-# ifdef SHARED
+#ifdef SHARED
       if (__builtin_expect (__libc_pthread_functions_init, 0))
        PTHFCT_CALL (ptr_set_robust, (self));
-# else
+#else
       extern __typeof (__nptl_set_robust) __nptl_set_robust
        __attribute__((weak));
       if (__builtin_expect (__nptl_set_robust != NULL, 0))
        __nptl_set_robust (self);
-# endif
 #endif
 
       /* Reset the lock state in the multi-threaded case.  */