From: Adhemerval Zanella
Date: Thu, 26 Nov 2020 13:54:04 +0000 (-0300)
Subject: nptl: Add EOVERFLOW checks for futex calls
X-Git-Tag: glibc-2.33~255
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=aa69f19a937b679816ef10e8620ea1141bb1734b;p=thirdparty%2Fglibc.git

nptl: Add EOVERFLOW checks for futex calls

Some futex-internal calls require an additional check for EOVERFLOW (as
indicated by [1] [2] [3]).

For both the mutex and rwlock code, EOVERFLOW is handled as ETIMEDOUT,
since it indicates to the caller that the blocking operation could not
be issued.  For mutexes it avoids a possible issue where
PTHREAD_MUTEX_ROBUST_* might assume EOVERFLOW indicates the futex has
succeeded, and for PTHREAD_MUTEX_PP_* it avoids a potential busy
infinite loop.  For rwlocks and semaphores, it also avoids potential
busy infinite loops.

Checked on x86_64-linux-gnu and i686-linux-gnu, although EOVERFLOW
won't be possible with current usage (since all timeouts on 32-bit
architectures with 32-bit time_t support will be in the range of
32-bit time_t).

[1] https://sourceware.org/pipermail/libc-alpha/2020-November/120079.html
[2] https://sourceware.org/pipermail/libc-alpha/2020-November/120080.html
[3] https://sourceware.org/pipermail/libc-alpha/2020-November/120127.html
---

diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c
index 685dbca32f8..02d11c61dbc 100644
--- a/nptl/pthread_cond_wait.c
+++ b/nptl/pthread_cond_wait.c
@@ -506,7 +506,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
 
           __pthread_cleanup_pop (&buffer, 0);
 
-          if (__glibc_unlikely (err == ETIMEDOUT))
+          if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW))
             {
               __condvar_dec_grefs (cond, g, private);
               /* If we timed out, we effectively cancel waiting.  Note that
@@ -515,7 +515,7 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
                  __condvar_quiesce_and_switch_g1 and us trying to acquire
                  the lock during cancellation is not possible.  */
               __condvar_cancel_waiting (cond, seq, g, private);
-              result = ETIMEDOUT;
+              result = err;
               goto done;
             }
           else
diff --git a/nptl/pthread_mutex_timedlock.c b/nptl/pthread_mutex_timedlock.c
index 74adffe790d..6c72a36b2b8 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -270,7 +270,7 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
                                                 oldval, clockid, abstime,
                                                 PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
               /* The futex call timed out.  */
-              if (err == ETIMEDOUT)
+              if (err == ETIMEDOUT || err == EOVERFLOW)
                 return err;
               /* Reload current lock value.  */
               oldval = mutex->__data.__lock;
@@ -550,8 +550,8 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
               int e = __futex_abstimed_wait64 (
                 (unsigned int *) &mutex->__data.__lock, ceilval | 2,
                 clockid, abstime, PTHREAD_MUTEX_PSHARED (mutex));
-              if (e == ETIMEDOUT)
-                return ETIMEDOUT;
+              if (e == ETIMEDOUT || e == EOVERFLOW)
+                return e;
             }
         }
       while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
diff --git a/nptl/pthread_rwlock_common.c b/nptl/pthread_rwlock_common.c
index 4c9f582d3da..9ef432c4740 100644
--- a/nptl/pthread_rwlock_common.c
+++ b/nptl/pthread_rwlock_common.c
@@ -334,7 +334,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
                                                 private);
           /* We ignore EAGAIN and EINTR.  On time-outs, we can just return
              because we don't need to clean up anything.  */
-          if (err == ETIMEDOUT)
+          if (err == ETIMEDOUT || err == EOVERFLOW)
             return err;
         }
       /* It makes sense to not break out of the outer loop here
@@ -460,7 +460,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
           int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
                                              1 | PTHREAD_RWLOCK_FUTEX_USED,
                                              clockid, abstime, private);
-          if (err == ETIMEDOUT)
+          if (err == ETIMEDOUT || err == EOVERFLOW)
             {
               /* If we timed out, we need to unregister.  If no read phase
                  has been installed while we waited, we can just decrement
@@ -479,7 +479,7 @@ __pthread_rwlock_rdlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
               if (atomic_compare_exchange_weak_relaxed
                   (&rwlock->__data.__readers, &r,
                    r - (1 << PTHREAD_RWLOCK_READER_SHIFT)))
-                return ETIMEDOUT;
+                return err;
               /* TODO Back-off.  */
             }
           /* Use the acquire MO fence to mirror the steps taken in the
@@ -730,7 +730,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
           int err = __futex_abstimed_wait64 (&rwlock->__data.__writers_futex,
                                              1 | PTHREAD_RWLOCK_FUTEX_USED,
                                              clockid, abstime, private);
-          if (err == ETIMEDOUT)
+          if (err == ETIMEDOUT || err == EOVERFLOW)
             {
               if (prefer_writer)
                 {
@@ -758,7 +758,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
             }
           /* We cleaned up and cannot have stolen another waiting writer's
              futex wake-up, so just return.  */
-          return ETIMEDOUT;
+          return err;
         }
       /* If we got interrupted (EINTR) or the futex word does not have the
          expected value (EAGAIN), retry after reloading __readers.  */
@@ -829,7 +829,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
           int err = __futex_abstimed_wait64 (&rwlock->__data.__wrphase_futex,
                                              PTHREAD_RWLOCK_FUTEX_USED,
                                              clockid, abstime, private);
-          if (err == ETIMEDOUT)
+          if (err == ETIMEDOUT || err == EOVERFLOW)
             {
               if (rwlock->__data.__flags != PTHREAD_RWLOCK_PREFER_READER_NP)
                 {
@@ -861,7 +861,7 @@ __pthread_rwlock_wrlock_full64 (pthread_rwlock_t *rwlock, clockid_t clockid,
               if ((wf & PTHREAD_RWLOCK_FUTEX_USED) != 0)
                 futex_wake (&rwlock->__data.__writers_futex, 1, private);
 
-              return ETIMEDOUT;
+              return err;
             }
           /* TODO Back-off.  */
         }
diff --git a/nptl/sem_waitcommon.c b/nptl/sem_waitcommon.c
index 6dd4eb97cb5..0ac1f139bd4 100644
--- a/nptl/sem_waitcommon.c
+++ b/nptl/sem_waitcommon.c
@@ -191,7 +191,7 @@ __new_sem_wait_slow64 (struct new_sem *sem, clockid_t clockid,
              documentation.  Before Linux 2.6.22, EINTR was also returned on
              spurious wake-ups; we only support more recent Linux versions,
              so do not need to consider this here.)  */
-          if (err == ETIMEDOUT || err == EINTR)
+          if (err == ETIMEDOUT || err == EINTR || err == EOVERFLOW)
             {
               __set_errno (err);
               err = -1;
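
For illustration only (not part of the patch): a minimal, self-contained sketch of
the caller-side rule the patch applies throughout, namely treating EOVERFLOW from a
timed futex-style wait exactly like ETIMEDOUT so callers give up instead of spinning.
The wait_stub() helper is hypothetical and merely stands in for the glibc-internal
__futex_abstimed_wait64.

/* Sketch only: wait_stub is a hypothetical stand-in for the glibc-internal
   __futex_abstimed_wait64; here it simulates a timeout the kernel interface
   cannot represent.  */
#include <errno.h>
#include <stdio.h>

static int
wait_stub (unsigned int *futex_word, unsigned int expected)
{
  (void) futex_word;
  (void) expected;
  return EOVERFLOW;
}

/* Timed-lock skeleton: only success or ETIMEDOUT/EOVERFLOW leave the loop;
   both error codes mean the blocking wait could not be carried out, so they
   are reported to the caller instead of being retried.  */
static int
timedlock_sketch (unsigned int *futex_word, unsigned int expected)
{
  for (;;)
    {
      int err = wait_stub (futex_word, expected);
      if (err == 0)
        return 0;
      if (err == ETIMEDOUT || err == EOVERFLOW)
        return err;
      /* EINTR/EAGAIN: reload state and retry (elided in this sketch).  */
    }
}

int
main (void)
{
  unsigned int word = 1;
  int err = timedlock_sketch (&word, 1);
  printf ("timedlock_sketch returned %d (EOVERFLOW=%d, ETIMEDOUT=%d)\n",
          err, EOVERFLOW, ETIMEDOUT);
  return 0;
}

Without the EOVERFLOW case, a retry branch that keeps failing with EOVERFLOW would
never terminate, which is the busy infinite loop the commit message describes.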