glibc: nptl: Fix indentation
author Sunil Dora <sunilkumar.dora@windriver.com>
Tue, 17 Jun 2025 10:08:53 +0000 (03:08 -0700)
committer Steve Sakoman <steve@sakoman.com>
Tue, 17 Jun 2025 15:05:29 +0000 (08:05 -0700)
The following commit has been cherry-picked from the glibc master branch:
Bug: https://sourceware.org/bugzilla/show_bug.cgi?id=25847

Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=commit;h=ee6c14ed59d480720721aaacc5fb03213dc153da]

Signed-off-by: Sunil Dora <sunilkumar.dora@windriver.com>
Signed-off-by: Steve Sakoman <steve@sakoman.com>
meta/recipes-core/glibc/glibc/0026-PR25847-6.patch [new file with mode: 0644]
meta/recipes-core/glibc/glibc_2.35.bb

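For context, the previous commit in the PR25847 series turned the nested wait loop in __pthread_cond_wait_common into a simple loop; this follow-up only re-indents the surviving body, so the diff below is large but behavior-neutral. The shape of the resulting loop is a load, a compare-and-swap that grabs a signal (counted in steps of 2), and a retry on CAS failure. A minimal, self-contained C11 sketch of that pattern follows; the names sig and consume_one are invented for illustration and are not glibc internals.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for cond->__data.__g_signals[g]; glibc reserves the low bit
   as a flag, so signals are counted in steps of 2.  Illustrative only.  */
static _Atomic unsigned int sig = 6;

static int consume_one (void)
{
  unsigned int s = atomic_load_explicit (&sig, memory_order_acquire);
  while (1)
    {
      if (s < 2)
        return 0;   /* Nothing available; the real code would block here.  */
      /* Try to grab a signal.  On failure the CAS refreshes 's' and we
         simply retry -- the single-loop structure this patch re-indents.  */
      if (atomic_compare_exchange_weak_explicit (&sig, &s, s - 2,
                                                 memory_order_acquire,
                                                 memory_order_relaxed))
        return 1;
    }
}

int main (void)
{
  while (consume_one ())
    puts ("consumed a signal");
  return 0;
}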
diff --git a/meta/recipes-core/glibc/glibc/0026-PR25847-6.patch b/meta/recipes-core/glibc/glibc/0026-PR25847-6.patch
new file mode 100644
index 0000000..cf87e21
--- /dev/null
@@ -0,0 +1,169 @@
+From a2faee6d0dac6e5232255da9afda4d9ed6cfb6e5 Mon Sep 17 00:00:00 2001
+From: Malte Skarupke <malteskarupke@fastmail.fm>
+Date: Tue, 17 Jun 2025 01:37:12 -0700
+Subject: [PATCH] nptl: Fix indentation
+
+In my previous change I turned a nested loop into a simple loop. I'm doing
+the resulting indentation changes in a separate commit to make the diff on
+the previous commit easier to review.
+
+The following commit has been cherry-picked from the glibc master branch:
+Bug: https://sourceware.org/bugzilla/show_bug.cgi?id=25847
+
+Upstream-Status: Backport [https://sourceware.org/git/?p=glibc.git;a=commit;h=ee6c14ed59d480720721aaacc5fb03213dc153da]
+
+Signed-off-by: Sunil Dora <sunilkumar.dora@windriver.com>
+---
+ nptl/pthread_cond_wait.c | 132 ++++++++++++++++-----------------------
+ 1 file changed, 54 insertions(+), 78 deletions(-)
+
+diff --git a/nptl/pthread_cond_wait.c b/nptl/pthread_cond_wait.c
+index 5c86880105..104ebd48ca 100644
+--- a/nptl/pthread_cond_wait.c
++++ b/nptl/pthread_cond_wait.c
+@@ -410,87 +410,63 @@ __pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex,
+       return err;
+     }
+-
+-      while (1)
+-      {
+-        /* Now wait until a signal is available in our group or it is closed.
+-           Acquire MO so that if we observe (signals == lowseq) after group
+-           switching in __condvar_quiesce_and_switch_g1, we synchronize with that
+-           store and will see the prior update of __g1_start done while switching
+-           groups too.  */
+-        unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g);
+-        uint64_t g1_start = __condvar_load_g1_start_relaxed (cond);
+-        unsigned int lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U;
+-
+-        if (seq < (g1_start >> 1))
+-          {
+-            /* If the group is closed already,
+-               then this waiter originally had enough extra signals to
+-               consume, up until the time its group was closed.  */
+-             break;
+-          }
+-
+-        /* If there is an available signal, don't block.
+-           If __g1_start has advanced at all, then we must be in G1
+-           by now, perhaps in the process of switching back to an older
+-           G2, but in either case we're allowed to consume the available
+-           signal and should not block anymore.  */
+-        if ((int)(signals - lowseq) >= 2)
+-           {
+-             /* Try to grab a signal.  See above for MO.  (if we do another loop
+-                iteration we need to see the correct value of g1_start)  */
+-                     if (atomic_compare_exchange_weak_acquire (
+-                               cond->__data.__g_signals + g,
++  while (1)
++    {
++      /* Now wait until a signal is available in our group or it is closed.
++         Acquire MO so that if we observe (signals == lowseq) after group
++         switching in __condvar_quiesce_and_switch_g1, we synchronize with that
++         store and will see the prior update of __g1_start done while switching
++         groups too.  */
++      unsigned int signals = atomic_load_acquire (cond->__data.__g_signals + g);
++      uint64_t g1_start = __condvar_load_g1_start_relaxed (cond);
++      unsigned int lowseq = (g1_start & 1) == g ? signals : g1_start & ~1U;
++
++      if (seq < (g1_start >> 1))
++        {
++          /* If the group is closed already,
++             then this waiter originally had enough extra signals to
++             consume, up until the time its group was closed.  */
++           break;
++        }
++
++      /* If there is an available signal, don't block.
++         If __g1_start has advanced at all, then we must be in G1
++         by now, perhaps in the process of switching back to an older
++         G2, but in either case we're allowed to consume the available
++         signal and should not block anymore.  */
++      if ((int)(signals - lowseq) >= 2)
++        {
++         /* Try to grab a signal.  See above for MO.  (if we do another loop
++            iteration we need to see the correct value of g1_start)  */
++           if (atomic_compare_exchange_weak_acquire (
++                       cond->__data.__g_signals + g,
+                        &signals, signals - 2))
+-                       break;
+-                     else
+-                       continue;
+-           }
+-        /* No signals available after spinning, so prepare to block.
+-           We first acquire a group reference and use acquire MO for that so
+-           that we synchronize with the dummy read-modify-write in
+-           __condvar_quiesce_and_switch_g1 if we read from that.  In turn,
+-           in this case this will make us see the advancement of __g_signals
+-           to the upcoming new g1_start that occurs with a concurrent
+-           attempt to reuse the group's slot.
+-           We use acquire MO for the __g_signals check to make the
+-           __g1_start check work (see spinning above).
+-           Note that the group reference acquisition will not mask the
+-           release MO when decrementing the reference count because we use
+-           an atomic read-modify-write operation and thus extend the release
+-           sequence.  */
+-        atomic_fetch_add_acquire (cond->__data.__g_refs + g, 2);
+-
+-        // Now block.
+-        struct _pthread_cleanup_buffer buffer;
+-        struct _condvar_cleanup_buffer cbuffer;
+-        cbuffer.wseq = wseq;
+-        cbuffer.cond = cond;
+-        cbuffer.mutex = mutex;
+-        cbuffer.private = private;
+-        __pthread_cleanup_push (&buffer, __condvar_cleanup_waiting, &cbuffer);
+-
+-        err = __futex_abstimed_wait_cancelable64 (
+-          cond->__data.__g_signals + g, signals, clockid, abstime, private);
+-
+-        __pthread_cleanup_pop (&buffer, 0);
+-
+-        if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW))
+-          {
+-            __condvar_dec_grefs (cond, g, private);
+-            /* If we timed out, we effectively cancel waiting.  Note that
+-               we have decremented __g_refs before cancellation, so that a
+-               deadlock between waiting for quiescence of our group in
+-               __condvar_quiesce_and_switch_g1 and us trying to acquire
+-               the lock during cancellation is not possible.  */
+-            __condvar_cancel_waiting (cond, seq, g, private);
+-            result = err;
+             break;
+-          }
+-        else
+-          __condvar_dec_grefs (cond, g, private);
+-
++         else
++            continue;
+       }
++      // Now block.
++      struct _pthread_cleanup_buffer buffer;
++      struct _condvar_cleanup_buffer cbuffer;
++      cbuffer.wseq = wseq;
++      cbuffer.cond = cond;
++      cbuffer.mutex = mutex;
++      cbuffer.private = private;
++      __pthread_cleanup_push (&buffer, __condvar_cleanup_waiting, &cbuffer);
++
++      err = __futex_abstimed_wait_cancelable64 (
++        cond->__data.__g_signals + g, signals, clockid, abstime, private);
++
++      __pthread_cleanup_pop (&buffer, 0);
++
++      if (__glibc_unlikely (err == ETIMEDOUT || err == EOVERFLOW))
++        {
++          /* If we timed out, we effectively cancel waiting.  */
++          __condvar_cancel_waiting (cond, seq, g, private);
++          result = err;
++          break;
++        }
++    }
+   /* Confirm that we have been woken.  We do that before acquiring the mutex
+      to allow for execution of pthread_cond_destroy while having acquired the
+-- 
+2.49.0
+
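From a caller's perspective, the ETIMEDOUT path handled near the end of the loop above surfaces through pthread_cond_timedwait. An ordinary POSIX usage sketch, independent of this patch and shown only for orientation:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready;   /* predicate guarded by 'lock' */

int main (void)
{
  struct timespec deadline;
  clock_gettime (CLOCK_REALTIME, &deadline);
  deadline.tv_sec += 1;   /* wait at most one second */

  pthread_mutex_lock (&lock);
  int err = 0;
  while (!ready && err != ETIMEDOUT)
    err = pthread_cond_timedwait (&cond, &lock, &deadline);   /* may wake spuriously */
  pthread_mutex_unlock (&lock);

  puts (err == ETIMEDOUT ? "timed out" : "woken");
  return 0;
}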
diff --git a/meta/recipes-core/glibc/glibc_2.35.bb b/meta/recipes-core/glibc/glibc_2.35.bb
index 04dcb22c3d97ae63617eeab981b6aeed1a16dfc9..d3c0cbd703e1a27f64898e9dd211f01c61a9de5e 100644
@@ -67,6 +67,7 @@ SRC_URI =  "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \
            file://0026-PR25847-3.patch \
            file://0026-PR25847-4.patch \
            file://0026-PR25847-5.patch \
+           file://0026-PR25847-6.patch \
            \
            file://0001-Revert-Linux-Implement-a-useful-version-of-_startup_.patch \
            file://0002-get_nscd_addresses-Fix-subscript-typos-BZ-29605.patch \
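
For a build that cannot modify oe-core's glibc recipe directly, the same backport could instead be carried from a separate layer via a bbappend; a minimal sketch, assuming a hypothetical layer named meta-mylayer:

# meta-mylayer/recipes-core/glibc/glibc_%.bbappend  (hypothetical path)
FILESEXTRAPATHS:prepend := "${THISDIR}/${PN}:"
SRC_URI += "file://0026-PR25847-6.patch"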