/* pthread_hurd_cond_timedwait_np.  Hurd-specific wait on a condition.
   Copyright (C) 2012-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <pthread.h>
#include <assert.h>
#include <errno.h>
#include <time.h>
#include <hurd/signal.h>

#include <pt-internal.h>
/* Forward declaration of the internal implementation defined below.
   The mangled original dropped the third parameter; it is restored here
   to match the definition's signature (cond, mutex, abstime).  */
extern int __pthread_hurd_cond_timedwait_internal (pthread_cond_t *cond,
						   pthread_mutex_t *mutex,
						   const struct timespec
						   *abstime);
/* Hurd-specific wait on condition COND with mutex MUTEX held, giving up
   after ABSTIME (a NULL ABSTIME means wait without a timeout).  Thin
   public wrapper around the internal implementation below.
   NOTE(review): the mangled source dropped the `int' return-type line and
   the braces; restored here.  */
int
__pthread_hurd_cond_timedwait_np (pthread_cond_t *cond,
				  pthread_mutex_t *mutex,
				  const struct timespec *abstime)
{
  return __pthread_hurd_cond_timedwait_internal (cond, mutex, abstime);
}
/* Export the implementation under its public (non-__-prefixed) name.  */
strong_alias (__pthread_hurd_cond_timedwait_np, pthread_hurd_cond_timedwait_np);
42 __pthread_hurd_cond_timedwait_internal (pthread_cond_t
*cond
,
43 pthread_mutex_t
*mutex
,
44 const struct timespec
*abstime
)
46 struct hurd_sigstate
*ss
= _hurd_self_sigstate ();
47 struct __pthread
*self
= _pthread_self ();
50 clockid_t clock_id
= __pthread_default_condattr
.__clock
;
52 /* This function will be called by hurd_thread_cancel while we are blocked
53 We wake up our thread if it's still blocking or about to block, so it will
54 progress and notice the cancellation flag. */
59 __pthread_spin_wait (&cond
->__lock
);
60 /* The thread only needs to be awaken if it's blocking or about to block.
61 If it was already unblocked, it's not queued any more. */
62 unblock
= self
->prevp
!= NULL
;
64 __pthread_dequeue (self
);
65 __pthread_spin_unlock (&cond
->__lock
);
68 __pthread_wakeup (self
);
71 assert (ss
->intr_port
== MACH_PORT_NULL
); /* Sanity check for signal bugs. */
73 if (abstime
!= NULL
&& ! valid_nanoseconds (abstime
->tv_nsec
))
76 err
= __pthread_mutex_checklocked (mutex
);
80 /* Atomically enqueue our thread on the condition variable's queue of
81 waiters, and mark our sigstate to indicate that `cancel_me' must be
82 called to wake us up. We must hold the sigstate lock while acquiring
83 the condition variable's lock and tweaking it, so that
84 hurd_thread_cancel can never suspend us and then deadlock waiting for
85 the condition variable's lock. */
87 __spin_lock (&ss
->lock
);
88 __pthread_spin_wait (&cond
->__lock
);
91 /* We were cancelled before doing anything. Don't block at all. */
95 /* Put us on the queue so that pthread_cond_broadcast will know to wake
97 __pthread_enqueue (&cond
->__queue
, self
);
99 clock_id
= cond
->__attr
->__clock
;
100 /* Tell hurd_thread_cancel how to unblock us. */
101 ss
->cancel_hook
= &cancel_me
;
103 __pthread_spin_unlock (&cond
->__lock
);
104 __spin_unlock (&ss
->lock
);
106 /* Increase the waiter reference count. Relaxed MO is sufficient because
107 we only need to synchronize when decrementing the reference count.
108 We however need to have the mutex held to prevent concurrency with
109 a pthread_cond_destroy. */
110 atomic_fetch_add_relaxed (&cond
->__wrefs
, 2);
114 /* Cancelled on entry. Just leave the mutex locked. */
117 __spin_lock (&ss
->lock
);
121 /* Release MUTEX before blocking. */
122 __pthread_mutex_unlock (mutex
);
124 /* Block the thread. */
126 err
= __pthread_timedblock (self
, abstime
, clock_id
);
130 __pthread_block (self
);
133 /* As it was done when enqueueing, prevent hurd_thread_cancel from
134 suspending us while the condition lock is held. */
135 __spin_lock (&ss
->lock
);
136 __pthread_spin_wait (&cond
->__lock
);
137 if (self
->prevp
== NULL
)
138 /* Another thread removed us from the list of waiters, which means
139 a wakeup message has been sent. It was either consumed while
140 we were blocking, or queued after we timed out and before we
141 acquired the condition lock, in which case the message queue
146 /* We're still in the list of waiters. Noone attempted to wake us
147 up, i.e. we timed out. */
148 __pthread_dequeue (self
);
151 __pthread_spin_unlock (&cond
->__lock
);
154 __pthread_block (self
);
157 /* If destruction is pending (i.e., the wake-request flag is nonzero) and we
158 are the last waiter (prior value of __wrefs was 1 << 1), then wake any
159 threads waiting in pthread_cond_destroy. Release MO to synchronize with
160 these threads. Don't bother clearing the wake-up request flag. */
161 if ((atomic_fetch_add_release (&cond
->__wrefs
, -2)) == 3)
162 __gsync_wake (__mach_task_self (), (vm_offset_t
) &cond
->__wrefs
, 0, 0);
164 /* Clear the hook, now that we are done blocking. */
165 ss
->cancel_hook
= NULL
;
166 /* Check the cancellation flag; we might have unblocked due to
167 cancellation rather than a normal pthread_cond_signal or
168 pthread_cond_broadcast (or we might have just happened to get cancelled
169 right after waking up). */
170 cancel
|= ss
->cancel
;
172 __spin_unlock (&ss
->lock
);
175 /* Reacquire the mutex and return. */
176 __pthread_mutex_lock (mutex
);
182 assert (err
== ETIMEDOUT
);