/* pthread_hurd_cond_timedwait_np.  Hurd-specific wait on a condition.
   Copyright (C) 2012-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <pthread.h>
#include <assert.h>
#include <errno.h>
#include <time.h>
#include <hurd/signal.h>

#include <pt-internal.h>
26 extern int __pthread_hurd_cond_timedwait_internal (pthread_cond_t
*cond
,
27 pthread_mutex_t
*mutex
,
32 __pthread_hurd_cond_timedwait_np (pthread_cond_t
*cond
,
33 pthread_mutex_t
*mutex
,
34 const struct timespec
*abstime
)
36 return __pthread_hurd_cond_timedwait_internal (cond
, mutex
, abstime
);
39 strong_alias (__pthread_hurd_cond_timedwait_np
, pthread_hurd_cond_timedwait_np
);
42 __pthread_hurd_cond_timedwait_internal (pthread_cond_t
*cond
,
43 pthread_mutex_t
*mutex
,
44 const struct timespec
*abstime
)
46 struct hurd_sigstate
*ss
= _hurd_self_sigstate ();
47 struct __pthread
*self
= _pthread_self ();
50 clockid_t clock_id
= __pthread_default_condattr
.__clock
;
52 /* This function will be called by hurd_thread_cancel while we are blocked
53 We wake up our thread if it's still blocking or about to block, so it will
54 progress and notice the cancellation flag. */
59 __pthread_spin_wait (&cond
->__lock
);
60 /* The thread only needs to be awaken if it's blocking or about to block.
61 If it was already unblocked, it's not queued any more. */
62 unblock
= self
->prevp
!= NULL
;
64 __pthread_dequeue (self
);
65 __pthread_spin_unlock (&cond
->__lock
);
68 __pthread_wakeup (self
);
71 assert (ss
->intr_port
== MACH_PORT_NULL
); /* Sanity check for signal bugs. */
73 if (abstime
!= NULL
&& ! valid_nanoseconds (abstime
->tv_nsec
))
76 /* Atomically enqueue our thread on the condition variable's queue of
77 waiters, and mark our sigstate to indicate that `cancel_me' must be
78 called to wake us up. We must hold the sigstate lock while acquiring
79 the condition variable's lock and tweaking it, so that
80 hurd_thread_cancel can never suspend us and then deadlock waiting for
81 the condition variable's lock. */
83 __spin_lock (&ss
->lock
);
84 __pthread_spin_wait (&cond
->__lock
);
87 /* We were cancelled before doing anything. Don't block at all. */
91 /* Put us on the queue so that pthread_cond_broadcast will know to wake
93 __pthread_enqueue (&cond
->__queue
, self
);
95 clock_id
= cond
->__attr
->__clock
;
96 /* Tell hurd_thread_cancel how to unblock us. */
97 ss
->cancel_hook
= &cancel_me
;
99 __pthread_spin_unlock (&cond
->__lock
);
100 __spin_unlock (&ss
->lock
);
104 /* Cancelled on entry. Just leave the mutex locked. */
107 __spin_lock (&ss
->lock
);
111 /* Release MUTEX before blocking. */
112 __pthread_mutex_unlock (mutex
);
114 /* Increase the waiter reference count. Relaxed MO is sufficient because
115 we only need to synchronize when decrementing the reference count. */
116 atomic_fetch_add_relaxed (&cond
->__wrefs
, 2);
118 /* Block the thread. */
120 err
= __pthread_timedblock (self
, abstime
, clock_id
);
124 __pthread_block (self
);
127 /* As it was done when enqueueing, prevent hurd_thread_cancel from
128 suspending us while the condition lock is held. */
129 __spin_lock (&ss
->lock
);
130 __pthread_spin_wait (&cond
->__lock
);
131 if (self
->prevp
== NULL
)
132 /* Another thread removed us from the list of waiters, which means
133 a wakeup message has been sent. It was either consumed while
134 we were blocking, or queued after we timed out and before we
135 acquired the condition lock, in which case the message queue
140 /* We're still in the list of waiters. Noone attempted to wake us
141 up, i.e. we timed out. */
142 __pthread_dequeue (self
);
145 __pthread_spin_unlock (&cond
->__lock
);
148 __pthread_block (self
);
151 /* If destruction is pending (i.e., the wake-request flag is nonzero) and we
152 are the last waiter (prior value of __wrefs was 1 << 1), then wake any
153 threads waiting in pthread_cond_destroy. Release MO to synchronize with
154 these threads. Don't bother clearing the wake-up request flag. */
155 if ((atomic_fetch_add_release (&cond
->__wrefs
, -2)) == 3)
156 __gsync_wake (__mach_task_self (), (vm_offset_t
) &cond
->__wrefs
, 0, 0);
158 /* Clear the hook, now that we are done blocking. */
159 ss
->cancel_hook
= NULL
;
160 /* Check the cancellation flag; we might have unblocked due to
161 cancellation rather than a normal pthread_cond_signal or
162 pthread_cond_broadcast (or we might have just happened to get cancelled
163 right after waking up). */
164 cancel
|= ss
->cancel
;
166 __spin_unlock (&ss
->lock
);
169 /* Reacquire the mutex and return. */
170 __pthread_mutex_lock (mutex
);
176 assert (err
== ETIMEDOUT
);