]> git.ipfire.org Git - thirdparty/glibc.git/blame - sysdeps/htl/pt-cond-timedwait.c
Update copyright dates with scripts/update-copyrights
[thirdparty/glibc.git] / sysdeps / htl / pt-cond-timedwait.c
CommitLineData
/* Wait on a condition.  Generic version.
   Copyright (C) 2000-2023 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <pthread.h>

#include <pt-internal.h>
#include <pthreadP.h>
#include <time.h>

/* Shared implementation behind both wait entry points below.
   CLOCKID == -1 means "no explicit clock": the clock is then taken from
   the condition's attributes, falling back to the process default.
   Defined at the bottom of this file.  */
extern int __pthread_cond_timedwait_internal (pthread_cond_t *cond,
					      pthread_mutex_t *mutex,
					      clockid_t clockid,
					      const struct timespec *abstime);
29
30int
31__pthread_cond_timedwait (pthread_cond_t *cond,
32 pthread_mutex_t *mutex,
33 const struct timespec *abstime)
34{
28cada04 35 return __pthread_cond_timedwait_internal (cond, mutex, -1, abstime);
33574c17
ST
36}
37
fba7fc5a 38weak_alias (__pthread_cond_timedwait, pthread_cond_timedwait);
33574c17 39
28cada04
ST
40int
41__pthread_cond_clockwait (pthread_cond_t *cond,
42 pthread_mutex_t *mutex,
43 clockid_t clockid,
44 const struct timespec *abstime)
45{
46 return __pthread_cond_timedwait_internal (cond, mutex, clockid, abstime);
47}
48
49weak_alias (__pthread_cond_clockwait, pthread_cond_clockwait);
50
33574c17
ST
/* Context handed to cancel_hook (via self->cancel_hook_arg) so that a
   cancelling thread can find and wake a waiter blocked on a condition.  */
struct cancel_ctx
{
  struct __pthread *wakeup;	/* The blocked waiter to dequeue and wake.  */
  pthread_cond_t *cond;		/* The condition it is queued on.  */
};
56
57static void
58cancel_hook (void *arg)
59{
60 struct cancel_ctx *ctx = arg;
61 struct __pthread *wakeup = ctx->wakeup;
62 pthread_cond_t *cond = ctx->cond;
63 int unblock;
64
8ba6ad70 65 __pthread_spin_wait (&cond->__lock);
33574c17
ST
66 /* The thread only needs to be awaken if it's blocking or about to block.
67 If it was already unblocked, it's not queued any more. */
68 unblock = wakeup->prevp != NULL;
69 if (unblock)
70 __pthread_dequeue (wakeup);
71 __pthread_spin_unlock (&cond->__lock);
72
73 if (unblock)
74 __pthread_wakeup (wakeup);
75}
76
/* Block on condition variable COND until ABSTIME.  As a GNU
   extension, if ABSTIME is NULL, then wait forever.  MUTEX should be
   held by the calling thread.  On return, MUTEX will be held by the
   calling thread.  CLOCKID names the clock ABSTIME is measured against;
   -1 means use the condition's attribute clock, or the process default
   when the condition has no attributes.  Returns 0 on wakeup, EINVAL on
   a malformed ABSTIME, ETIMEDOUT on timeout, or an error from the mutex
   check; does not return at all if the thread is cancelled.  */
int
__pthread_cond_timedwait_internal (pthread_cond_t *cond,
				   pthread_mutex_t *mutex,
				   clockid_t clockid,
				   const struct timespec *abstime)
{
  error_t err;
  int cancelled, oldtype, drain;
  clockid_t clock_id;

  /* Provisional clock choice: an explicit CLOCKID wins; otherwise start
     from the process default.  A per-condition attribute clock may still
     override the default below, read under the condition lock.  */
  if (clockid != -1)
    clock_id = clockid;
  else
    clock_id = __pthread_default_condattr.__clock;

  if (abstime && ! valid_nanoseconds (abstime->tv_nsec))
    return EINVAL;

  /* NOTE(review): presumably verifies that MUTEX is locked by the
     caller and returns an error otherwise — confirm against the
     __pthread_mutex_checklocked definition.  */
  err = __pthread_mutex_checklocked (mutex);
  if (err)
    return err;

  struct __pthread *self = _pthread_self ();
  struct cancel_ctx ctx;
  ctx.wakeup = self;
  ctx.cond = cond;

  /* Test for a pending cancellation request, switch to deferred mode for
     safer resource handling, and prepare the hook to call in case we're
     cancelled while blocking.  Once CANCEL_LOCK is released, the cancellation
     hook can be called by another thread at any time.  Whatever happens,
     this function must exit with MUTEX locked.

     This function contains inline implementations of pthread_testcancel and
     pthread_setcanceltype to reduce locking overhead.  */
  __pthread_mutex_lock (&self->cancel_lock);
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;

  if (cancelled)
    {
      /* Cancelled before we even queued: act on it immediately.  MUTEX
	 is still held by the caller, as required on the exit path.  */
      __pthread_mutex_unlock (&self->cancel_lock);
      __pthread_exit (PTHREAD_CANCELED);
    }

  self->cancel_hook = cancel_hook;
  self->cancel_hook_arg = &ctx;
  oldtype = self->cancel_type;

  if (oldtype != PTHREAD_CANCEL_DEFERRED)
    self->cancel_type = PTHREAD_CANCEL_DEFERRED;

  /* Add ourselves to the list of waiters.  This is done while setting
     the cancellation hook to simplify the cancellation procedure, i.e.
     if the thread is queued, it can be cancelled, otherwise it is
     already unblocked, progressing on the return path.  */
  __pthread_spin_wait (&cond->__lock);
  __pthread_enqueue (&cond->__queue, self);
  /* The attribute clock only applies when the caller did not pass an
     explicit clock id.  */
  if (cond->__attr != NULL && clockid == -1)
    clock_id = cond->__attr->__clock;
  __pthread_spin_unlock (&cond->__lock);

  __pthread_mutex_unlock (&self->cancel_lock);

  /* Increase the waiter reference count.  Relaxed MO is sufficient because
     we only need to synchronize when decrementing the reference count.
     We however need to have the mutex held to prevent concurrency with
     a pthread_cond_destroy.  */
  atomic_fetch_add_relaxed (&cond->__wrefs, 2);

  /* Release MUTEX before blocking.  */
  __pthread_mutex_unlock (mutex);

  /* Block the thread.  For a NULL ABSTIME, block with no timeout; ERR
     stays 0 because only a wakeup can end the wait.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, clock_id);
  else
    {
      err = 0;
      __pthread_block (self);
    }

  __pthread_spin_wait (&cond->__lock);
  if (self->prevp == NULL)
    {
      /* Another thread removed us from the list of waiters, which means a
	 wakeup message has been sent.  It was either consumed while we were
	 blocking, or queued after we timed out and before we acquired the
	 condition lock, in which case the message queue must be drained.  */
      if (!err)
	drain = 0;
      else
	{
	  assert (err == ETIMEDOUT);
	  drain = 1;
	}
    }
  else
    {
      /* We're still in the list of waiters.  Noone attempted to wake us up,
	 i.e. we timed out.  */
      assert (err == ETIMEDOUT);
      __pthread_dequeue (self);
      drain = 0;
    }
  __pthread_spin_unlock (&cond->__lock);

  /* If destruction is pending (i.e., the wake-request flag is nonzero) and we
     are the last waiter (prior value of __wrefs was 1 << 1), then wake any
     threads waiting in pthread_cond_destroy.  Release MO to synchronize with
     these threads.  Don't bother clearing the wake-up request flag.  */
  if ((atomic_fetch_add_release (&cond->__wrefs, -2)) == 3)
    __gsync_wake (__mach_task_self (), (vm_offset_t) &cond->__wrefs, 0, 0);

  /* Consume the stale wakeup message (see above) so it cannot spuriously
     wake a later block on SELF.  */
  if (drain)
    __pthread_block (self);

  /* We're almost done.  Remove the unblock hook, restore the previous
     cancellation type, and check for a pending cancellation request.  */
  __pthread_mutex_lock (&self->cancel_lock);
  self->cancel_hook = NULL;
  self->cancel_hook_arg = NULL;
  self->cancel_type = oldtype;
  cancelled = (self->cancel_state == PTHREAD_CANCEL_ENABLE)
      && self->cancel_pending;
  __pthread_mutex_unlock (&self->cancel_lock);

  /* Reacquire MUTEX before returning/cancelling.  */
  __pthread_mutex_lock (mutex);

  if (cancelled)
    __pthread_exit (PTHREAD_CANCELED);

  return err;
}