/* Lock a mutex with a timeout.  Generic version.
   Copyright (C) 2000-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <pthread.h>
#include <assert.h>

#include <pt-internal.h>

/* There is no sane way to continue after an internal inconsistency,
   so crash deliberately: write through a null pointer, and loop in
   case the fault ever returns.  */
#define LOSE do { * (int *) 0 = 0; } while (1)

/* Try to lock MUTEX, block until *ABSTIME if it is already held.  As
   a GNU extension, if ABSTIME is NULL then wait forever.  */
int
__pthread_mutex_timedlock_internal (struct __pthread_mutex *mutex,
                                    const struct timespec *abstime)
{
  error_t err;
  int drain;
  struct __pthread *self;
  const struct __pthread_mutexattr *attr = mutex->__attr;

  if (attr == __PTHREAD_ERRORCHECK_MUTEXATTR)
    attr = &__pthread_errorcheck_mutexattr;
  if (attr == __PTHREAD_RECURSIVE_MUTEXATTR)
    attr = &__pthread_recursive_mutexattr;

  __pthread_spin_lock (&mutex->__lock);
  if (__pthread_spin_trylock (&mutex->__held) == 0)
    /* Successfully acquired the lock.  */
    {
#ifdef ALWAYS_TRACK_MUTEX_OWNER
# ifndef NDEBUG
      self = _pthread_self ();
      if (self != NULL)
        /* The main thread may take a lock before the library is fully
           initialized, in particular, before the main thread has a
           TCB.  */
        {
          assert (mutex->__owner == NULL);
          mutex->__owner = _pthread_self ();
        }
# endif
#endif

      if (attr != NULL)
        switch (attr->__mutex_type)
          {
          case PTHREAD_MUTEX_NORMAL:
            break;

          case PTHREAD_MUTEX_RECURSIVE:
            mutex->__locks = 1;
            /* Fall through.  */
          case PTHREAD_MUTEX_ERRORCHECK:
            mutex->__owner = _pthread_self ();
            break;

          default:
            LOSE;
          }

      __pthread_spin_unlock (&mutex->__lock);
      return 0;
    }

  /* The lock is busy.  */

  self = _pthread_self ();
  assert (self);

  if (attr == NULL || attr->__mutex_type == PTHREAD_MUTEX_NORMAL)
    {
#if defined(ALWAYS_TRACK_MUTEX_OWNER)
      assert (mutex->__owner != self);
#endif
    }
  else
    {
      switch (attr->__mutex_type)
        {
        case PTHREAD_MUTEX_ERRORCHECK:
          if (mutex->__owner == self)
            {
              __pthread_spin_unlock (&mutex->__lock);
              return EDEADLK;
            }
          break;

        case PTHREAD_MUTEX_RECURSIVE:
          if (mutex->__owner == self)
            {
              mutex->__locks++;
              __pthread_spin_unlock (&mutex->__lock);
              return 0;
            }
          break;

        default:
          LOSE;
        }
    }

#if !defined(ALWAYS_TRACK_MUTEX_OWNER)
  if (attr != NULL && attr->__mutex_type != PTHREAD_MUTEX_NORMAL)
#endif
    assert (mutex->__owner);

  if (abstime != NULL
      && (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000))
    {
      /* Invalid nanosecond value; release the mutex's internal lock,
         which is still held at this point, before failing.  */
      __pthread_spin_unlock (&mutex->__lock);
      return EINVAL;
    }

  /* Add ourselves to the queue.  */
  __pthread_enqueue (&mutex->__queue, self);
  __pthread_spin_unlock (&mutex->__lock);

  /* Block the thread.  */
  if (abstime != NULL)
    err = __pthread_timedblock (self, abstime, CLOCK_REALTIME);
  else
    {
      err = 0;
      __pthread_block (self);
    }

  __pthread_spin_lock (&mutex->__lock);
  if (self->prevp == NULL)
    /* Another thread removed us from the queue, which means a wakeup message
       has been sent.  It was either consumed while we were blocking, or
       queued after we timed out and before we acquired the mutex lock, in
       which case the message queue must be drained.  */
    drain = err ? 1 : 0;
  else
    {
      /* We're still in the queue.  No one attempted to wake us up, i.e., we
         timed out.  */
      __pthread_dequeue (self);
      drain = 0;
    }
  __pthread_spin_unlock (&mutex->__lock);

  if (drain)
    __pthread_block (self);

  if (err)
    {
      assert (err == ETIMEDOUT);
      return err;
    }

#if !defined(ALWAYS_TRACK_MUTEX_OWNER)
  if (attr != NULL && attr->__mutex_type != PTHREAD_MUTEX_NORMAL)
#endif
    {
      assert (mutex->__owner == self);
    }

  if (attr != NULL)
    switch (attr->__mutex_type)
      {
      case PTHREAD_MUTEX_NORMAL:
        break;

      case PTHREAD_MUTEX_RECURSIVE:
        assert (mutex->__locks == 0);
        mutex->__locks = 1;
        /* Fall through.  */
      case PTHREAD_MUTEX_ERRORCHECK:
        mutex->__owner = self;
        break;

      default:
        LOSE;
      }

  return 0;
}

int
pthread_mutex_timedlock (struct __pthread_mutex *mutex,
                         const struct timespec *abstime)
{
  return __pthread_mutex_timedlock_internal (mutex, abstime);
}