/* Linuxthreads - a simple clone()-based implementation of Posix        */
/* threads for Linux.                                                   */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr)              */
/*                                                                      */
/* This program is free software; you can redistribute it and/or       */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2      */
/* of the License, or (at your option) any later version.              */
/*                                                                      */
/* This program is distributed in the hope that it will be useful,     */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of      */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the       */
/* GNU Library General Public License for more details.                */

/* Mutexes */

#include <bits/libc-lock.h>
#include <errno.h>
#include <sched.h>
#include <stddef.h>
#include <limits.h>
#include "pthread.h"
#include "internals.h"
#include "spinlock.h"
#include "queue.h"
#include "restart.h"
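
/* Four mutex kinds are implemented below.  PTHREAD_MUTEX_ADAPTIVE_NP and
   PTHREAD_MUTEX_RECURSIVE_NP use the fast lock from spinlock.h
   (__pthread_lock/__pthread_trylock); the recursive kind additionally
   records the owning thread and a count so the owner can relock it.
   PTHREAD_MUTEX_ERRORCHECK_NP and PTHREAD_MUTEX_TIMED_NP use the alternate
   lock (__pthread_alt_lock), the only variant with a timed wait; the
   error-checking kind also records the owner so relocking and foreign
   unlocking can be reported as EDEADLK and EPERM.  PTHREAD_MUTEX_TIMED_NP
   is the default kind when no attribute is supplied.  */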
int __pthread_mutex_init(pthread_mutex_t * mutex,
                         const pthread_mutexattr_t * mutex_attr)
{
  __pthread_init_lock(&mutex->__m_lock);
  mutex->__m_kind =
    mutex_attr == NULL ? PTHREAD_MUTEX_TIMED_NP : mutex_attr->__mutexkind;
  mutex->__m_count = 0;
  mutex->__m_owner = NULL;
  return 0;
}
strong_alias (__pthread_mutex_init, pthread_mutex_init)
hidden_def (__pthread_mutex_init)

int __pthread_mutex_destroy(pthread_mutex_t * mutex)
{
  switch (mutex->__m_kind) {
  case PTHREAD_MUTEX_ADAPTIVE_NP:
  case PTHREAD_MUTEX_RECURSIVE_NP:
    if ((mutex->__m_lock.__status & 1) != 0)
      return EBUSY;
    return 0;
  case PTHREAD_MUTEX_ERRORCHECK_NP:
  case PTHREAD_MUTEX_TIMED_NP:
    if (mutex->__m_lock.__status != 0)
      return EBUSY;
    return 0;
  default:
    return EINVAL;
  }
}
strong_alias (__pthread_mutex_destroy, pthread_mutex_destroy)
hidden_def (__pthread_mutex_destroy)

int __pthread_mutex_trylock(pthread_mutex_t * mutex)
{
  pthread_descr self;
  int retcode;

  switch(mutex->__m_kind) {
  case PTHREAD_MUTEX_ADAPTIVE_NP:
    retcode = __pthread_trylock(&mutex->__m_lock);
    return retcode;
  case PTHREAD_MUTEX_RECURSIVE_NP:
    self = thread_self();
    if (mutex->__m_owner == self) {
      mutex->__m_count++;
      return 0;
    }
    retcode = __pthread_trylock(&mutex->__m_lock);
    if (retcode == 0) {
      mutex->__m_owner = self;
      mutex->__m_count = 0;
    }
    return retcode;
  case PTHREAD_MUTEX_ERRORCHECK_NP:
    retcode = __pthread_alt_trylock(&mutex->__m_lock);
    if (retcode == 0) {
      mutex->__m_owner = thread_self();
    }
    return retcode;
  case PTHREAD_MUTEX_TIMED_NP:
    retcode = __pthread_alt_trylock(&mutex->__m_lock);
    return retcode;
  default:
    return EINVAL;
  }
}
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
hidden_def (__pthread_mutex_trylock)

int __pthread_mutex_lock(pthread_mutex_t * mutex)
{
  pthread_descr self;

  switch(mutex->__m_kind) {
  case PTHREAD_MUTEX_ADAPTIVE_NP:
    __pthread_lock(&mutex->__m_lock, NULL);
    return 0;
  case PTHREAD_MUTEX_RECURSIVE_NP:
    self = thread_self();
    if (mutex->__m_owner == self) {
      mutex->__m_count++;
      return 0;
    }
    __pthread_lock(&mutex->__m_lock, self);
    mutex->__m_owner = self;
    mutex->__m_count = 0;
    return 0;
  case PTHREAD_MUTEX_ERRORCHECK_NP:
    self = thread_self();
    if (mutex->__m_owner == self) return EDEADLK;
    __pthread_alt_lock(&mutex->__m_lock, self);
    mutex->__m_owner = self;
    return 0;
  case PTHREAD_MUTEX_TIMED_NP:
    __pthread_alt_lock(&mutex->__m_lock, NULL);
    return 0;
  default:
    return EINVAL;
  }
}
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
hidden_def (__pthread_mutex_lock)

int __pthread_mutex_timedlock (pthread_mutex_t *mutex,
                               const struct timespec *abstime)
{
  pthread_descr self;
  int res;

  if (__builtin_expect (abstime->tv_nsec, 0) < 0
      || __builtin_expect (abstime->tv_nsec, 0) >= 1000000000)
    return EINVAL;

  switch(mutex->__m_kind) {
  case PTHREAD_MUTEX_ADAPTIVE_NP:
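    /* The fast lock used by the adaptive and recursive kinds has no timed
       variant, so abstime is ignored here and in the recursive case below;
       the call simply blocks until the mutex is acquired.  */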
    __pthread_lock(&mutex->__m_lock, NULL);
    return 0;
  case PTHREAD_MUTEX_RECURSIVE_NP:
    self = thread_self();
    if (mutex->__m_owner == self) {
      mutex->__m_count++;
      return 0;
    }
    __pthread_lock(&mutex->__m_lock, self);
    mutex->__m_owner = self;
    mutex->__m_count = 0;
    return 0;
  case PTHREAD_MUTEX_ERRORCHECK_NP:
    self = thread_self();
    if (mutex->__m_owner == self) return EDEADLK;
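    /* __pthread_alt_timedlock returns nonzero when the lock was acquired
       and zero when the timeout expired, hence this test (compare the
       PTHREAD_MUTEX_TIMED_NP case below).  */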
    res = __pthread_alt_timedlock(&mutex->__m_lock, self, abstime);
    if (res != 0)
      {
        mutex->__m_owner = self;
        return 0;
      }
    return ETIMEDOUT;
  case PTHREAD_MUTEX_TIMED_NP:
    /* Only this type supports a timed wait.  */
    return (__pthread_alt_timedlock(&mutex->__m_lock, NULL, abstime)
            ? 0 : ETIMEDOUT);
  default:
    return EINVAL;
  }
}
strong_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)

int __pthread_mutex_unlock(pthread_mutex_t * mutex)
{
  switch (mutex->__m_kind) {
  case PTHREAD_MUTEX_ADAPTIVE_NP:
    __pthread_unlock(&mutex->__m_lock);
    return 0;
  case PTHREAD_MUTEX_RECURSIVE_NP:
    if (mutex->__m_owner != thread_self())
      return EPERM;
    if (mutex->__m_count > 0) {
      mutex->__m_count--;
      return 0;
    }
    mutex->__m_owner = NULL;
    __pthread_unlock(&mutex->__m_lock);
    return 0;
  case PTHREAD_MUTEX_ERRORCHECK_NP:
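    /* EPERM both when the caller is not the owner and when the mutex is
       not locked at all; __status == 0 means unlocked for the alternate
       lock, as in __pthread_mutex_destroy above.  */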
    if (mutex->__m_owner != thread_self() || mutex->__m_lock.__status == 0)
      return EPERM;
    mutex->__m_owner = NULL;
    __pthread_alt_unlock(&mutex->__m_lock);
    return 0;
  case PTHREAD_MUTEX_TIMED_NP:
    __pthread_alt_unlock(&mutex->__m_lock);
    return 0;
  default:
    return EINVAL;
  }
}
strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
hidden_def (__pthread_mutex_unlock)

int __pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
  attr->__mutexkind = PTHREAD_MUTEX_TIMED_NP;
  return 0;
}
strong_alias (__pthread_mutexattr_init, pthread_mutexattr_init)

int __pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
  return 0;
}
strong_alias (__pthread_mutexattr_destroy, pthread_mutexattr_destroy)

int __pthread_mutexattr_settype(pthread_mutexattr_t *attr, int kind)
{
  if (kind != PTHREAD_MUTEX_ADAPTIVE_NP
      && kind != PTHREAD_MUTEX_RECURSIVE_NP
      && kind != PTHREAD_MUTEX_ERRORCHECK_NP
      && kind != PTHREAD_MUTEX_TIMED_NP)
    return EINVAL;
  attr->__mutexkind = kind;
  return 0;
}
weak_alias (__pthread_mutexattr_settype, pthread_mutexattr_settype)
strong_alias (__pthread_mutexattr_settype, __pthread_mutexattr_setkind_np)
weak_alias (__pthread_mutexattr_setkind_np, pthread_mutexattr_setkind_np)

int __pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *kind)
{
  *kind = attr->__mutexkind;
  return 0;
}
weak_alias (__pthread_mutexattr_gettype, pthread_mutexattr_gettype)
strong_alias (__pthread_mutexattr_gettype, __pthread_mutexattr_getkind_np)
weak_alias (__pthread_mutexattr_getkind_np, pthread_mutexattr_getkind_np)

int __pthread_mutexattr_getpshared (const pthread_mutexattr_t *attr,
                                    int *pshared)
{
  *pshared = PTHREAD_PROCESS_PRIVATE;
  return 0;
}
weak_alias (__pthread_mutexattr_getpshared, pthread_mutexattr_getpshared)

int __pthread_mutexattr_setpshared (pthread_mutexattr_t *attr, int pshared)
{
  if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
    return EINVAL;

  /* For now it is not possible to share a mutex between processes.  */
  if (pshared != PTHREAD_PROCESS_PRIVATE)
    return ENOSYS;

  return 0;
}
weak_alias (__pthread_mutexattr_setpshared, pthread_mutexattr_setpshared)

/* Once-only execution */

static pthread_mutex_t once_masterlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t once_finished = PTHREAD_COND_INITIALIZER;
static int fork_generation = 0;  /* Child process increments this after fork. */

enum { NEVER = 0, IN_PROGRESS = 1, DONE = 2 };
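
/* A pthread_once_t value packs the state above into its two least
   significant bits; the remaining bits hold the fork generation that was
   current when initialization started.  __pthread_once_fork_child below
   always advances fork_generation by a multiple of 4, so those two bits
   stay clear.  */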

/* If a thread is canceled while calling the init_routine out of
   pthread_once, this handler will reset the once_control variable
   to the NEVER state. */

static void pthread_once_cancelhandler(void *arg)
{
  pthread_once_t *once_control = arg;

  pthread_mutex_lock(&once_masterlock);
  *once_control = NEVER;
  pthread_mutex_unlock(&once_masterlock);
  pthread_cond_broadcast(&once_finished);
}

int __pthread_once(pthread_once_t * once_control, void (*init_routine)(void))
{
  /* Flag for doing the condition broadcast outside of the mutex.  */
  int state_changed;

  /* Test without locking first for speed.  */
  if (*once_control == DONE) {
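    /* The read barrier pairs with the WRITE_MEMORY_BARRIER() issued below
       before *once_control is set to DONE, making the effects of
       init_routine visible to the caller.  */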
    READ_MEMORY_BARRIER();
    return 0;
  }
  /* Lock and test again.  */

  state_changed = 0;

  pthread_mutex_lock(&once_masterlock);

  /* If this object was left in an IN_PROGRESS state in a parent
     process (indicated by a stale generation field), reset it to NEVER. */
  if ((*once_control & 3) == IN_PROGRESS && (*once_control & ~3) != fork_generation)
    *once_control = NEVER;

  /* If init_routine is being called in another thread, wait until
     it completes. */
  while ((*once_control & 3) == IN_PROGRESS) {
    pthread_cond_wait(&once_finished, &once_masterlock);
  }
  /* Here *once_control is stable and either NEVER or DONE. */
  if (*once_control == NEVER) {
    *once_control = IN_PROGRESS | fork_generation;
    pthread_mutex_unlock(&once_masterlock);
    pthread_cleanup_push(pthread_once_cancelhandler, once_control);
    init_routine();
    pthread_cleanup_pop(0);
    pthread_mutex_lock(&once_masterlock);
    WRITE_MEMORY_BARRIER();
    *once_control = DONE;
    state_changed = 1;
  }
  pthread_mutex_unlock(&once_masterlock);

  if (state_changed)
    pthread_cond_broadcast(&once_finished);

  return 0;
}
strong_alias (__pthread_once, pthread_once)
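
/* Typical use, as a sketch: a pthread_once_t statically initialized to
   PTHREAD_ONCE_INIT (the NEVER state) ensures the init routine runs only
   once across all threads:

     static pthread_once_t once = PTHREAD_ONCE_INIT;
     static void init_routine(void) { ... one-time setup ... }

     void use(void) { pthread_once(&once, init_routine); }
*/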

/*
 * Handle the state of the pthread_once mechanism across forks.  The
 * once_masterlock is acquired in the parent process prior to a fork to
 * ensure that no thread is in the critical region protected by the lock.
 * After the fork, the lock is released.  In the child, the lock and the
 * condition variable are simply reset.  The child also increments its
 * generation counter, which lets pthread_once calls detect stale
 * IN_PROGRESS states and reset them back to NEVER.
 */

void __pthread_once_fork_prepare(void)
{
  pthread_mutex_lock(&once_masterlock);
}

void __pthread_once_fork_parent(void)
{
  pthread_mutex_unlock(&once_masterlock);
}

void __pthread_once_fork_child(void)
{
  pthread_mutex_init(&once_masterlock, NULL);
  pthread_cond_init(&once_finished, NULL);
  if (fork_generation <= INT_MAX - 4)
    fork_generation += 4;  /* Leave the least significant two bits zero.  */
  else
    fork_generation = 0;
}