]>
Commit | Line | Data |
---|---|---|
b168057a | 1 | /* Copyright (C) 2003-2015 Free Software Foundation, Inc. |
76a50749 | 2 | This file is part of the GNU C Library. |
08192659 | 3 | Contributed by Jakub Jelinek <jakub@redhat.com>, 2003. |
76a50749 UD |
4 | |
5 | The GNU C Library is free software; you can redistribute it and/or | |
6 | modify it under the terms of the GNU Lesser General Public | |
7 | License as published by the Free Software Foundation; either | |
8 | version 2.1 of the License, or (at your option) any later version. | |
9 | ||
10 | The GNU C Library is distributed in the hope that it will be useful, | |
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | |
08192659 | 12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
76a50749 UD |
13 | Lesser General Public License for more details. |
14 | ||
15 | You should have received a copy of the GNU Lesser General Public | |
59ba27a6 PE |
16 | License along with the GNU C Library; if not, see |
17 | <http://www.gnu.org/licenses/>. */ | |
76a50749 UD |
18 | |
#include "pthreadP.h"
#include <lowlevellock.h>
#include <atomic.h>


/* Current fork generation.  It is embedded into the "initialization in
   progress" state of a pthread_once_t (see the algorithm description on
   __pthread_once_slow below) so that an initialization that was interrupted
   by fork can be distinguished from one that is still in progress in this
   process.  Plain (non-atomic) accesses are fine: the value is immutable
   within a process, and a forked child starts with the single thread that
   updated it.  */
unsigned long int __fork_generation attribute_hidden;
76a50749 | 25 | |
76a50749 | 26 | |
08192659 RM |
/* Cancellation cleanup handler registered around the init_routine call in
   __pthread_once_slow: if the initializing thread is cancelled, return
   once_control to the uninitialized state and wake all waiters so one of
   them can retry the initialization.  */
static void
clear_once_control (void *arg)
{
  pthread_once_t *once_control = (pthread_once_t *) arg;

  /* Reset to the uninitialized state here.  We don't need a stronger memory
     order because we do not need to make any other of our writes visible to
     other threads that see this value: This function will be called if we
     get interrupted (see __pthread_once), so all we need to relay to other
     threads is the state being reset again.  */
  atomic_store_relaxed (once_control, 0);
  lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
}
76a50749 | 40 | |
08192659 RM |
/* This is similar to a lock implementation, but we distinguish between three
   states: not yet initialized (0), initialization in progress
   (__fork_generation | __PTHREAD_ONCE_INPROGRESS), and initialization
   finished (__PTHREAD_ONCE_DONE); __fork_generation does not use the bits
   that are used for __PTHREAD_ONCE_INPROGRESS and __PTHREAD_ONCE_DONE (which
   is what __PTHREAD_ONCE_FORK_GEN_INCR is used for).  If in the first state,
   threads will try to run the initialization by moving to the second state;
   the first thread to do so via a CAS on once_control runs init_routine,
   other threads block.
   When forking the process, some threads can be interrupted during the second
   state; they won't be present in the forked child, so we need to restart
   initialization in the child.  To distinguish an in-progress initialization
   from an interrupted initialization (in which case we need to reclaim the
   lock), we look at the fork generation that's part of the second state: We
   can reclaim iff it differs from the current fork generation.
   XXX: This algorithm has an ABA issue on the fork generation: If an
   initialization is interrupted, we then fork 2^30 times (30 bits of
   once_control are used for the fork generation), and try to initialize
   again, we can deadlock because we can't distinguish the in-progress and
   interrupted cases anymore.
   XXX: We split out this slow path because current compilers do not generate
   as efficient code when the fast path in __pthread_once below is not in a
   separate function.  */
static int
__attribute__ ((noinline))
__pthread_once_slow (pthread_once_t *once_control, void (*init_routine) (void))
{
  while (1)
    {
      int val, newval;

      /* We need acquire memory order for this load because if the value
	 signals that initialization has finished, we need to see any
	 data modifications done during initialization.  */
      val = atomic_load_acquire (once_control);
      do
	{
	  /* Check if the initialization has already been done.  */
	  if (__glibc_likely ((val & __PTHREAD_ONCE_DONE) != 0))
	    return 0;

	  /* We try to set the state to in-progress and having the current
	     fork generation.  We don't need atomic accesses for the fork
	     generation because it's immutable in a particular process, and
	     forked child processes start with a single thread that modified
	     the generation.  */
	  newval = __fork_generation | __PTHREAD_ONCE_INPROGRESS;
	  /* We need acquire memory order here for the same reason as for the
	     load from once_control above.  */
	}
      /* A weak CAS is fine: on spurious failure we simply retry with the
	 freshly reloaded VAL.  */
      while (__glibc_unlikely (!atomic_compare_exchange_weak_acquire (
	  once_control, &val, newval)));

      /* Check if another thread already runs the initializer.  */
      if ((val & __PTHREAD_ONCE_INPROGRESS) != 0)
	{
	  /* Check whether the initializer execution was interrupted by a
	     fork.  We know that for both values, __PTHREAD_ONCE_INPROGRESS
	     is set and __PTHREAD_ONCE_DONE is not.  */
	  if (val == newval)
	    {
	      /* Same generation, some other thread was faster.  Wait.
		 The futex wait rechecks the value, so a wake-up between the
		 CAS and this call is not lost; on any return we just loop
		 and reload once_control.  */
	      lll_futex_wait (once_control, newval, LLL_PRIVATE);
	      continue;
	    }
	}

      /* This thread is the first here.  Do the initialization.
	 Register a cleanup handler so that in case the thread gets
	 interrupted the initialization can be restarted.  */
      pthread_cleanup_push (clear_once_control, once_control);

      init_routine ();

      pthread_cleanup_pop (0);


      /* Mark *once_control as having finished the initialization.  We need
	 release memory order here because we need to synchronize with other
	 threads that want to use the initialized data.  */
      atomic_store_release (once_control, __PTHREAD_ONCE_DONE);

      /* Wake up all other threads.  */
      lll_futex_wake (once_control, INT_MAX, LLL_PRIVATE);
      break;
    }

  return 0;
}
f50277c1 TR |
131 | |
132 | int | |
133 | __pthread_once (pthread_once_t *once_control, void (*init_routine) (void)) | |
134 | { | |
135 | /* Fast path. See __pthread_once_slow. */ | |
136 | int val; | |
e37c91d4 | 137 | val = atomic_load_acquire (once_control); |
f50277c1 TR |
138 | if (__glibc_likely ((val & __PTHREAD_ONCE_DONE) != 0)) |
139 | return 0; | |
140 | else | |
141 | return __pthread_once_slow (once_control, init_routine); | |
142 | } | |
08192659 | 143 | weak_alias (__pthread_once, pthread_once) |
4d17e683 | 144 | hidden_def (__pthread_once) |