/* System specific fork hooks.  Linux version.
   Copyright (C) 2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <ldsodefs.h>
#include <list.h>
#include <lowlevellock.h>
#include <pthreadP.h>
#include <string.h>

static inline void
fork_system_setup (void)
{
  /* See __pthread_once.  */
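  /* A pthread_once call that was in progress in the parent leaves the
     once-control owned by a thread that does not exist in the child;
     advancing the fork generation lets the child detect the stale
     owner and restart the initialization instead of deadlocking.  */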
  __fork_generation += __PTHREAD_ONCE_FORK_GEN_INCR;
}

static void
fork_system_setup_after_fork (void)
{
  /* There is one thread running.  */
  __nptl_nthreads = 1;

  /* Initialize thread library locks.  */
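  /* Only the forking thread survives in the child, so any lock state
     inherited from the parent is stale; the locks can simply be reset
     to the unlocked initializer even if another thread held them at
     the time of the fork.  */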
  GL (dl_stack_cache_lock) = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
}

/* In case of a fork() call the memory allocation in the child will be
   the same but only one thread is running.  All stacks except that of
   the one running thread are not used anymore.  We have to recycle
   them.  */
static void
reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking necessary.  The caller is the only stack in use.  But
     we have to be aware that we might have interrupted a list
     operation.  */

  if (GL (dl_in_flight_stack) != 0)
    {
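      /* The stack-list add and delete helpers publish the element they
         are about to link or unlink in dl_in_flight_stack before
         touching the list, with the low pointer bit set for an add and
         clear for a delete; decode that record here.  */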
      bool add_p = GL (dl_in_flight_stack) & 1;
      list_t *elem = (list_t *) (GL (dl_in_flight_stack) & ~(uintptr_t) 1);

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this case
             we only need to check the beginning of these lists to see if
             the pointers at the head of the list are inconsistent.  */
          list_t *l = NULL;

          if (GL (dl_stack_used).next->prev != &GL (dl_stack_used))
            l = &GL (dl_stack_used);
          else if (GL (dl_stack_cache).next->prev != &GL (dl_stack_cache))
            l = &GL (dl_stack_cache);
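
          /* At most one head can be inconsistent: the helpers run under
             the stack cache lock, so only a single list operation can be
             in flight at any time.  */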
          if (l != NULL)
            {
              assert (l->next->prev == elem);
              /* Finish the interrupted add by relinking the element at
                 the head of the affected list.  */
              elem->next = l->next;
              elem->prev = l;
              l->next = elem;
            }
        }
      else
        {
          /* We can simply always replay the delete operation.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }
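
      /* Replaying the delete is idempotent: even if the interrupted
         unlink had already completed, elem still points at its former
         neighbors, so the same values are stored again.  */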
      GL (dl_in_flight_stack) = 0;
    }

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &GL (dl_stack_used))
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* Account for the size of the stack.  */
          GL (dl_stack_cache_actsize) += curp->stackblock_size;
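
          /* dl_stack_cache_actsize tracks the total size of the cached
             stacks so the cache can be trimmed once it grows beyond its
             limit.  */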

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;
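
              /* The first block of thread-specific data is embedded in
                 the thread descriptor itself; the remaining first-level
                 slots point to separately allocated second-level blocks,
                 which is why the loop below starts at index 1.  */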
              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));
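
                    /* Every key block holds the same number of entries,
                       so the size of the first-level block describes the
                       second-level blocks as well.  */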
                    /* We have allocated the block which we do not free
                       here, so re-set the bit.  */
                    curp->specific_used = true;
                  }
            }
        }
    }

  /* Add the stacks of all running threads to the cache.  */
  list_splice (&GL (dl_stack_used), &GL (dl_stack_cache));
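
  /* The splice moved every entry, including the one for the current
     thread, onto the cache list and left the dl_stack_used head stale;
     the current thread is unlinked again and both heads are rebuilt
     below.  */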

  /* Remove the entry for the current thread from the cache list and
     add it to the list of running threads.  Which of the two lists is
     decided by the user_stack flag.  */
  list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&GL (dl_stack_used));
  INIT_LIST_HEAD (&GL (dl_stack_user));

  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
    list_add (&self->list, &GL (dl_stack_user));
  else
    list_add (&self->list, &GL (dl_stack_used));
}