/* System specific fork hooks.  Linux version.
   Copyright (C) 2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef _FORK_H
#define _FORK_H

#include <assert.h>
#include <ldsodefs.h>
#include <list.h>
#include <pthreadP.h>
#include <sysdep.h>
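
/* These hooks are intended for the generic fork implementation
   (__libc_fork) and run in the new child process:
   fork_system_setup and fork_system_setup_after_fork bring the NPTL
   bookkeeping back to a consistent single-threaded state, and
   reclaim_stacks recycles the stacks of the threads that no longer
   exist in the child.  */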

static inline void
fork_system_setup (void)
{
  /* Advance the fork generation so that __pthread_once does not wait
     for an initialization that was in progress in a thread which does
     not exist after the fork; see __pthread_once.  */
  __fork_generation += __PTHREAD_ONCE_FORK_GEN_INCR;
}

static void
fork_system_setup_after_fork (void)
{
  /* There is now only one thread running.  */
  __nptl_nthreads = 1;

  /* Initialize thread library locks.  Since only one thread runs
     after the fork, locks that might have been held at fork time can
     simply be reset.  */
  GL (dl_stack_cache_lock) = LLL_LOCK_INITIALIZER;
  __default_pthread_attr_lock = LLL_LOCK_INITIALIZER;
}

/* After a fork the child's memory layout is the same as the parent's,
   but only one thread is running.  The stacks of all other threads
   are no longer in use and have to be recycled.  */
static void
reclaim_stacks (void)
{
  struct pthread *self = (struct pthread *) THREAD_SELF;

  /* No locking is necessary.  The calling thread is the only one
     running, so its stack is the only one in use.  But we have to be
     aware that we might have interrupted a list operation.  */
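
  /* A nonzero GL (dl_in_flight_stack) records a stack-list update that
     the fork may have interrupted: bit 0 set means an add was in
     flight, bit 0 clear means a delete was in flight, and the
     remaining bits hold the list element being added or removed.  */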
  if (GL (dl_in_flight_stack) != 0)
    {
      bool add_p = GL (dl_in_flight_stack) & 1;
      list_t *elem = (list_t *) (GL (dl_in_flight_stack) & ~(uintptr_t) 1);

      if (add_p)
        {
          /* We always add at the beginning of the list.  So in this case
             we only need to check whether the pointer at the head of one
             of these lists is inconsistent.  */
          list_t *l = NULL;

          if (GL (dl_stack_used).next->prev != &GL (dl_stack_used))
            l = &GL (dl_stack_used);
          else if (GL (dl_stack_cache).next->prev != &GL (dl_stack_cache))
            l = &GL (dl_stack_cache);

          if (l != NULL)
            {
              /* Complete the interrupted add by linking the element
                 back in at the head of the list.  */
              assert (l->next->prev == elem);
              elem->next = l->next;
              elem->prev = l;
              l->next = elem;
            }
        }
      else
        {
          /* A delete can always be replayed: if the interrupted delete
             had already completed, the element's neighbors are merely
             relinked to each other again.  */
          elem->next->prev = elem->prev;
          elem->prev->next = elem->next;
        }

      GL (dl_in_flight_stack) = 0;
    }

  /* Mark all stacks except the still running one as free.  */
  list_t *runp;
  list_for_each (runp, &GL (dl_stack_used))
    {
      struct pthread *curp = list_entry (runp, struct pthread, list);
      if (curp != self)
        {
          /* This marks the stack as free.  */
          curp->tid = 0;

          /* Account for the size of the stack.  */
          GL (dl_stack_cache_actsize) += curp->stackblock_size;

          if (curp->specific_used)
            {
              /* Clear the thread-specific data.  */
              memset (curp->specific_1stblock, '\0',
                      sizeof (curp->specific_1stblock));

              curp->specific_used = false;

              for (size_t cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
                if (curp->specific[cnt] != NULL)
                  {
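                    /* Every second-level block has the same size as
                       specific_1stblock, which is why its sizeof can
                       be reused here.  */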
                    memset (curp->specific[cnt], '\0',
                            sizeof (curp->specific_1stblock));

                    /* We allocated the block ourselves and do not
                       free it here, so set the bit again.  */
                    curp->specific_used = true;
                  }
            }
        }
    }

  /* Add the stacks of all the threads that are no longer running to
     the cache.  */
  list_splice (&GL (dl_stack_used), &GL (dl_stack_cache));

  /* Remove the entry for the current thread from the cache list and
     add it to the list of running threads.  Which of the two lists it
     is put on is decided by the user_stack flag.  */
  list_del (&self->list);

  /* Re-initialize the lists for all the threads.  */
  INIT_LIST_HEAD (&GL (dl_stack_used));
  INIT_LIST_HEAD (&GL (dl_stack_user));
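
  /* Entries that were on the dl_stack_user list are deliberately
     dropped by the re-initialization above: user-supplied stacks are
     not owned by the library, so they are not moved into the cache.  */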

  if (__glibc_unlikely (THREAD_GETMEM (self, user_stack)))
    list_add (&self->list, &GL (dl_stack_user));
  else
    list_add (&self->list, &GL (dl_stack_used));
}
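
/* A rough sketch of how these hooks are expected to be used on the
   child side of the fork implementation.  The ordering below is
   hypothetical; the real sequence in __libc_fork differs in detail
   and contains additional lock and subsystem resets:

     pid_t pid = _Fork ();
     if (pid == 0)
       {
         fork_system_setup ();            // bump the fork generation
         ...                              // reset malloc, stdio, etc.
         reclaim_stacks ();               // recycle dead threads' stacks
         fork_system_setup_after_fork (); // one thread, fresh locks
       }
*/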

#endif