2011-06-30  Ulrich Drepper  <drepper@gmail.com>

	* nptl-init.c (__nptl_set_robust): New function.
	(pthread_functions): Add reference.
	* pthreadP.h: Declare __nptl_set_robust.
	* sysdeps/pthread/pthread-functions.h (pthread_functions): Add
	ptr_set_robust member.
	* sysdeps/unix/sysv/linux/fork.c: Call set_robust_list syscall in
	child if threads are used.

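Background note (illustrative only, not part of the patch): per the comment added to fork.c below, the kernel-side robust mutex list is reset during fork, so until the child re-registers it with set_robust_list a child that dies while holding a robust mutex is never reported to other lockers with EOWNERDEAD. The following standalone sketch, written for this note rather than taken from glibc and assuming only the POSIX robust-mutex API (pthread_mutexattr_setrobust, pthread_mutex_consistent; older glibc spells these with an _np suffix), shows the behaviour the fix preserves:

#define _GNU_SOURCE
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  /* Put the mutex in shared memory so parent and forked child share it.  */
  pthread_mutex_t *m = mmap (NULL, sizeof *m, PROT_READ | PROT_WRITE,
                             MAP_SHARED | MAP_ANONYMOUS, -1, 0);
  if (m == MAP_FAILED)
    return 1;

  pthread_mutexattr_t a;
  pthread_mutexattr_init (&a);
  pthread_mutexattr_setpshared (&a, PTHREAD_PROCESS_SHARED);
  pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (m, &a);

  pid_t pid = fork ();
  if (pid == 0)
    {
      /* Child: take the robust mutex and exit without unlocking.  The
         kernel walks the child's robust list at exit, but only if that
         list was registered in the child, which is what the
         ptr_set_robust/__nptl_set_robust call after fork ensures.  */
      pthread_mutex_lock (m);
      _exit (0);
    }

  waitpid (pid, NULL, 0);

  /* With the fix this lock fails with EOWNERDEAD instead of hanging.  */
  if (pthread_mutex_lock (m) == EOWNERDEAD)
    {
      puts ("child died holding the mutex; making it consistent");
      pthread_mutex_consistent (m);
    }
  pthread_mutex_unlock (m);
  return 0;
}

Build with gcc -pthread. Without the re-registration the parent's final pthread_mutex_lock blocks forever, because the kernel never marks the dead child's mutex as owner-dead.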
diff -Nrup a/nptl/nptl-init.c b/nptl/nptl-init.c
--- a/nptl/nptl-init.c	2011-12-20 00:29:54.645538691 -0700
+++ b/nptl/nptl-init.c	2012-01-03 10:18:38.977513783 -0700
@@ -69,6 +69,13 @@ extern void __libc_setup_tls (size_t tcb
 #endif
 
 #ifdef SHARED
+static
+#else
+extern
+#endif
+void __nptl_set_robust (struct pthread *);
+
+#ifdef SHARED
 static void nptl_freeres (void);
 
 
@@ -131,13 +138,25 @@ static const struct pthread_functions pt
     .ptr__nptl_deallocate_tsd = __nptl_deallocate_tsd,
     .ptr__nptl_setxid = __nptl_setxid,
     /* For now only the stack cache needs to be freed.  */
-    .ptr_freeres = nptl_freeres
+    .ptr_freeres = nptl_freeres,
+    .ptr_set_robust = __nptl_set_robust
   };
 # define ptr_pthread_functions &pthread_functions
 #else
 # define ptr_pthread_functions NULL
 #endif
 
+#ifdef SHARED
+static
+#endif
+void
+__nptl_set_robust (struct pthread *self)
+{
+  INTERNAL_SYSCALL_DECL (err);
+  INTERNAL_SYSCALL (set_robust_list, err, 2, &self->robust_head,
+                    sizeof (struct robust_list_head));
+}
+
 
 #ifdef SHARED
 /* This function is called indirectly from the freeres code in libc.  */
diff -Nrup a/nptl/pthreadP.h b/nptl/pthreadP.h
--- a/nptl/pthreadP.h	2010-05-04 05:27:23.000000000 -0600
+++ b/nptl/pthreadP.h	2012-01-03 10:12:35.599269269 -0700
@@ -555,17 +555,20 @@ extern void __pthread_cleanup_pop_restor
 
 /* Old cleanup interfaces, still used in libc.so.  */
 extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer,
-                                   void (*routine) (void *), void *arg);
+                                   void (*routine) (void *), void *arg);
 extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer,
-                                  int execute);
+                                  int execute);
 extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer,
-                                         void (*routine) (void *), void *arg);
+                                         void (*routine) (void *), void *arg);
 extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer,
-                                          int execute);
+                                          int execute);
 
 extern void __nptl_deallocate_tsd (void) attribute_hidden;
 
 extern int __nptl_setxid (struct xid_command *cmdp) attribute_hidden;
+#ifndef SHARED
+extern void __nptl_set_robust (struct pthread *self);
+#endif
 
 extern void __free_stacks (size_t limit) attribute_hidden;
 
diff -Nrup a/nptl/sysdeps/pthread/pthread-functions.h b/nptl/sysdeps/pthread/pthread-functions.h
--- a/nptl/sysdeps/pthread/pthread-functions.h	2010-05-04 05:27:23.000000000 -0600
+++ b/nptl/sysdeps/pthread/pthread-functions.h	2012-01-03 10:12:35.639269301 -0700
@@ -97,6 +97,7 @@ struct pthread_functions
   void (*ptr__nptl_deallocate_tsd) (void);
   int (*ptr__nptl_setxid) (struct xid_command *);
   void (*ptr_freeres) (void);
+  void (*ptr_set_robust) (struct pthread *);
 };
 
 /* Variable in libc.so.  */
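Aside (illustrative sketch, not glibc code): in the SHARED build the child's call goes through this pthread_functions table, a struct of function pointers that libpthread hands to libc during initialization so that libc can reach libpthread only when it is actually loaded. A minimal self-contained imitation of that pattern, with made-up names (robust_hooks, hook_set_robust, optional_lib_init):

#include <stdio.h>

/* Table of optional hooks, standing in for struct pthread_functions.  */
struct robust_hooks
{
  void (*hook_set_robust) (void *self);
};

/* Lives in the "core" library (libc's role); pointers start out NULL.  */
static struct robust_hooks installed_hooks;

/* Implementation provided by the optional library (libpthread's role).  */
static void
set_robust_impl (void *self)
{
  printf ("re-registering robust list for %p\n", self);
}

/* The optional library installs its hooks when it is initialized.  */
static void
optional_lib_init (void)
{
  installed_hooks.hook_set_robust = set_robust_impl;
}

/* The core's fork-like path: call the hook only if it was installed.  */
static void
after_fork_in_child (void *self)
{
  if (installed_hooks.hook_set_robust != NULL)
    installed_hooks.hook_set_robust (self);
}

int
main (void)
{
  int self_placeholder;
  after_fork_in_child (&self_placeholder);   /* hook absent: no-op */
  optional_lib_init ();
  after_fork_in_child (&self_placeholder);   /* hook present: called */
  return 0;
}

The NULL check before the call plays the same role as the test of __libc_pthread_functions.ptr_set_robust in the fork.c hunk that follows.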
diff -Nrup a/nptl/sysdeps/unix/sysv/linux/fork.c b/nptl/sysdeps/unix/sysv/linux/fork.c
--- a/nptl/sysdeps/unix/sysv/linux/fork.c	2010-05-04 05:27:23.000000000 -0600
+++ b/nptl/sysdeps/unix/sysv/linux/fork.c	2012-01-03 10:12:35.649269309 -0700
@@ -29,6 +29,7 @@
 #include <ldsodefs.h>
 #include <bits/stdio-lock.h>
 #include <atomic.h>
+#include <pthreadP.h>
 
 
 unsigned long int *__fork_generation_pointer;
@@ -86,8 +87,8 @@ __libc_fork (void)
      just go away.  The unloading code works in the order of the
      list.
 
-     While executing the registered handlers we are building a
-     list of all the entries so that we can go backward later on.  */
+     While executing the registered handlers we are building a
+     list of all the entries so that we can go backward later on.  */
   while (1)
     {
      /* Execute the handler if there is one.  */
@@ -154,6 +155,24 @@ __libc_fork (void)
      GL(dl_cpuclock_offset) = now;
 #endif
 
+#ifdef __NR_set_robust_list
+      /* Initialize the robust mutex list which has been reset during
+	 the fork.  We do not check for errors since if it fails here
+	 it failed at process start as well and noone could have used
+	 robust mutexes.  We also do not have to set
+	 self->robust_head.futex_offset since we inherit the correct
+	 value from the parent.  */
+# ifdef SHARED
+      if (__libc_pthread_functions.ptr_set_robust != NULL)
+	PTHFCT_CALL (ptr_set_robust, (self));
+# else
+      extern __typeof (__nptl_set_robust) __nptl_set_robust
+	__attribute__((weak));
+      if (__builtin_expect (__nptl_set_robust != NULL, 0))
+	__nptl_set_robust (self);
+# endif
+#endif
+
      /* Reset the file list.  These are recursive mutexes.  */
      fresetlockfiles ();
 
@@ -170,10 +189,10 @@ __libc_fork (void)
 	  allp->handler->child_handler ();
 
 	  /* Note that we do not have to wake any possible waiter.
-	     This is the only thread in the new process.  The count
-	     may have been bumped up by other threads doing a fork.
-	     We reset it to 1, to avoid waiting for non-existing
-	     thread(s) to release the count.  */
+	     This is the only thread in the new process.  The count
+	     may have been bumped up by other threads doing a fork.
+	     We reset it to 1, to avoid waiting for non-existing
+	     thread(s) to release the count.  */
 	  allp->handler->refcntr = 1;
 
 	  /* XXX We could at this point look through the object pool
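Footnote (illustrative, not from glibc): the !SHARED branch above instead tests a weak reference, so the statically linked libc only calls __nptl_set_robust when libpthread was really linked into the program. A standalone sketch of that weak-symbol test, using a hypothetical optional_feature function:

#include <stdio.h>

/* Weak reference: if nothing in the link defines optional_feature, the
   symbol resolves to a NULL address instead of causing a link error.  */
extern void optional_feature (void) __attribute__ ((weak));

int
main (void)
{
  if (optional_feature != NULL)
    /* The providing library (libpthread's role above) is present.  */
    optional_feature ();
  else
    puts ("optional_feature not linked in; skipping the call");
  return 0;
}

This mirrors the __builtin_expect (__nptl_set_robust != NULL, 0) test in the hunk above, where the symbol is only defined when libpthread is part of the static link.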