/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <sched.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif


/* Multithreading support hooks.  */


#ifdef WRAP_pthreadstuff


#ifndef LIBMUDFLAPTH
#error "pthreadstuff is to be included only in libmudflapth"
#endif



/* Describe a thread (dead or alive).  */
struct pthread_info
{
  short used_p;    /* Is this slot in use?  */
  short dead_p;    /* Is this thread dead?  */
  pthread_t self;  /* The thread id.  */

  /* If libmudflapth allocated the stack, store its base/size.  */
  void *stack;
  size_t stack_size;

  int *thread_errno;
  enum __mf_state_enum state;
};
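/* Lifecycle of a slot: used_p is set when the slot is handed out by
   __mf_allocate_blank_threadinfo(); dead_p becomes 1 when the owning
   thread's cleanup handler runs, then counts upward during subsequent
   pthread_create() garbage-collection passes until the slot's stack
   is unmapped and the whole struct is zeroed for reuse.  */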


/* Describe the startup information for a new user thread.  */
struct pthread_start_info
{
  /* The user's thread entry point and argument.  */
  void * (*user_fn)(void *);
  void *user_arg;

  /* Set by user thread when this startup struct may be disposed of.  */
  struct pthread_info *thread_info;
};
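/* The parent/child handshake: pthread_create() fills in user_fn and
   user_arg, clears thread_info, and then spins until the child's
   __mf_pthread_spawner() stores its own pthread_info pointer here.
   That store tells the parent both that the child is registered and
   that this stack-resident struct may go out of scope.  */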




/* To avoid dynamic memory allocation, use a static array to store
   these thread description structs.  The second (_idx) array is used
   as a simple caching hash table, mapping PTHREAD_HASH(thread) to its
   index in __mf_pthread_info[].  */

#define LIBMUDFLAPTH_THREADS_MAX 1024
static struct pthread_info __mf_pthread_info[LIBMUDFLAPTH_THREADS_MAX];
static unsigned __mf_pthread_info_idx[LIBMUDFLAPTH_THREADS_MAX];
#define PTHREAD_HASH(p) ((unsigned) (p) % LIBMUDFLAPTH_THREADS_MAX)
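/* For example (illustrative value only; pthread_t is opaque and is
   merely cast to unsigned here): with LIBMUDFLAPTH_THREADS_MAX == 1024,
   a pthread_t of 3074 hashes to slot 3074 % 1024 == 2, and
   __mf_pthread_info_idx[2] caches the __mf_pthread_info[] index last
   found for a thread hashing there.  A repeat lookup is thus O(1) on a
   cache hit, falling back to a linear table scan on a miss.  */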


/* Find any old empty entry in __mf_pthread_info; mark it used and
   return it.  Return NULL if there are no more available slots.  */
struct pthread_info*
__mf_allocate_blank_threadinfo (unsigned* idx)
{
  static unsigned probe = LIBMUDFLAPTH_THREADS_MAX-1;
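  /* probe is static, so each search resumes one past wherever the
     previous allocation stopped; starting it at
     LIBMUDFLAPTH_THREADS_MAX-1 makes the first allocation land in
     slot 0.  */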
  unsigned probe_at_start = probe;
  static pthread_mutex_t mutex =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
    PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
    PTHREAD_MUTEX_INITIALIZER;
#endif
  int rc;

  rc = pthread_mutex_lock (& mutex);
  assert (rc == 0);

  /* Look for a blank spot starting one past the last one we found.  */
  do
    {
      probe = (probe + 1) % LIBMUDFLAPTH_THREADS_MAX;
      struct pthread_info* pi = & __mf_pthread_info [probe];
      if (! pi->used_p)
        {
          /* memset (pi, 0, sizeof (*pi)); */
          pi->used_p = 1;
          if (idx != NULL) *idx = probe;
          /* VERBOSE_TRACE ("allocated threadinfo slot %u\n", probe); */
          rc = pthread_mutex_unlock (& mutex);
          assert (rc == 0);
          return pi;
        }
    }
  while (probe != probe_at_start);

  rc = pthread_mutex_unlock (& mutex);
  assert (rc == 0);
  return NULL;
}


/* Find and return the pthread_info struct for the current thread.
   There might already be one in __mf_pthread_info for this thread, in
   which case return it.  There may not be one (if this is the main
   thread, an auxiliary -lpthread manager, or an actual user thread
   making an early call into libmudflap); in these cases, create a new
   entry.  If it's not the main thread, put it into the reentrant
   initial state.  */
static struct pthread_info*
__mf_find_threadinfo ()
{
  pthread_t it = pthread_self ();
  unsigned *hash = & __mf_pthread_info_idx [PTHREAD_HASH (it)];
  struct pthread_info *result = NULL;
  static pthread_t last;
  static int main_thread_seen_p;

  /* Check out the lookup cache; failing that, do a linear search
     around the table.  */
  {
    struct pthread_info* pi = & __mf_pthread_info [*hash];
    unsigned i;

    if (pi->used_p && pi->self == it)
      result = pi;
    else for (i = 0; i < LIBMUDFLAPTH_THREADS_MAX; i++)
      {
        struct pthread_info* pi2 = & __mf_pthread_info [i];
        if (pi2->used_p && pi2->self == it)
          {
            *hash = i;
            result = pi2;
            break;
          }
      }
  }

  if (result == NULL)
    {
      /* This thread is as yet unseen in libmudflap; create a
         __mf_pthread_info record for it.  The main thread is
         different from the auto-recognized worker bees because, for
         example, we can assume that it's a fully stack/errno-equipped
         thread.  */
      unsigned *hash = & __mf_pthread_info_idx [PTHREAD_HASH (it)];
      struct pthread_info* pi = __mf_allocate_blank_threadinfo (hash);
      assert (pi != NULL);
      assert (pi->used_p);
      result = pi;
      result->self = it;

      if (! main_thread_seen_p)
        {
          result->state = active;
          /* NB: leave result->thread_errno unset, as main thread's errno
             has already been registered in __mf_init.  */
          /* NB: leave stack-related fields unset, to avoid
             deallocation.  */
          main_thread_seen_p = 1;
          VERBOSE_TRACE ("identified self as main thread\n");
        }
      else
        {
          result->state = reentrant;
          /* NB: leave result->thread_errno unset, as a worker thread's
             errno is unlikely to be used, and user threads fill it in
             during __mf_pthread_spawner().  */
          /* NB: leave stack-related fields unset, leaving pthread_create
             to fill them in for user threads, and leaving them empty for
             other threads.  */
          VERBOSE_TRACE ("identified self as new aux or user thread\n");
        }
    }

  if (last != it)
    {
      VERBOSE_TRACE ("found threadinfo for %u, slot %u\n",
                     (unsigned) it,
                     (unsigned) *hash);
      last = it;
    }

  assert (result != NULL);
  assert (result->self == it);

  return result;
}



/* Return a pointer to the per-thread __mf_state variable.  */
enum __mf_state_enum *
__mf_state_perthread ()
{
  assert (! __mf_starting_p);
  return & (__mf_find_threadinfo()->state);
}


static void
__mf_pthread_cleanup (void *arg)
{
  struct pthread_info *pi = arg;

  /* XXX: This unregistration is not safe on platforms where distinct
     threads share errno (or at least its virtual address).  */
  if (pi->thread_errno != NULL)
    __mf_unregister (pi->thread_errno, sizeof (int));

  /* XXX: Only detached threads should designate themselves as dead
     here.  Non-detached threads are marked dead after their
     personalized pthread_join() call.  */
  pi->state = reentrant;
  pi->dead_p = 1;

  VERBOSE_TRACE ("thread pi %p exiting\n", pi);
}


static void *
__mf_pthread_spawner (void *arg)
{
  struct pthread_info *pi = __mf_find_threadinfo ();
  void *result = NULL;

  /* Turn off reentrancy indications.  */
  assert (pi->state == reentrant);
  pi->state = active;

  VERBOSE_TRACE ("new user thread\n");

  if (__mf_opts.heur_std_data)
    {
      pi->thread_errno = & errno;
      __mf_register (pi->thread_errno, sizeof (int),
                     __MF_TYPE_GUESS, "errno area (thread)");
      /* NB: we could use __MF_TYPE_STATIC above, but we guess that
         the thread errno is coming out of some dynamically allocated
         pool that we already know of as __MF_TYPE_HEAP.  */
    }

  /* We considered using pthread_key_t objects instead of these
     cleanup stacks, but they were less cooperative with the
     interposed malloc hooks in libmudflap.  */
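  /* For comparison, a sketch of that rejected alternative; the
     cleanup_key below is hypothetical and not part of this file:

       static pthread_key_t cleanup_key;
       pthread_key_create (&cleanup_key, __mf_pthread_cleanup);
       ...
       pthread_setspecific (cleanup_key, pi);

     The key's destructor would run __mf_pthread_cleanup at thread
     exit much as the cleanup handler pushed below does, but as noted
     above the key machinery cooperated poorly with the interposed
     malloc hooks.  */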
  pthread_cleanup_push (& __mf_pthread_cleanup, pi);

  /* Call user thread.  */
  {
    /* Extract given entry point and argument.  */
    struct pthread_start_info *psi = arg;
    void * (*user_fn)(void *) = psi->user_fn;
    void *user_arg = psi->user_arg;

    /* Signal the main thread to resume.  */
    psi->thread_info = pi;

    result = (*user_fn)(user_arg);
  }

  pthread_cleanup_pop (1 /* execute */);
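  /* NB: pthread_cleanup_pop (1) runs __mf_pthread_cleanup on this
     normal-return path; the matching push above covers the
     cancellation and pthread_exit paths.  Note also that user_fn and
     user_arg were copied out of *psi before psi->thread_info was set:
     that store licenses the parent to let the stack-resident startup
     struct go out of scope.  */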

  /* NB: there is a slight race here.  The pthread_info field will now
     say this thread is dead, but it may still be running... right
     here.  We try to discourage this possibility via the delayed
     stack deallocation in pthread_create's garbage-collection pass
     below.  */

  return result;
}


#if PIC
/* A special bootstrap variant.  */
int
__mf_0fn_pthread_create (pthread_t *thr, const pthread_attr_t *attr,
                         void * (*start) (void *), void *arg)
{
  return -1;
}
#endif


#undef pthread_create
WRAPPER(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
        void * (*start) (void *), void *arg)
{
  DECLARE(int, munmap, void *p, size_t l);
  DECLARE(void *, mmap, void *p, size_t l, int prot, int flags, int fd, off_t of);
  DECLARE(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
          void * (*start) (void *), void *arg);
  int result;
  pthread_attr_t override_attr;
  void *override_stack;
  size_t override_stacksize;
  unsigned i;

  TRACE ("pthread_create\n");

  /* Garbage-collect dead threads' stacks.  */
  LOCKTH ();
  for (i = 0; i < LIBMUDFLAPTH_THREADS_MAX; i++)
    {
      struct pthread_info *pi = & __mf_pthread_info [i];
      if (! pi->used_p)
        continue;
      if (! pi->dead_p)
        continue;

      /* VERBOSE_TRACE ("thread %u pi %p stack cleanup deferred (%u)\n",
         (unsigned) pi->self, pi, pi->dead_p); */

      /* Delay actual deallocation by a few cycles, to try to discourage
         the race mentioned at the end of __mf_pthread_spawner().  */
      if (pi->dead_p)
        pi->dead_p ++;
      if (pi->dead_p >= 10 /* XXX */)
        {
          if (pi->stack)
            CALL_REAL (munmap, pi->stack, pi->stack_size);

          VERBOSE_TRACE ("slot %u freed, stack %p\n", i, pi->stack);
          memset (pi, 0, sizeof (*pi));

          /* One round of garbage collection is enough.  */
          break;
        }
    }
  UNLOCKTH ();
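  /* A dying thread's cleanup handler sets dead_p to 1; each pass
     through this collector then bumps it, so a slot's stack is only
     unmapped after roughly ten subsequent pthread_create() calls.
     That delay is the heuristic guarding the spawner-exit race noted
     above.  */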

  /* Let's allocate a stack for this thread, if one is not already
     supplied by the caller.  We don't want to let e.g. the
     linuxthreads manager thread do this allocation.  */
  if (attr != NULL)
    override_attr = *attr;
  else
    pthread_attr_init (& override_attr);

  /* Get supplied attributes, if any.  */
  /* XXX: consider using POSIX2K attr_getstack() */
  if (pthread_attr_getstackaddr (& override_attr, & override_stack) != 0 ||
      pthread_attr_getstacksize (& override_attr, & override_stacksize) != 0)
    {
      override_stack = NULL;
      override_stacksize = 0;
    }

  /* Do we need to allocate the new thread's stack?  */
  if (__mf_opts.thread_stack && override_stack == NULL)
    {
      uintptr_t alignment = 256; /* power of two */

      /* Perturb the initial stack addresses slightly, to encourage
         threads to have nonconflicting entries in the lookup cache
         for their tracked stack objects.  */
      static unsigned perturb = 0;
      const unsigned perturb_delta = 32;
      const unsigned perturb_count = 16;
      perturb += perturb_delta;
      if (perturb > perturb_delta*perturb_count) perturb = 0;
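      /* Successive calls therefore use perturb values of 32, 64, ...,
         512 bytes, then wrap back to 0: a cycle of 17 distinct
         offsets, each a multiple of 32, nudging each new thread's
         initial stack pointer away from its siblings'.  */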

      /* Use glibc x86 defaults.  */
      /* Should have been defined in <limits.h>.  */
#ifndef PTHREAD_STACK_MIN
#define PTHREAD_STACK_MIN 65536
#endif
      override_stacksize = max (PTHREAD_STACK_MIN, __mf_opts.thread_stack * 1024);


#if defined(MAP_ANONYMOUS)
#define MF_MAP_ANON MAP_ANONYMOUS
#elif defined(MAP_ANON)
#define MF_MAP_ANON MAP_ANON
#else
#error "Cannot mmap anonymous memory."
#endif

      override_stack = CALL_REAL (mmap, NULL, override_stacksize,
                                  PROT_READ|PROT_WRITE,
                                  MAP_PRIVATE|MF_MAP_ANON,
                                  0, 0);
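      /* NB: the fd argument is 0 here; some systems insist on fd == -1
         for anonymous mappings, and portable code generally passes -1,
         though the value is ignored on e.g. Linux.  */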
      if (override_stack == 0 || override_stack == MAP_FAILED)
        {
          errno = EAGAIN;
          return -1;
        }

      VERBOSE_TRACE ("thread stack alloc %p size %lu\n",
                     override_stack, (unsigned long) override_stacksize);

      /* The stackaddr pthreads attribute is a candidate stack pointer.
         It must point near the top or the bottom of this buffer,
         depending on whether the stack grows downward or upward, and it
         must be suitably aligned.  On the x86 the stack grows down, so
         we set stackaddr near the top.  */
      override_stack = (void *)
        (((uintptr_t) override_stack + override_stacksize - alignment - perturb)
         & (~(uintptr_t)(alignment-1)));
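      /* Worked example with illustrative numbers: if mmap returned
         0x40000000 with override_stacksize == 0x10000, alignment == 256
         and perturb == 32, then
           0x40000000 + 0x10000 - 256 - 32 == 0x4000fee0,
         which rounds down (via & ~0xff) to 0x4000fe00: a
         256-byte-aligned address just below the top of the buffer,
         shifted by this thread's perturbation.  */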

      /* XXX: consider using POSIX2K attr_setstack() */
      if (pthread_attr_setstackaddr (& override_attr, override_stack) != 0 ||
          pthread_attr_setstacksize (& override_attr,
                                     override_stacksize - alignment - perturb) != 0)
        {
          /* This should not happen.  */
          CALL_REAL (munmap, override_stack, override_stacksize);
          errno = EAGAIN;
          return -1;
        }
    }

  /* Actually start the child thread.  */
  {
    struct pthread_start_info psi;
    struct pthread_info *pi = NULL;

    /* Fill in startup-control fields.  */
    psi.user_fn = start;
    psi.user_arg = arg;
    psi.thread_info = NULL;

    /* Actually create the thread.  */
    __mf_state = reentrant;
    result = CALL_REAL (pthread_create, thr, & override_attr,
                        & __mf_pthread_spawner, (void *) & psi);
    __mf_state = active;
    /* We also hook pthread_join/pthread_exit to get into reentrant
       mode during thread shutdown/cleanup.  */

    /* Wait until child thread has progressed far enough into its
       __mf_pthread_spawner() call.  */
    while (1) /* XXX: timeout? */
      {
        volatile struct pthread_start_info *psip = & psi;
        pi = psip->thread_info;
        if (pi != NULL)
          break;
        sched_yield ();
      }
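    /* The volatile qualifier on psip forces a fresh load of
       psi.thread_info on each iteration; the child's store to that
       field in __mf_pthread_spawner() is what ends this spin.  */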

    /* Fill in remaining fields in pthread_info.  */
    pi->stack = override_stack;
    pi->stack_size = override_stacksize;
    /* XXX: this might be too late for future heuristics that attempt
       to use thread stack bounds.  We may need to put the new thread
       to sleep.  */
  }


  /* May need to clean up if we created a pthread_attr_t of our own.  */
  if (attr == NULL)
    pthread_attr_destroy (& override_attr); /* NB: this shouldn't deallocate stack.  */

  return result;
}



#if PIC
/* A special bootstrap variant.  */
int
__mf_0fn_pthread_join (pthread_t thr, void **rc)
{
  return -1;
}
#endif


#undef pthread_join
WRAPPER(int, pthread_join, pthread_t thr, void **rc)
{
  DECLARE(int, pthread_join, pthread_t thr, void **rc);
  int result;

  TRACE ("pthread_join\n");
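  /* Go reentrant around the real call so that memory accesses made
     inside libpthread's own join machinery are not bounds-checked.  */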
  __mf_state = reentrant;
  result = CALL_REAL (pthread_join, thr, rc);
  __mf_state = active;

  return result;
}


#if PIC
/* A special bootstrap variant.  */
void
__mf_0fn_pthread_exit (void *rc)
{
}
#endif


#undef pthread_exit
WRAPPER(void, pthread_exit, void *rc)
{
  DECLARE(void, pthread_exit, void *rc);

  TRACE ("pthread_exit\n");
  /* __mf_state = reentrant; */
  CALL_REAL (pthread_exit, rc);
  /* NOTREACHED */
}



#endif /* pthreadstuff */