/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combined
executable.)

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */


#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined. */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1
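/* _GNU_SOURCE targets glibc, __EXTENSIONS__ Solaris, and _ALL_SOURCE
   and _LARGE_FILE_API AIX; the others are generic POSIX/XPG
   feature-test macros.  */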

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <sched.h>
#include <fcntl.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif


/* Multithreading support hooks.  */



#ifndef LIBMUDFLAPTH
#error "pthreadstuff is to be included only in libmudflapth"
#endif



/* Describe a thread (dead or alive).  */
struct pthread_info
{
  short used_p;    /* Is this slot in use?  */
  short dead_p;    /* Is this thread dead?  */
  pthread_t self;  /* The thread id.  */

  /* If libmudflapth allocated the stack, store its adjusted base/size.  */
  void *stack;
  size_t stack_size;
  /* The _alloc fields store unadjusted values from the moment of allocation.  */
  void *stack_alloc;
  size_t stack_size_alloc;

  int *thread_errno;
  enum __mf_state_enum state;
};


/* Describe the startup information for a new user thread.  */
struct pthread_start_info
{
  /* The user's thread entry point and argument.  */
  void * (*user_fn)(void *);
  void *user_arg;

  /* Set by user thread when this startup struct may be disposed of.  */
  struct pthread_info *thread_info;
};




/* To avoid dynamic memory allocation, use a static array to store
   these thread description structs.  The second (_idx) array is used
   as a simple caching hash table, mapping PTHREAD_HASH(thread) to its
   index in __mf_pthread_info[].  */

#define LIBMUDFLAPTH_THREADS_MAX 1024
static struct pthread_info __mf_pthread_info[LIBMUDFLAPTH_THREADS_MAX];
static unsigned __mf_pthread_info_idx[LIBMUDFLAPTH_THREADS_MAX];
#define PTHREAD_HASH(p) ((unsigned) (p) % LIBMUDFLAPTH_THREADS_MAX)
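
/* For example (assuming pthread_t converts to an integral id, which
   POSIX does not guarantee): a thread id converting to 3073 hashes to
   slot 3073 % 1024 == 1, so __mf_pthread_info_idx[1] caches that
   thread's index into __mf_pthread_info[].  Collisions are harmless,
   since __mf_find_threadinfo() below falls back to a linear search of
   the whole table.  */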


/* Find any old empty entry in __mf_pthread_info; mark it used and
   return it.  Return NULL if there are no more available slots.  */
struct pthread_info*
__mf_allocate_blank_threadinfo (unsigned* idx)
{
  static unsigned probe = LIBMUDFLAPTH_THREADS_MAX-1;
  unsigned probe_at_start = probe;
  static pthread_mutex_t mutex =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
    PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
    PTHREAD_MUTEX_INITIALIZER;
#endif
  int rc;

  rc = pthread_mutex_lock (& mutex);
  assert (rc == 0);

  /* Look for a blank spot starting one past the last one we found.  */
  do
    {
      probe = (probe + 1) % LIBMUDFLAPTH_THREADS_MAX;
      struct pthread_info* pi = & __mf_pthread_info [probe];
      if (! pi->used_p)
        {
          /* memset (pi, 0, sizeof (*pi)); */
          pi->used_p = 1;
          if (idx != NULL) *idx = probe;
          /* VERBOSE_TRACE ("allocated threadinfo slot %u\n", probe); */
          rc = pthread_mutex_unlock (& mutex);
          assert (rc == 0);
          return pi;
        }
    }
  while (probe != probe_at_start);

  rc = pthread_mutex_unlock (& mutex);
  assert (rc == 0);
  return NULL;
}
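
/* A hypothetical caller would use the function above roughly as
   follows; this sketches the pattern that __mf_find_threadinfo()
   below actually implements:

     unsigned slot;
     struct pthread_info *pi = __mf_allocate_blank_threadinfo (&slot);
     if (pi != NULL)
       {
         pi->self = pthread_self ();
         __mf_pthread_info_idx[PTHREAD_HASH (pi->self)] = slot;
       }
*/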


/* Find and return the pthread_info struct for the current thread.
   There might already be one in __mf_pthread_info for this thread, in
   which case return it.  There may not be one (if this is the main
   thread, an auxiliary -lpthread manager, or an actual user thread
   making an early call into libmudflap); in these cases, create a new
   entry.  If it's not the main thread, put it into the reentrant
   initial state.

   NB: VERBOSE_TRACE type functions are not generally safe to call
   from this context, since a new thread might just be "booting up",
   making printf unsafe to call.  */
static struct pthread_info*
__mf_find_threadinfo ()
{
  pthread_t it = pthread_self ();
  unsigned *hash = & __mf_pthread_info_idx [PTHREAD_HASH (it)];
  struct pthread_info *result = NULL;
  static pthread_t last;
  static int main_thread_seen_p;

  /* Check out the lookup cache; failing that, do a linear search
     around the table.  */
  {
    struct pthread_info* pi = & __mf_pthread_info [*hash];
    unsigned i;

    if (pi->used_p && pi->self == it)
      result = pi;
    else for (i = 0; i < LIBMUDFLAPTH_THREADS_MAX; i++)
      {
        struct pthread_info* pi2 = & __mf_pthread_info [i];
        if (pi2->used_p && pi2->self == it)
          {
            *hash = i;
            result = pi2;
            break;
          }
      }
  }

  if (result == NULL)
    {
      /* This thread is so far unseen in libmudflap; create a
         __mf_pthread_info record for it.  The main thread is different
         from the auto-recognized worker bees: for example, we can
         assume that it's a fully stack/errno-equipped thread.  */
      unsigned *hash = & __mf_pthread_info_idx [PTHREAD_HASH (it)];
      struct pthread_info* pi = __mf_allocate_blank_threadinfo (hash);
      assert (pi != NULL);
      assert (pi->used_p);
      result = pi;
      result->self = it;

      if (! main_thread_seen_p)
        {
          result->state = active;
          /* NB: leave result->thread_errno unset, as the main thread's
             errno has already been registered in __mf_init.  */
          /* NB: leave stack-related fields unset, to avoid
             deallocation.  */
          main_thread_seen_p = 1;
          /* VERBOSE_TRACE ("identified self as main thread\n"); */
        }
      else
        {
          result->state = reentrant;
          /* NB: leave result->thread_errno unset, as a worker thread's
             errno is unlikely to be used, and user threads fill it
             in during __mf_pthread_spawner().  */
          /* NB: leave stack-related fields unset, leaving pthread_create
             to fill them in for user threads, and leaving them empty for
             other threads.  */
          /* VERBOSE_TRACE ("identified self as new aux or user thread\n"); */
        }
    }

  if (last != it)
    {
      /*
      VERBOSE_TRACE ("found threadinfo for %u, slot %u\n",
                     (unsigned) it,
                     (unsigned) *hash);
      */
      last = it;
    }

  assert (result != NULL);
  assert (result->self == it);

  return result;
}



/* Return a pointer to the per-thread __mf_state variable.  */
enum __mf_state_enum *
__mf_state_perthread ()
{
  assert (! __mf_starting_p);
  return & (__mf_find_threadinfo()->state);
}


static void
__mf_pthread_cleanup (void *arg)
{
  struct pthread_info *pi = arg;

  /* XXX: This unregistration is not safe on platforms where distinct
     threads share errno (or at least its virtual address).  */
  if (pi->thread_errno != NULL)
    __mf_unregister (pi->thread_errno, sizeof (int), __MF_TYPE_GUESS);

  /* XXX: Only detached threads should designate themselves as dead
     here.  Non-detached threads are marked dead after their
     personalized pthread_join() call.  */
  pi->state = reentrant;
  pi->dead_p = 1;

  VERBOSE_TRACE ("thread pi %p exiting\n", pi);
}
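
/* NB: __mf_pthread_cleanup is registered with pthread_cleanup_push in
   __mf_pthread_spawner below, so it runs whether the user's thread
   function returns normally, calls pthread_exit, or is cancelled.  */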


static void *
__mf_pthread_spawner (void *arg)
{
  struct pthread_info *pi = __mf_find_threadinfo ();
  void *result = NULL;

  /* Turn off reentrancy indications.  */
  assert (pi->state == reentrant);
  pi->state = active;

  VERBOSE_TRACE ("new user thread\n");

  if (__mf_opts.heur_std_data)
    {
      pi->thread_errno = & errno;
      __mf_register (pi->thread_errno, sizeof (int),
                     __MF_TYPE_GUESS, "errno area (thread)");
      /* NB: we could use __MF_TYPE_STATIC above, but we guess that
         the thread errno is coming out of some dynamically allocated
         pool that we already know of as __MF_TYPE_HEAP.  */
    }

  /* We considered using pthread_key_t objects instead of these
     cleanup stacks, but they were less cooperative with the
     interposed malloc hooks in libmudflap.  */
  pthread_cleanup_push (& __mf_pthread_cleanup, pi);

  /* Call user thread.  */
  {
    /* Extract given entry point and argument.  */
    struct pthread_start_info *psi = arg;
    void * (*user_fn)(void *) = psi->user_fn;
    void *user_arg = psi->user_arg;

    /* Signal the main thread to resume.  */
    psi->thread_info = pi;

    result = (*user_fn)(user_arg);
  }

  pthread_cleanup_pop (1 /* execute */);

  /* NB: there is a slight race here.  The pthread_info field will now
     say this thread is dead, but it may still be running ... right
     here.  We try to discourage this possibility by delaying the dead
     thread's stack deallocation in the pthread_create wrapper's
     garbage-collection pass below.  */

  return result;
}


#if PIC
/* A special bootstrap variant.  */
int
__mf_0fn_pthread_create (pthread_t *thr, const pthread_attr_t *attr,
                         void * (*start) (void *), void *arg)
{
  return -1;
}
#endif
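
/* NB on the machinery (assuming the conventions defined in mf-impl.h):
   the __mf_0fn_* variants handle calls that arrive while the dynamic
   linker is still bootstrapping libmudflap, before the real symbols
   have been resolved, so an early pthread_create simply fails with -1.
   WRAPPER defines the interposed entry point for a libc/libpthread
   function, DECLARE declares the underlying real function, and
   CALL_REAL invokes it.  */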


#undef pthread_create
WRAPPER(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
        void * (*start) (void *), void *arg)
{
  DECLARE(int, munmap, void *p, size_t l);
  DECLARE(void *, mmap, void *p, size_t l, int prot, int flags, int fd, off_t of);
  DECLARE(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
          void * (*start) (void *), void *arg);
  int result;
  pthread_attr_t override_attr;
  void *override_stack;
  size_t override_stacksize;
  void *override_stack_alloc = (void *) 0;
  size_t override_stacksize_alloc = 0;
  unsigned i;

  TRACE ("pthread_create\n");

  /* Garbage-collect dead threads' stacks.  */
  LOCKTH ();
  for (i = 0; i < LIBMUDFLAPTH_THREADS_MAX; i++)
    {
      struct pthread_info *pi = & __mf_pthread_info [i];
      if (! pi->used_p)
        continue;
      if (! pi->dead_p)
        continue;

      /* VERBOSE_TRACE ("thread %u pi %p stack cleanup deferred (%u)\n",
         (unsigned) pi->self, pi, pi->dead_p); */

      /* Delay actual deallocation by a few cycles, to try to discourage
         the race mentioned at the end of __mf_pthread_spawner().  */
      if (pi->dead_p)
        pi->dead_p ++;
      if (pi->dead_p >= 10 /* XXX */)
        {
          if (pi->stack)
            CALL_REAL (munmap, pi->stack_alloc, pi->stack_size_alloc);

          VERBOSE_TRACE ("slot %u freed, stack %p\n", i, pi->stack_alloc);
          memset (pi, 0, sizeof (*pi));

          /* One round of garbage collection is enough.  */
          break;
        }
    }
  UNLOCKTH ();

  /* Let's allocate a stack for this thread, if one is not already
     supplied by the caller.  We don't want to let e.g. the
     linuxthreads manager thread do this allocation.  */
  if (attr != NULL)
    override_attr = *attr;
  else
    pthread_attr_init (& override_attr);

  /* Get supplied attributes, if any.  */
  /* XXX: consider using POSIX2K attr_getstack() */
  if (pthread_attr_getstackaddr (& override_attr, & override_stack) != 0 ||
      pthread_attr_getstacksize (& override_attr, & override_stacksize) != 0)
    {
      override_stack = NULL;
      override_stacksize = 0;
    }

  /* Do we need to allocate the new thread's stack?  */
  if (__mf_opts.thread_stack && override_stack == NULL)
    {
      uintptr_t alignment = 256; /* power of two */

      /* Perturb the initial stack addresses slightly, to encourage
         threads to have nonconflicting entries in the lookup cache
         for their tracked stack objects.  */
      static unsigned perturb = 0;
      const unsigned perturb_delta = 32;
      const unsigned perturb_count = 16;
      perturb += perturb_delta;
      if (perturb > perturb_delta*perturb_count) perturb = 0;
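
      /* With the constants above, successive stack allocations use
         perturbations of 32, 64, ..., 512 bytes (then 0) before the
         cycle repeats, so up to perturb_count + 1 consecutive thread
         stacks start at distinct offsets.  */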

      /* Use glibc x86 defaults.  */
      /* PTHREAD_STACK_MIN should have been defined in <limits.h>.  */
#ifndef PTHREAD_STACK_MIN
#define PTHREAD_STACK_MIN 65536
#endif
      /* NB: max() is not standard C; provide a definition in case the
         libmudflap headers don't supply one.  */
#ifndef max
#define max(a,b) ((a) > (b) ? (a) : (b))
#endif
      override_stacksize = max (PTHREAD_STACK_MIN, __mf_opts.thread_stack * 1024);


#if defined(MAP_ANONYMOUS)
#define MF_MAP_ANON MAP_ANONYMOUS
#elif defined(MAP_ANON)
#define MF_MAP_ANON MAP_ANON
#endif

#ifndef MAP_FAILED
#define MAP_FAILED ((void *) -1)
#endif

#ifdef MF_MAP_ANON
      override_stack = CALL_REAL (mmap, NULL, override_stacksize,
                                  PROT_READ|PROT_WRITE,
                                  MAP_PRIVATE|MF_MAP_ANON,
                                  0, 0);
#else
      /* Try mapping /dev/zero instead.  */
      {
        static int zerofd = -1;
        if (zerofd == -1)
          zerofd = open ("/dev/zero", O_RDWR);
        if (zerofd == -1)
          override_stack = MAP_FAILED;
        else
          override_stack = CALL_REAL (mmap, NULL, override_stacksize,
                                      PROT_READ|PROT_WRITE,
                                      MAP_PRIVATE, zerofd, 0);
      }
#endif

      if (override_stack == 0 || override_stack == MAP_FAILED)
        {
          errno = EAGAIN;
          return -1;
        }

      VERBOSE_TRACE ("thread stack alloc %p size %lu\n",
                     override_stack, (unsigned long) override_stacksize);

      /* Save the original allocated values for later deallocation.  */
      override_stack_alloc = override_stack;
      override_stacksize_alloc = override_stacksize;

      /* The stackaddr pthreads attribute is a candidate stack pointer.
         It must point near the top or the bottom of this buffer, depending
         on whether the stack grows downward or upward, and must be
         suitably aligned.  On x86 the stack grows down, so we set
         stackaddr near the top.  */
      /* XXX: port logic */
      override_stack = (void *)
        (((uintptr_t) override_stack + override_stacksize - alignment - perturb)
         & (~(uintptr_t)(alignment-1)));
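
      /* Worked example of the arithmetic above (hypothetical numbers):
         for a 64 KiB mapping at 0x40000000 with alignment 256 and
         perturb 32, the candidate stack pointer becomes
         (0x40000000 + 0x10000 - 256 - 32) & ~(uintptr_t)255
         == 0x4000fe00, i.e. just below the top of the buffer and
         256-byte aligned.  */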

      /* XXX: consider using POSIX2K attr_setstack() */
      if (pthread_attr_setstackaddr (& override_attr, override_stack) != 0 ||
          pthread_attr_setstacksize (& override_attr,
                                     override_stacksize - alignment - perturb) != 0)
        {
          /* This should not happen.  Unmap the original, unadjusted
             allocation, since override_stack has already been moved up
             and aligned by this point.  */
          CALL_REAL (munmap, override_stack_alloc, override_stacksize_alloc);
          errno = EAGAIN;
          return -1;
        }
    }

  /* Actually start the child thread.  */
  {
    struct pthread_start_info psi;
    struct pthread_info *pi = NULL;

    /* Fill in startup-control fields.  */
    psi.user_fn = start;
    psi.user_arg = arg;
    psi.thread_info = NULL;

    /* Actually create the thread.  */
    __mf_state = reentrant;
    result = CALL_REAL (pthread_create, thr, & override_attr,
                        & __mf_pthread_spawner, (void *) & psi);
    __mf_state = active;
    /* We also hook pthread_join/pthread_exit to get into reentrant
       mode during thread shutdown/cleanup.  */

    /* Wait until the child thread has progressed far enough into its
       __mf_pthread_spawner() call.  NB: if the real pthread_create
       failed, the spawner never runs, so don't wait at all in that
       case.  */
    if (result == 0)
      {
        while (1) /* XXX: timeout? */
          {
            volatile struct pthread_start_info *psip = & psi;
            pi = psip->thread_info;
            if (pi != NULL)
              break;
            sched_yield ();
          }
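
        /* At this point the child has copied user_fn/user_arg out of
           psi and published its pthread_info, so the stack-local psi
           may safely go out of scope.  */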

        /* Fill in remaining fields in pthread_info.  */
        pi->stack = override_stack;
        pi->stack_size = override_stacksize;
        pi->stack_alloc = override_stack_alloc;
        pi->stack_size_alloc = override_stacksize_alloc;
        /* XXX: this might be too late for future heuristics that attempt
           to use thread stack bounds.  We may need to put the new thread
           to sleep.  */
      }
  }


  /* May need to clean up if we created a pthread_attr_t of our own.  */
  if (attr == NULL)
    pthread_attr_destroy (& override_attr); /* NB: this shouldn't deallocate stack.  */

  return result;
}



#if PIC
/* A special bootstrap variant.  */
int
__mf_0fn_pthread_join (pthread_t thr, void **rc)
{
  return -1;
}
#endif


#undef pthread_join
WRAPPER(int, pthread_join, pthread_t thr, void **rc)
{
  DECLARE(int, pthread_join, pthread_t thr, void **rc);
  int result;

  TRACE ("pthread_join\n");
  __mf_state = reentrant;
  result = CALL_REAL (pthread_join, thr, rc);
  __mf_state = active;

  return result;
}


#if PIC
/* A special bootstrap variant.  */
void
__mf_0fn_pthread_exit (void *rc)
{
}
#endif


#undef pthread_exit
WRAPPER(void, pthread_exit, void *rc)
{
  DECLARE(void, pthread_exit, void *rc);

  TRACE ("pthread_exit\n");
  /* __mf_state = reentrant; */
  CALL_REAL (pthread_exit, rc);
  /* NOTREACHED */
  exit (0);  /* Satisfy noreturn attribute of pthread_exit.  */
}