]>
Commit | Line | Data |
---|---|---|
6de9cd9a | 1 | /* Mudflap: narrow-pointer bounds-checking by tree rewriting. |
a548d7b7 | 2 | Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. |
6de9cd9a DN |
3 | Contributed by Frank Ch. Eigler <fche@redhat.com> |
4 | and Graydon Hoare <graydon@redhat.com> | |
fc5515a8 FCE |
5 | Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>, |
6 | adapted from libiberty. | |
6de9cd9a DN |
7 | |
8 | This file is part of GCC. | |
9 | ||
10 | GCC is free software; you can redistribute it and/or modify it under | |
11 | the terms of the GNU General Public License as published by the Free | |
12 | Software Foundation; either version 2, or (at your option) any later | |
13 | version. | |
14 | ||
15 | In addition to the permissions in the GNU General Public License, the | |
16 | Free Software Foundation gives you unlimited permission to link the | |
17 | compiled version of this file into combinations with other programs, | |
18 | and to distribute those combinations without any restriction coming | |
19 | from the use of this file. (The General Public License restrictions | |
20 | do apply in other respects; for example, they cover modification of | |
21 | the file, and distribution when not linked into a combine | |
22 | executable.) | |
23 | ||
24 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
25 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
26 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
27 | for more details. | |
28 | ||
29 | You should have received a copy of the GNU General Public License | |
30 | along with GCC; see the file COPYING. If not, write to the Free | |
f9d09c43 KC |
31 | Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA |
32 | 02110-1301, USA. */ | |
6de9cd9a DN |
33 | |
34 | #include "config.h" | |
35 | ||
36 | /* These attempt to coax various unix flavours to declare all our | |
37 | needed tidbits in the system headers. */ | |
029277b7 | 38 | #if !defined(__FreeBSD__) && !defined(__APPLE__) |
6de9cd9a DN |
39 | #define _POSIX_SOURCE |
40 | #endif /* Some BSDs break <sys/socket.h> if this is defined. */ | |
fb925a51 | 41 | #define _GNU_SOURCE |
6de9cd9a DN |
42 | #define _XOPEN_SOURCE |
43 | #define _BSD_TYPES | |
44 | #define __EXTENSIONS__ | |
45 | #define _ALL_SOURCE | |
46 | #define _LARGE_FILE_API | |
47 | #define _XOPEN_SOURCE_EXTENDED 1 | |
48 | ||
49 | #include <stdio.h> | |
50 | #include <stdlib.h> | |
51 | #include <sys/types.h> | |
52 | #include <sys/time.h> | |
53 | #include <time.h> | |
54 | #include <unistd.h> | |
55 | #ifdef HAVE_EXECINFO_H | |
56 | #include <execinfo.h> | |
57 | #endif | |
58 | #ifdef HAVE_SIGNAL_H | |
59 | #include <signal.h> | |
60 | #endif | |
61 | #include <assert.h> | |
62 | ||
63 | #include <string.h> | |
64 | #include <limits.h> | |
65 | #include <sys/types.h> | |
66 | #include <signal.h> | |
67 | #include <errno.h> | |
dc88d66f | 68 | #include <ctype.h> |
6de9cd9a DN |
69 | |
70 | #include "mf-runtime.h" | |
71 | #include "mf-impl.h" | |
72 | ||
73 | ||
fc5515a8 FCE |
74 | /* ------------------------------------------------------------------------ */ |
75 | /* Splay-tree implementation. */ | |
76 | ||
/* Keys are object addresses (uintptr_t); values are opaque pointers,
   used here to carry __mf_object_t records.  */
typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree.  */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree.  */
struct mfsplay_tree_node_s
{
  /* Data.  */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children.  */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion.  */
};

/* The splay tree itself.  */
struct mfsplay_tree_s
{
  /* The root of the tree.  */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  last_splayed_key_p is nonzero when this cache
     is valid.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics.  */
  unsigned num_keys;

  /* Traversal recursion control flags.  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
127 | ||
6de9cd9a DN |
128 | /* ------------------------------------------------------------------------ */ |
129 | /* Utility macros */ | |
130 | ||
131 | #define CTOR __attribute__ ((constructor)) | |
132 | #define DTOR __attribute__ ((destructor)) | |
133 | ||
134 | ||
135 | /* Codes to describe the context in which a violation occurs. */ | |
136 | #define __MF_VIOL_UNKNOWN 0 | |
137 | #define __MF_VIOL_READ 1 | |
138 | #define __MF_VIOL_WRITE 2 | |
139 | #define __MF_VIOL_REGISTER 3 | |
140 | #define __MF_VIOL_UNREGISTER 4 | |
141 | #define __MF_VIOL_WATCH 5 | |
142 | ||
143 | /* Protect against recursive calls. */ | |
6de9cd9a | 144 | |
7544a87f RH |
145 | static void |
146 | begin_recursion_protect1 (const char *pf) | |
147 | { | |
148 | if (__mf_get_state () == reentrant) | |
149 | { | |
150 | write (2, "mf: erroneous reentrancy detected in `", 38); | |
151 | write (2, pf, strlen(pf)); | |
152 | write (2, "'\n", 2); \ | |
153 | abort (); | |
154 | } | |
155 | __mf_set_state (reentrant); | |
156 | } | |
6de9cd9a | 157 | |
7544a87f RH |
158 | #define BEGIN_RECURSION_PROTECT() \ |
159 | begin_recursion_protect1 (__PRETTY_FUNCTION__) | |
6de9cd9a | 160 | |
7544a87f RH |
161 | #define END_RECURSION_PROTECT() \ |
162 | __mf_set_state (active) | |
6de9cd9a DN |
163 | |
164 | /* ------------------------------------------------------------------------ */ | |
165 | /* Required globals. */ | |
166 | ||
167 | #define LOOKUP_CACHE_MASK_DFL 1023 | |
ddfabf89 | 168 | #define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */ |
6de9cd9a DN |
169 | #define LOOKUP_CACHE_SHIFT_DFL 2 |
170 | ||
171 | struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX]; | |
172 | uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL; | |
173 | unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL; | |
174 | #define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1) | |
175 | ||
176 | struct __mf_options __mf_opts; | |
6de9cd9a | 177 | int __mf_starting_p = 1; |
7544a87f RH |
178 | |
179 | #ifdef LIBMUDFLAPTH | |
180 | #ifdef HAVE_TLS | |
22f99b82 | 181 | __thread enum __mf_state_enum __mf_state_1 = reentrant; |
7544a87f | 182 | #endif |
6de9cd9a | 183 | #else |
22f99b82 | 184 | enum __mf_state_enum __mf_state_1 = reentrant; |
6de9cd9a DN |
185 | #endif |
186 | ||
6de9cd9a DN |
187 | #ifdef LIBMUDFLAPTH |
188 | pthread_mutex_t __mf_biglock = | |
189 | #ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP | |
190 | PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP; | |
191 | #else | |
192 | PTHREAD_MUTEX_INITIALIZER; | |
193 | #endif | |
194 | #endif | |
195 | ||
196 | /* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even | |
197 | the libmudflap.la (no threading support) can diagnose whether | |
198 | the application is linked with -lpthread. See __mf_usage() below. */ | |
199 | #if HAVE_PTHREAD_H | |
35a1e17e | 200 | #ifdef _POSIX_THREADS |
6de9cd9a | 201 | #pragma weak pthread_join |
35a1e17e NC |
202 | #else |
203 | #define pthread_join NULL | |
204 | #endif | |
6de9cd9a DN |
205 | #endif |
206 | ||
207 | ||
208 | /* ------------------------------------------------------------------------ */ | |
209 | /* stats-related globals. */ | |
210 | ||
211 | static unsigned long __mf_count_check; | |
212 | static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX]; | |
6de9cd9a DN |
213 | static unsigned long __mf_count_register; |
214 | static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1]; | |
215 | static unsigned long __mf_count_unregister; | |
216 | static unsigned long __mf_total_unregister_size; | |
217 | static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1]; | |
218 | static unsigned long __mf_sigusr1_received; | |
219 | static unsigned long __mf_sigusr1_handled; | |
220 | /* not static */ unsigned long __mf_reentrancy; | |
221 | #ifdef LIBMUDFLAPTH | |
222 | /* not static */ unsigned long __mf_lock_contention; | |
223 | #endif | |
224 | ||
225 | ||
226 | /* ------------------------------------------------------------------------ */ | |
227 | /* mode-check-related globals. */ | |
228 | ||
/* Record describing one registered memory object (a region tracked by
   __mf_register).  Live objects reside in the per-type splay trees;
   after unregistration they may persist in the cemetery arrays.  */
typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object. */
  unsigned write_count; /* Likewise for __mf_check/write. */
  unsigned liveness; /* A measure of recent checking activity. */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this. */

  /* Allocation context, captured at registration time.  */
  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  /* Deallocation context, filled in when the object is unregistered.  */
  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;
257 | ||
cfbd22d7 FCE |
258 | /* Live objects: splay trees, separated by type, ordered on .low (base address). */ |
259 | /* Actually stored as static vars within lookup function below. */ | |
6de9cd9a DN |
260 | |
261 | /* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */ | |
262 | static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */ | |
cfbd22d7 | 263 | static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX]; |
6de9cd9a DN |
264 | |
265 | ||
266 | /* ------------------------------------------------------------------------ */ | |
267 | /* Forward function declarations */ | |
268 | ||
429b4470 | 269 | void __mf_init () CTOR; |
6de9cd9a | 270 | static void __mf_sigusr1_respond (); |
fb925a51 | 271 | static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high, |
cfbd22d7 | 272 | __mf_object_t **objs, unsigned max_objs); |
fb925a51 | 273 | static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high, |
cfbd22d7 | 274 | __mf_object_t **objs, unsigned max_objs, int type); |
fb925a51 | 275 | static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high, |
cfbd22d7 | 276 | __mf_object_t **objs, unsigned max_objs); |
6de9cd9a | 277 | static void __mf_adapt_cache (); |
6de9cd9a DN |
278 | static void __mf_describe_object (__mf_object_t *obj); |
279 | static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag); | |
fc5515a8 | 280 | static mfsplay_tree __mf_object_tree (int type); |
cfbd22d7 FCE |
281 | static void __mf_link_object (__mf_object_t *node); |
282 | static void __mf_unlink_object (__mf_object_t *node); | |
6de9cd9a DN |
283 | |
284 | ||
285 | /* ------------------------------------------------------------------------ */ | |
286 | /* Configuration engine */ | |
287 | ||
/* Install the built-in default option values into __mf_opts.
   Called during __mf_init, before MUDFLAP_OPTIONS can override.  */
static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003; /* Adapt lookup cache after ~1e6 misses (see -lc-adapt).  */
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
#ifdef HAVE___LIBC_FREERES
  __mf_opts.call_libc_freeres = 1;
#endif
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}
311 | ||
/* Table driving MUDFLAP_OPTIONS parsing (__mfu_set_options) and the
   -help listing (__mf_usage).  set_option entries store `value' into
   *target (or 0 when negated with -no-); read_integer_option entries
   parse `-name=N' into *target.  The table is NULL-name terminated.  */
static struct option
{
  char *name;
  char *description;
  enum
    {
      set_option,
      read_integer_option,
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
#ifdef HAVE___LIBC_FREERES
    {"libc-freeres",
     "call glibc __libc_freeres at shutdown for better leak data",
     set_option, 1, &__mf_opts.call_libc_freeres},
#endif
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}
  };
439 | ||
440 | static void | |
441 | __mf_usage () | |
442 | { | |
443 | struct option *opt; | |
444 | ||
fb925a51 | 445 | fprintf (stderr, |
cfbd22d7 | 446 | "This is a %s%sGCC \"mudflap\" memory-checked binary.\n" |
54419590 | 447 | "Mudflap is Copyright (C) 2002-2004 Free Software Foundation, Inc.\n" |
cfbd22d7 FCE |
448 | "\n" |
449 | "The mudflap code can be controlled by an environment variable:\n" | |
450 | "\n" | |
451 | "$ export MUDFLAP_OPTIONS='<options>'\n" | |
452 | "$ <mudflapped_program>\n" | |
453 | "\n" | |
454 | "where <options> is a space-separated list of \n" | |
455 | "any of the following options. Use `-no-OPTION' to disable options.\n" | |
456 | "\n", | |
6de9cd9a | 457 | #if HAVE_PTHREAD_H |
7544a87f | 458 | (pthread_join ? "multi-threaded " : "single-threaded "), |
6de9cd9a | 459 | #else |
cfbd22d7 | 460 | "", |
6de9cd9a DN |
461 | #endif |
462 | #if LIBMUDFLAPTH | |
cfbd22d7 | 463 | "thread-aware " |
6de9cd9a | 464 | #else |
cfbd22d7 | 465 | "thread-unaware " |
6de9cd9a | 466 | #endif |
cfbd22d7 | 467 | ); |
6de9cd9a DN |
468 | /* XXX: The multi-threaded thread-unaware combination is bad. */ |
469 | ||
470 | for (opt = options; opt->name; opt++) | |
471 | { | |
472 | int default_p = (opt->value == * opt->target); | |
473 | ||
474 | switch (opt->type) | |
cfbd22d7 FCE |
475 | { |
476 | char buf[128]; | |
477 | case set_option: | |
478 | fprintf (stderr, "-%-23.23s %s", opt->name, opt->description); | |
479 | if (default_p) | |
480 | fprintf (stderr, " [active]\n"); | |
481 | else | |
482 | fprintf (stderr, "\n"); | |
483 | break; | |
484 | case read_integer_option: | |
485 | strncpy (buf, opt->name, 128); | |
486 | strncpy (buf + strlen (opt->name), "=N", 2); | |
487 | fprintf (stderr, "-%-23.23s %s", buf, opt->description); | |
488 | fprintf (stderr, " [%d]\n", * opt->target); | |
fb925a51 | 489 | break; |
cfbd22d7 FCE |
490 | default: abort(); |
491 | } | |
6de9cd9a DN |
492 | } |
493 | ||
494 | fprintf (stderr, "\n"); | |
495 | } | |
496 | ||
497 | ||
/* Locked, recursion-protected public entry point for runtime option
   parsing.  Delegates to __mfu_set_options; returns its result
   (negative on parse error or an explicit help request).  */
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
512 | ||
513 | ||
fb925a51 | 514 | int |
6de9cd9a DN |
515 | __mfu_set_options (const char *optstr) |
516 | { | |
517 | struct option *opts = 0; | |
518 | char *nxt = 0; | |
519 | long tmp = 0; | |
520 | int rc = 0; | |
521 | const char *saved_optstr = optstr; | |
522 | ||
523 | /* XXX: bounds-check for optstr! */ | |
524 | ||
525 | while (*optstr) | |
526 | { | |
527 | switch (*optstr) { | |
528 | case ' ': | |
529 | case '\t': | |
530 | case '\n': | |
cfbd22d7 FCE |
531 | optstr++; |
532 | break; | |
6de9cd9a DN |
533 | |
534 | case '-': | |
cfbd22d7 | 535 | if (*optstr+1) |
fb925a51 | 536 | { |
cfbd22d7 FCE |
537 | int negate = 0; |
538 | optstr++; | |
539 | ||
fb925a51 | 540 | if (*optstr == '?' || |
cfbd22d7 FCE |
541 | strncmp (optstr, "help", 4) == 0) |
542 | { | |
543 | /* Caller will print help and exit. */ | |
544 | return -1; | |
545 | } | |
fb925a51 | 546 | |
cfbd22d7 FCE |
547 | if (strncmp (optstr, "no-", 3) == 0) |
548 | { | |
549 | negate = 1; | |
550 | optstr = & optstr[3]; | |
551 | } | |
fb925a51 | 552 | |
cfbd22d7 FCE |
553 | for (opts = options; opts->name; opts++) |
554 | { | |
555 | if (strncmp (optstr, opts->name, strlen (opts->name)) == 0) | |
556 | { | |
557 | optstr += strlen (opts->name); | |
558 | assert (opts->target); | |
fb925a51 | 559 | switch (opts->type) |
cfbd22d7 FCE |
560 | { |
561 | case set_option: | |
562 | if (negate) | |
563 | *(opts->target) = 0; | |
564 | else | |
565 | *(opts->target) = opts->value; | |
566 | break; | |
567 | case read_integer_option: | |
568 | if (! negate && (*optstr == '=' && *(optstr+1))) | |
569 | { | |
570 | optstr++; | |
571 | tmp = strtol (optstr, &nxt, 10); | |
572 | if ((optstr != nxt) && (tmp != LONG_MAX)) | |
573 | { | |
fb925a51 | 574 | optstr = nxt; |
cfbd22d7 FCE |
575 | *(opts->target) = (int)tmp; |
576 | } | |
577 | } | |
578 | else if (negate) | |
579 | * opts->target = 0; | |
580 | break; | |
581 | } | |
582 | } | |
583 | } | |
584 | } | |
585 | break; | |
fb925a51 | 586 | |
6de9cd9a | 587 | default: |
fb925a51 | 588 | fprintf (stderr, |
cfbd22d7 FCE |
589 | "warning: unrecognized string '%s' in mudflap options\n", |
590 | optstr); | |
591 | optstr += strlen (optstr); | |
592 | rc = -1; | |
593 | break; | |
6de9cd9a DN |
594 | } |
595 | } | |
596 | ||
597 | /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */ | |
598 | __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1); | |
599 | __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1); | |
600 | ||
601 | /* Clear the lookup cache, in case the parameters got changed. */ | |
602 | /* XXX: race */ | |
603 | memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache)); | |
604 | /* void slot 0 */ | |
605 | __mf_lookup_cache[0].low = MAXPTR; | |
606 | ||
607 | TRACE ("set options from `%s'\n", saved_optstr); | |
608 | ||
609 | /* Call this unconditionally, in case -sigusr1-report was toggled. */ | |
610 | __mf_sigusr1_respond (); | |
611 | ||
612 | return rc; | |
613 | } | |
614 | ||
615 | ||
616 | #ifdef PIC | |
617 | ||
/* Resolve one interposed libc entry point E through dlsym/dlvsym with
   RTLD_NEXT, storing the result in e->pointer.  Idempotent: a
   previously resolved entry is left untouched.  Aborts the process if
   the symbol cannot be resolved.  */
void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  /* dlerror() must be consulted immediately after the dl* call; a
     non-NULL result indicates the lookup itself failed.  */
  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  /* A NULL pointer with no dlerror is still unusable for our purposes.  */
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}
647 | ||
648 | ||
fb925a51 MS |
649 | static void |
650 | __mf_resolve_dynamics () | |
6de9cd9a DN |
651 | { |
652 | int i; | |
653 | for (i = 0; i < dyn_INITRESOLVE; i++) | |
654 | __mf_resolve_single_dynamic (& __mf_dynamic[i]); | |
655 | } | |
656 | ||
657 | ||
/* NB: order must match enums in mf-impl.h */
/* Table of libc functions interposed by the mudflap hooks.  Entries
   before the "DUMMY" dyn_INITRESOLVE marker are resolved eagerly by
   __mf_resolve_dynamics; pointers start NULL and are filled in by
   __mf_resolve_single_dynamic.  */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};
674 | ||
675 | #endif /* PIC */ | |
676 | ||
677 | ||
678 | ||
679 | /* ------------------------------------------------------------------------ */ | |
680 | ||
cfbd22d7 | 681 | /* Lookup & manage automatic initialization of the five or so splay trees. */ |
fc5515a8 | 682 | static mfsplay_tree |
cfbd22d7 FCE |
683 | __mf_object_tree (int type) |
684 | { | |
fc5515a8 | 685 | static mfsplay_tree trees [__MF_TYPE_MAX+1]; |
cfbd22d7 FCE |
686 | assert (type >= 0 && type <= __MF_TYPE_MAX); |
687 | if (UNLIKELY (trees[type] == NULL)) | |
fc5515a8 | 688 | trees[type] = mfsplay_tree_new (); |
cfbd22d7 FCE |
689 | return trees[type]; |
690 | } | |
6de9cd9a | 691 | |
cfbd22d7 | 692 | |
/* Runtime initializer, declared as a constructor (CTOR) above and
   also callable directly.  Safe to invoke repeatedly: only the first
   call does work.  Resolves libc symbols (PIC), installs default and
   environment options, and registers the runtime's own state plus the
   NULL page as no-access regions.  */
/* not static */void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done. */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_state (active);

  __mf_set_default_options ();

  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          /* Bad option string or an explicit help request.  */
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

  /* Mark the runtime's own lookup-cache globals off-limits to the
     instrumented application.  */
#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics? */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
738 | ||
739 | ||
740 | ||
/* Interposed main() (linked in via symbol wrapping): when the
   heur-stdlib option is enabled, register argv, environ, errno and
   the stdio streams as known static objects before transferring
   control to the application's real main.  */
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;   /* Register the standard data only once. */

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      /* The argv pointer array (argc+1 slots including the NULL
         terminator), then each argument string with its NUL.  */
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      /* Each environment string; afterwards I holds the entry count,
         so environ itself spans i+1 pointers including the NULL.  */
      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays. */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
789 | ||
790 | ||
791 | ||
/* Runtime finalizer, run as a destructor: emit the final report and,
   for statically linked builds, disable further checking.  */
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
  /* Since we didn't populate the tree for allocations in constructors
     before __mf_init, we cannot check destructors after __mf_fini. */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}
804 | ||
805 | ||
806 | ||
807 | /* ------------------------------------------------------------------------ */ | |
808 | /* __mf_check */ | |
809 | ||
/* Public access-check entry point: acquire the big lock and the
   reentrancy guard, then delegate to the unlocked __mfu_check.
   TYPE is 0 for a read access, nonzero for a write; LOCATION is a
   human-readable source position for diagnostics.  */
void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
818 | ||
819 | ||
/* Unlocked core of the access checker.  Decide whether the access
   [ptr, ptr+sz) of the given TYPE (read/write) is legal under the
   current mudflap mode, updating the lookup cache on success and
   raising a violation on failure.  Caller must hold the lock.  */
void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);
  struct __mf_cache old_entry = *entry;  /* Snapshot for reuse statistics.  */

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  /* type == 0 denotes a read access (see TRACE below); optionally skip it.  */
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.
      */
      judgement = 1;
      break;

    case mode_populate:
      /* Accept unconditionally, but seed the cache so the inline
         instrumentation fast-path hits next time.  */
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters.  */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered.  */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one.  */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again.  */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                /* Common case: the stack-local one-element buffer suffices.  */
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics.  */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases.  */
            for (i = 0; i < obj_count; i++)
              {
                  __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad.  */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad.  */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects.  */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object?  */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access: cache this object's bounds.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span.  */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    /* The bytes just outside obj on each side, clamped
                       at the address-space edges.  */
                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across.  */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side.  */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        if (uncovered_high_p
                            && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered.  */
                if (uncovered == 0)
                  judgement = 1;
              }


            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgment is still unknown at this stage, loop
               around at most one more time.  */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }

      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      /* Count a cache-entry replacement as a "reuse" of the slot.  */
      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
1041 | ||
1042 | ||
cfbd22d7 | 1043 | static __mf_object_t * |
fb925a51 | 1044 | __mf_insert_new_object (uintptr_t low, uintptr_t high, int type, |
cfbd22d7 | 1045 | const char *name, uintptr_t pc) |
6de9cd9a DN |
1046 | { |
1047 | DECLARE (void *, calloc, size_t c, size_t n); | |
1048 | ||
cfbd22d7 FCE |
1049 | __mf_object_t *new_obj; |
1050 | new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t)); | |
1051 | new_obj->low = low; | |
1052 | new_obj->high = high; | |
1053 | new_obj->type = type; | |
1054 | new_obj->name = name; | |
1055 | new_obj->alloc_pc = pc; | |
6de9cd9a | 1056 | #if HAVE_GETTIMEOFDAY |
a082fc7a FCE |
1057 | if (__mf_opts.timestamps) |
1058 | gettimeofday (& new_obj->alloc_time, NULL); | |
6de9cd9a DN |
1059 | #endif |
1060 | #if LIBMUDFLAPTH | |
cfbd22d7 | 1061 | new_obj->alloc_thread = pthread_self (); |
6de9cd9a DN |
1062 | #endif |
1063 | ||
1064 | if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I)) | |
fb925a51 | 1065 | new_obj->alloc_backtrace_size = |
cfbd22d7 FCE |
1066 | __mf_backtrace (& new_obj->alloc_backtrace, |
1067 | (void *) pc, 2); | |
fb925a51 | 1068 | |
6de9cd9a DN |
1069 | __mf_link_object (new_obj); |
1070 | return new_obj; | |
1071 | } | |
1072 | ||
1073 | ||
fb925a51 | 1074 | static void |
6de9cd9a DN |
1075 | __mf_uncache_object (__mf_object_t *old_obj) |
1076 | { | |
1077 | /* Remove any low/high pointers for this object from the lookup cache. */ | |
fb925a51 | 1078 | |
6de9cd9a DN |
1079 | /* Can it possibly exist in the cache? */ |
1080 | if (LIKELY (old_obj->read_count + old_obj->write_count)) | |
1081 | { | |
1082 | uintptr_t low = old_obj->low; | |
1083 | uintptr_t high = old_obj->high; | |
84174531 | 1084 | struct __mf_cache *entry; |
6de9cd9a | 1085 | unsigned i; |
84174531 FCE |
1086 | if ((high - low) >= (__mf_lc_mask << __mf_lc_shift)) |
1087 | { | |
1088 | /* For large objects (>= cache size - 1) check the whole cache. */ | |
1089 | entry = & __mf_lookup_cache [0]; | |
1090 | for (i = 0; i <= __mf_lc_mask; i++, entry++) | |
cfbd22d7 | 1091 | { |
84174531 FCE |
1092 | /* NB: the "||" in the following test permits this code to |
1093 | tolerate the situation introduced by __mf_check over | |
1094 | contiguous objects, where a cache entry spans several | |
1095 | objects. */ | |
1096 | if (entry->low == low || entry->high == high) | |
1097 | { | |
1098 | entry->low = MAXPTR; | |
1099 | entry->high = MINPTR; | |
1100 | } | |
cfbd22d7 FCE |
1101 | } |
1102 | } | |
84174531 FCE |
1103 | else |
1104 | { | |
1105 | /* Object is now smaller then cache size. */ | |
1106 | unsigned entry_low_idx = __MF_CACHE_INDEX (low); | |
1107 | unsigned entry_high_idx = __MF_CACHE_INDEX (high); | |
1108 | if (entry_low_idx <= entry_high_idx) | |
1109 | { | |
1110 | entry = & __mf_lookup_cache [entry_low_idx]; | |
1111 | for (i = entry_low_idx; i <= entry_high_idx; i++, entry++) | |
1112 | { | |
1113 | /* NB: the "||" in the following test permits this code to | |
1114 | tolerate the situation introduced by __mf_check over | |
1115 | contiguous objects, where a cache entry spans several | |
1116 | objects. */ | |
1117 | if (entry->low == low || entry->high == high) | |
1118 | { | |
1119 | entry->low = MAXPTR; | |
1120 | entry->high = MINPTR; | |
1121 | } | |
1122 | } | |
1123 | } | |
1124 | else | |
1125 | { | |
1126 | /* Object wrapped around the end of the cache. First search | |
1127 | from low to end of cache and then from 0 to high. */ | |
1128 | entry = & __mf_lookup_cache [entry_low_idx]; | |
1129 | for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++) | |
1130 | { | |
1131 | /* NB: the "||" in the following test permits this code to | |
1132 | tolerate the situation introduced by __mf_check over | |
1133 | contiguous objects, where a cache entry spans several | |
1134 | objects. */ | |
1135 | if (entry->low == low || entry->high == high) | |
1136 | { | |
1137 | entry->low = MAXPTR; | |
1138 | entry->high = MINPTR; | |
1139 | } | |
1140 | } | |
1141 | entry = & __mf_lookup_cache [0]; | |
1142 | for (i = 0; i <= entry_high_idx; i++, entry++) | |
1143 | { | |
1144 | /* NB: the "||" in the following test permits this code to | |
1145 | tolerate the situation introduced by __mf_check over | |
1146 | contiguous objects, where a cache entry spans several | |
1147 | objects. */ | |
1148 | if (entry->low == low || entry->high == high) | |
1149 | { | |
1150 | entry->low = MAXPTR; | |
1151 | entry->high = MINPTR; | |
1152 | } | |
1153 | } | |
1154 | } | |
1155 | } | |
6de9cd9a DN |
1156 | } |
1157 | } | |
1158 | ||
1159 | ||
/* Public entry point for object registration: serialize against other
   threads and guard against re-entry, then delegate to the unlocked
   worker __mfu_register.  */
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
1169 | ||
1170 | ||
1171 | void | |
1172 | __mfu_register (void *ptr, size_t sz, int type, const char *name) | |
1173 | { | |
fb925a51 | 1174 | TRACE ("register ptr=%p size=%lu type=%x name='%s'\n", |
cfbd22d7 | 1175 | ptr, (unsigned long) sz, type, name ? name : ""); |
6de9cd9a DN |
1176 | |
1177 | if (__mf_opts.collect_stats) | |
1178 | { | |
1179 | __mf_count_register ++; | |
1180 | __mf_total_register_size [(type < 0) ? 0 : | |
fb925a51 | 1181 | (type > __MF_TYPE_MAX) ? 0 : |
cfbd22d7 | 1182 | type] += sz; |
6de9cd9a DN |
1183 | } |
1184 | ||
1185 | if (UNLIKELY (__mf_opts.sigusr1_report)) | |
1186 | __mf_sigusr1_respond (); | |
1187 | ||
1188 | switch (__mf_opts.mudflap_mode) | |
1189 | { | |
1190 | case mode_nop: | |
1191 | break; | |
fb925a51 | 1192 | |
6de9cd9a DN |
1193 | case mode_violate: |
1194 | __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL, | |
cfbd22d7 | 1195 | __MF_VIOL_REGISTER); |
6de9cd9a DN |
1196 | break; |
1197 | ||
1198 | case mode_populate: | |
1199 | /* Clear the cache. */ | |
1200 | /* XXX: why the entire cache? */ | |
1201 | /* XXX: race */ | |
1202 | memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache)); | |
1203 | /* void slot 0 */ | |
1204 | __mf_lookup_cache[0].low = MAXPTR; | |
1205 | break; | |
1206 | ||
1207 | case mode_check: | |
1208 | { | |
cfbd22d7 FCE |
1209 | __mf_object_t *ovr_objs [1]; |
1210 | unsigned num_overlapping_objs; | |
1211 | uintptr_t low = (uintptr_t) ptr; | |
1212 | uintptr_t high = CLAMPSZ (ptr, sz); | |
1213 | uintptr_t pc = (uintptr_t) __builtin_return_address (0); | |
fb925a51 | 1214 | |
cfbd22d7 FCE |
1215 | /* Treat unknown size indication as 1. */ |
1216 | if (UNLIKELY (sz == 0)) sz = 1; | |
1217 | ||
1218 | /* Look for objects only of the same type. This will e.g. permit a registration | |
1219 | of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS. At | |
1220 | __mf_check time however harmful overlaps will be detected. */ | |
1221 | num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type); | |
1222 | ||
1223 | /* Handle overlaps. */ | |
1224 | if (UNLIKELY (num_overlapping_objs > 0)) | |
1225 | { | |
1226 | __mf_object_t *ovr_obj = ovr_objs[0]; | |
fb925a51 | 1227 | |
cfbd22d7 FCE |
1228 | /* Accept certain specific duplication pairs. */ |
1229 | if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS)) | |
1230 | && ovr_obj->low == low | |
1231 | && ovr_obj->high == high | |
1232 | && ovr_obj->type == type) | |
1233 | { | |
1234 | /* Duplicate registration for static objects may come | |
1235 | from distinct compilation units. */ | |
fb925a51 MS |
1236 | VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n", |
1237 | (void *) low, (void *) high, | |
cfbd22d7 FCE |
1238 | (ovr_obj->name ? ovr_obj->name : "")); |
1239 | break; | |
1240 | } | |
1241 | ||
1242 | /* Alas, a genuine violation. */ | |
1243 | else | |
1244 | { | |
1245 | /* Two or more *real* mappings here. */ | |
1246 | __mf_violation ((void *) ptr, sz, | |
1247 | (uintptr_t) __builtin_return_address (0), NULL, | |
1248 | __MF_VIOL_REGISTER); | |
1249 | } | |
1250 | } | |
1251 | else /* No overlapping objects: AOK. */ | |
1252 | __mf_insert_new_object (low, high, type, name, pc); | |
fb925a51 | 1253 | |
cfbd22d7 FCE |
1254 | /* We could conceivably call __mf_check() here to prime the cache, |
1255 | but then the read_count/write_count field is not reliable. */ | |
1256 | break; | |
6de9cd9a DN |
1257 | } |
1258 | } /* end switch (__mf_opts.mudflap_mode) */ | |
1259 | } | |
1260 | ||
1261 | ||
/* Public entry point for object unregistration: serialize against
   other threads and guard against re-entry, then delegate to the
   unlocked worker __mfu_unregister.  */
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
1271 | ||
1272 | ||
/* Unlocked core of object unregistration.  Locate the object record
   for [ptr, ptr+sz) of TYPE, unlink and uncache it, optionally wipe
   its storage, and either retire it to the cemetery (for later
   dead-object diagnostics) or free it.  Caller must hold the lock.  */
void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache.  */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL; /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed.  */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        /* NB: when num_overlapping_objs == 0, old_obj is NULL; the
           "!= 1" test below short-circuits the || before old_obj is
           dereferenced.  */
        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired.  */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetary.  */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch.  */
            old_obj->description_epoch --;

            /* Put this object into the cemetary.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj.  */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              /* Advance the circular-buffer head, wrapping at
                 persistent_count.  */
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                /* The problem with a warning message here is that we may not
                   be privy to accesses to such objects that occur within
                   uninstrumented libraries.  */
#if 0
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
#endif
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj.  */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */


  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}
1426 | ||
6de9cd9a DN |
1427 | |
1428 | ||
/* Accumulator used by __mf_adapt_cache_fn while walking the object
   trees to pick new lookup-cache parameters.  */
struct tree_stats
{
  unsigned obj_count;          /* Objects accessed at least once.  */
  unsigned long total_size;    /* Sum of their sizes in bytes.  */
  unsigned live_obj_count;     /* Objects with nonzero liveness.  */
  double total_weight;         /* Sum of liveness values.  */
  double weighted_size;        /* Liveness-weighted sum of sizes.  */
  /* Per address-bit position: liveness-weighted counts of 0 and 1
     bits, used to find a good cache-shift discriminant.  */
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};
1438 | ||
1439 | ||
6de9cd9a | 1440 | |
cfbd22d7 | 1441 | static int |
fc5515a8 | 1442 | __mf_adapt_cache_fn (mfsplay_tree_node n, void *param) |
cfbd22d7 FCE |
1443 | { |
1444 | __mf_object_t *obj = (__mf_object_t *) n->value; | |
1445 | struct tree_stats *s = (struct tree_stats *) param; | |
6de9cd9a | 1446 | |
cfbd22d7 | 1447 | assert (obj != NULL && s != NULL); |
fb925a51 | 1448 | |
6de9cd9a | 1449 | /* Exclude never-accessed objects. */ |
cfbd22d7 | 1450 | if (obj->read_count + obj->write_count) |
6de9cd9a DN |
1451 | { |
1452 | s->obj_count ++; | |
cfbd22d7 FCE |
1453 | s->total_size += (obj->high - obj->low + 1); |
1454 | ||
1455 | if (obj->liveness) | |
1456 | { | |
1457 | unsigned i; | |
1458 | uintptr_t addr; | |
1459 | ||
1460 | /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n", | |
1461 | (void *) obj->low, obj->liveness, obj->name); */ | |
1462 | ||
1463 | s->live_obj_count ++; | |
1464 | s->total_weight += (double) obj->liveness; | |
1465 | s->weighted_size += | |
1466 | (double) (obj->high - obj->low + 1) * | |
1467 | (double) obj->liveness; | |
1468 | ||
1469 | addr = obj->low; | |
1470 | for (i=0; i<sizeof(uintptr_t) * 8; i++) | |
1471 | { | |
1472 | unsigned bit = addr & 1; | |
1473 | s->weighted_address_bits[i][bit] += obj->liveness; | |
1474 | addr = addr >> 1; | |
1475 | } | |
1476 | ||
1477 | /* Age the liveness value. */ | |
1478 | obj->liveness >>= 1; | |
1479 | } | |
6de9cd9a DN |
1480 | } |
1481 | ||
cfbd22d7 | 1482 | return 0; |
6de9cd9a DN |
1483 | } |
1484 | ||
1485 | ||
1486 | static void | |
1487 | __mf_adapt_cache () | |
1488 | { | |
1489 | struct tree_stats s; | |
1490 | uintptr_t new_mask = 0; | |
1491 | unsigned char new_shift; | |
1492 | float cache_utilization; | |
1493 | float max_value; | |
1494 | static float smoothed_new_shift = -1.0; | |
1495 | unsigned i; | |
1496 | ||
1497 | memset (&s, 0, sizeof (s)); | |
cfbd22d7 | 1498 | |
fc5515a8 FCE |
1499 | mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s); |
1500 | mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s); | |
1501 | mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s); | |
1502 | mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s); | |
1503 | mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s); | |
6de9cd9a DN |
1504 | |
1505 | /* Maybe we're dealing with funny aging/adaptation parameters, or an | |
1506 | empty tree. Just leave the cache alone in such cases, rather | |
1507 | than risk dying by division-by-zero. */ | |
1508 | if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0)) | |
1509 | return; | |
1510 | ||
1511 | /* Guess a good value for the shift parameter by finding an address bit that is a | |
1512 | good discriminant of lively objects. */ | |
1513 | max_value = 0.0; | |
1514 | for (i=0; i<sizeof (uintptr_t)*8; i++) | |
1515 | { | |
1516 | float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1]; | |
1517 | if (max_value < value) max_value = value; | |
1518 | } | |
1519 | for (i=0; i<sizeof (uintptr_t)*8; i++) | |
1520 | { | |
1521 | float shoulder_factor = 0.7; /* Include slightly less popular bits too. */ | |
1522 | float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1]; | |
1523 | if (value >= max_value * shoulder_factor) | |
cfbd22d7 | 1524 | break; |
6de9cd9a DN |
1525 | } |
1526 | if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift; | |
fb925a51 | 1527 | /* Converge toward this slowly to reduce flapping. */ |
6de9cd9a DN |
1528 | smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i; |
1529 | new_shift = (unsigned) (smoothed_new_shift + 0.5); | |
1530 | assert (new_shift < sizeof (uintptr_t)*8); | |
1531 | ||
1532 | /* Count number of used buckets. */ | |
1533 | cache_utilization = 0.0; | |
1534 | for (i = 0; i < (1 + __mf_lc_mask); i++) | |
1535 | if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0) | |
1536 | cache_utilization += 1.0; | |
1537 | cache_utilization /= (1 + __mf_lc_mask); | |
1538 | ||
ddfabf89 | 1539 | new_mask |= 0xffff; /* XXX: force a large cache. */ |
6de9cd9a DN |
1540 | new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1); |
1541 | ||
1542 | VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => " | |
cfbd22d7 FCE |
1543 | "util=%u%% m=%p s=%u\n", |
1544 | s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size, | |
1545 | (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift); | |
6de9cd9a DN |
1546 | |
1547 | /* We should reinitialize cache if its parameters have changed. */ | |
1548 | if (new_mask != __mf_lc_mask || | |
1549 | new_shift != __mf_lc_shift) | |
1550 | { | |
1551 | __mf_lc_mask = new_mask; | |
1552 | __mf_lc_shift = new_shift; | |
1553 | /* XXX: race */ | |
1554 | memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache)); | |
1555 | /* void slot 0 */ | |
1556 | __mf_lookup_cache[0].low = MAXPTR; | |
1557 | } | |
1558 | } | |
1559 | ||
1560 | ||
1561 | ||
6de9cd9a DN |
1562 | /* __mf_find_object[s] */ |
1563 | ||
1564 | /* Find overlapping live objecs between [low,high]. Return up to | |
1565 | max_objs of their pointers in objs[]. Return total count of | |
1566 | overlaps (may exceed max_objs). */ | |
1567 | ||
fb925a51 MS |
1568 | unsigned |
1569 | __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high, | |
cfbd22d7 | 1570 | __mf_object_t **objs, unsigned max_objs, int type) |
6de9cd9a | 1571 | { |
cfbd22d7 | 1572 | unsigned count = 0; |
fc5515a8 FCE |
1573 | mfsplay_tree t = __mf_object_tree (type); |
1574 | mfsplay_tree_key k = (mfsplay_tree_key) ptr_low; | |
cfbd22d7 | 1575 | int direction; |
6de9cd9a | 1576 | |
fc5515a8 | 1577 | mfsplay_tree_node n = mfsplay_tree_lookup (t, k); |
cfbd22d7 FCE |
1578 | /* An exact match for base address implies a hit. */ |
1579 | if (n != NULL) | |
6de9cd9a | 1580 | { |
cfbd22d7 FCE |
1581 | if (count < max_objs) |
1582 | objs[count] = (__mf_object_t *) n->value; | |
6de9cd9a | 1583 | count ++; |
6de9cd9a DN |
1584 | } |
1585 | ||
cfbd22d7 FCE |
1586 | /* Iterate left then right near this key value to find all overlapping objects. */ |
1587 | for (direction = 0; direction < 2; direction ++) | |
6de9cd9a | 1588 | { |
cfbd22d7 | 1589 | /* Reset search origin. */ |
fc5515a8 | 1590 | k = (mfsplay_tree_key) ptr_low; |
cfbd22d7 FCE |
1591 | |
1592 | while (1) | |
1593 | { | |
1594 | __mf_object_t *obj; | |
fb925a51 | 1595 | |
fc5515a8 | 1596 | n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k)); |
cfbd22d7 FCE |
1597 | if (n == NULL) break; |
1598 | obj = (__mf_object_t *) n->value; | |
fb925a51 | 1599 | |
cfbd22d7 FCE |
1600 | if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */ |
1601 | break; | |
fb925a51 | 1602 | |
cfbd22d7 FCE |
1603 | if (count < max_objs) |
1604 | objs[count] = (__mf_object_t *) n->value; | |
1605 | count ++; | |
1606 | ||
fc5515a8 | 1607 | k = (mfsplay_tree_key) obj->low; |
cfbd22d7 | 1608 | } |
6de9cd9a DN |
1609 | } |
1610 | ||
1611 | return count; | |
1612 | } | |
1613 | ||
1614 | ||
1615 | unsigned | |
1616 | __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high, | |
cfbd22d7 | 1617 | __mf_object_t **objs, unsigned max_objs) |
6de9cd9a | 1618 | { |
cfbd22d7 FCE |
1619 | int type; |
1620 | unsigned count = 0; | |
6de9cd9a | 1621 | |
cfbd22d7 FCE |
1622 | /* Search each splay tree for overlaps. */ |
1623 | for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++) | |
6de9cd9a | 1624 | { |
cfbd22d7 FCE |
1625 | unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type); |
1626 | if (c > max_objs) | |
1627 | { | |
1628 | max_objs = 0; | |
1629 | objs = NULL; | |
1630 | } | |
1631 | else /* NB: C may equal 0 */ | |
1632 | { | |
1633 | max_objs -= c; | |
1634 | objs += c; | |
1635 | } | |
1636 | count += c; | |
6de9cd9a DN |
1637 | } |
1638 | ||
cfbd22d7 | 1639 | return count; |
6de9cd9a DN |
1640 | } |
1641 | ||
1642 | ||
6de9cd9a | 1643 | |
cfbd22d7 | 1644 | /* __mf_link_object */ |
6de9cd9a DN |
1645 | |
1646 | static void | |
cfbd22d7 | 1647 | __mf_link_object (__mf_object_t *node) |
6de9cd9a | 1648 | { |
fc5515a8 FCE |
1649 | mfsplay_tree t = __mf_object_tree (node->type); |
1650 | mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node); | |
6de9cd9a DN |
1651 | } |
1652 | ||
cfbd22d7 FCE |
1653 | /* __mf_unlink_object */ |
1654 | ||
6de9cd9a | 1655 | static void |
cfbd22d7 | 1656 | __mf_unlink_object (__mf_object_t *node) |
6de9cd9a | 1657 | { |
fc5515a8 FCE |
1658 | mfsplay_tree t = __mf_object_tree (node->type); |
1659 | mfsplay_tree_remove (t, (mfsplay_tree_key) node->low); | |
6de9cd9a DN |
1660 | } |
1661 | ||
1662 | /* __mf_find_dead_objects */ | |
1663 | ||
1664 | /* Find overlapping dead objecs between [low,high]. Return up to | |
1665 | max_objs of their pointers in objs[]. Return total count of | |
1666 | overlaps (may exceed max_objs). */ | |
1667 | ||
/* Search the object cemetery for dead objects overlapping [LOW, HIGH].
   Store up to MAX_OBJS of their pointers in OBJS; return the total
   count found (may exceed MAX_OBJS).  Returns 0 when no cemetery is
   kept (persistent_count == 0).  */
static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time.  */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              /* Examine the most recent (recollection+1) plots in this row.  */
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer.  */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object!  */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    } else {
      return 0;
    }
}
1725 | ||
/* __mf_describe_object */

/* Print a human-readable description of OBJ to stderr: address range,
   size, area type, access counts, and allocation context (plus
   deallocation context when persistence keeps dead objects).  Calling
   with OBJ == NULL only bumps the static description epoch, which
   resets the "already described" suppression used by the abbreviate
   option.  */
static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  /* With abbreviation enabled, an object already described during this
     epoch gets only the one-line summary.  */
  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
    }

  /* Deallocation details exist only when the persistence option kept
     the object around after its lifetime ended.  */
  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );


          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
1810 | ||
cfbd22d7 FCE |
1811 | |
1812 | static int | |
fc5515a8 | 1813 | __mf_report_leaks_fn (mfsplay_tree_node n, void *param) |
6de9cd9a | 1814 | { |
cfbd22d7 FCE |
1815 | __mf_object_t *node = (__mf_object_t *) n->value; |
1816 | unsigned *count = (unsigned *) param; | |
6de9cd9a | 1817 | |
cfbd22d7 FCE |
1818 | if (count != NULL) |
1819 | (*count) ++; | |
6de9cd9a | 1820 | |
cfbd22d7 FCE |
1821 | fprintf (stderr, "Leaked object %u:\n", (*count)); |
1822 | __mf_describe_object (node); | |
1823 | ||
1824 | return 0; | |
1825 | } | |
1826 | ||
1827 | ||
1828 | static unsigned | |
1829 | __mf_report_leaks () | |
1830 | { | |
1831 | unsigned count = 0; | |
1832 | ||
fc5515a8 | 1833 | (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), |
cfbd22d7 | 1834 | __mf_report_leaks_fn, & count); |
fc5515a8 | 1835 | (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), |
cfbd22d7 | 1836 | __mf_report_leaks_fn, & count); |
6de9cd9a DN |
1837 | |
1838 | return count; | |
1839 | } | |
1840 | ||
1841 | /* ------------------------------------------------------------------------ */ | |
1842 | /* __mf_report */ | |
1843 | ||
/* Public entry point: emit the mudflap report while holding the
   runtime lock and the recursion guard, then delegate to the unlocked
   worker __mfu_report.  */
void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
1853 | ||
/* Unlocked report worker: dump call statistics, lookup-cache usage,
   live/zombie object counts, and (in check mode, when enabled) the
   leak report, all to stderr.  Callers are expected to hold the lock;
   use __mf_report for the locked entry point.  */
void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               " __mf_unregister: %lu [%luB]\n"
               " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               " lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats: how many slots ever hit, and the peak
         reuse count of any single slot.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      /* Count every registered object by scanning the full address range.  */
      {
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      /* With persistence enabled, also count occupied cemetery plots.  */
      if (__mf_opts.persistent_count > 0)
        {
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, " zombie objects: %u\n", dead_count);
        }
    }
  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks. */
      __mf_wrap_alloca_indirect (0);
#ifdef HAVE___LIBC_FREERES
      /* Ask glibc to release its internal caches so they are not
         reported as leaks.  */
      if (__mf_opts.call_libc_freeres)
        {
          extern void __libc_freeres (void);
          __libc_freeres ();
        }
#endif

      __mf_describe_object (NULL); /* Reset description epoch. */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
1940 | ||
1941 | /* __mf_backtrace */ | |
1942 | ||
/* Capture the current call stack as an array of printable strings.
   On return, *SYMBOLS points at a heap-allocated array (caller frees
   with the real free()) and the function returns the number of
   entries.  GUESS_PC, when non-NULL and found in the raw trace, marks
   where the interesting frames start; otherwise the first
   GUESS_OMIT_LEVELS frames are dropped as libmudflap-internal noise.  */
size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  /* NOTE(review): the calloc result is not checked for NULL before use
     below -- presumably tolerated because a failing runtime is already
     in trouble here; confirm before relying on it.  */
  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
                      pc_array[n] = __builtin_return_address(n); \
                      if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls __builtin_return_address because this function
     only takes a literal integer parameter. */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0. :-( */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  /* NOTE(review): there is no break here, so if GUESS_PC appears more
     than once the LAST occurrence wins -- presumably intended to trim
     past the deepest wrapper frame; confirm against callers.  */
  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards. */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 }; /* bytes reserved per formatted "[0x...]" line */
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
2029 | ||
2030 | /* ------------------------------------------------------------------------ */ | |
2031 | /* __mf_violation */ | |
2032 | ||
/* Report an access violation at PC touching [PTR, PTR+SZ), described
   by LOCATION (may be NULL) and the __MF_VIOL_* code TYPE.  When
   verbose, prints a banner, a backtrace, and nearby live/dead objects
   to help diagnosis; then acts per the configured violation_mode
   (nothing, SIGSEGV, abort, or attaching gdb).  */
void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  /* Out-of-range TYPE values are folded into bucket 0.  */
  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message. */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }


      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking. */

      if (sz == 0) sz = 1;

      /* First pass (dead_p==0) searches live objects, second pass
         (dead_p==1) searches the cemetery of deallocated ones.  */
      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers. */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              /* Distances of the checked region's endpoints relative to
                 this object: before / after / inside.  Zero means "not in
                 that relation"; at most one of each triple is nonzero.  */
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation? */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:

      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.) */
      break;
    }
}
2183 | ||
2184 | /* ------------------------------------------------------------------------ */ | |
2185 | ||
2186 | ||
2187 | unsigned __mf_watch (void *ptr, size_t sz) | |
2188 | { | |
2189 | unsigned rc; | |
2190 | LOCKTH (); | |
2191 | BEGIN_RECURSION_PROTECT (); | |
2192 | rc = __mf_watch_or_not (ptr, sz, 1); | |
2193 | END_RECURSION_PROTECT (); | |
2194 | UNLOCKTH (); | |
2195 | return rc; | |
2196 | } | |
2197 | ||
2198 | unsigned __mf_unwatch (void *ptr, size_t sz) | |
2199 | { | |
2200 | unsigned rc; | |
2201 | LOCKTH (); | |
2202 | rc = __mf_watch_or_not (ptr, sz, 0); | |
2203 | UNLOCKTH (); | |
2204 | return rc; | |
2205 | } | |
2206 | ||
2207 | ||
2208 | static unsigned | |
2209 | __mf_watch_or_not (void *ptr, size_t sz, char flag) | |
2210 | { | |
2211 | uintptr_t ptr_high = CLAMPSZ (ptr, sz); | |
2212 | uintptr_t ptr_low = (uintptr_t) ptr; | |
2213 | unsigned count = 0; | |
2214 | ||
2215 | TRACE ("%s ptr=%p size=%lu\n", | |
cfbd22d7 | 2216 | (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz); |
fb925a51 | 2217 | |
6de9cd9a DN |
2218 | switch (__mf_opts.mudflap_mode) |
2219 | { | |
2220 | case mode_nop: | |
2221 | case mode_populate: | |
2222 | case mode_violate: | |
2223 | count = 0; | |
2224 | break; | |
2225 | ||
2226 | case mode_check: | |
2227 | { | |
cfbd22d7 FCE |
2228 | __mf_object_t **all_ovr_objs; |
2229 | unsigned obj_count; | |
2230 | unsigned n; | |
2231 | DECLARE (void *, malloc, size_t c); | |
2232 | DECLARE (void, free, void *p); | |
2233 | ||
2234 | obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0); | |
2235 | VERBOSE_TRACE (" %u:", obj_count); | |
2236 | ||
2237 | all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count)); | |
2238 | if (all_ovr_objs == NULL) abort (); | |
2239 | n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count); | |
2240 | assert (n == obj_count); | |
2241 | ||
2242 | for (n = 0; n < obj_count; n ++) | |
2243 | { | |
2244 | __mf_object_t *obj = all_ovr_objs[n]; | |
2245 | ||
2246 | VERBOSE_TRACE (" [%p]", (void *) obj); | |
2247 | if (obj->watching_p != flag) | |
2248 | { | |
2249 | obj->watching_p = flag; | |
2250 | count ++; | |
2251 | ||
2252 | /* Remove object from cache, to ensure next access | |
2253 | goes through __mf_check(). */ | |
2254 | if (flag) | |
2255 | __mf_uncache_object (obj); | |
2256 | } | |
2257 | } | |
2258 | CALL_REAL (free, all_ovr_objs); | |
6de9cd9a DN |
2259 | } |
2260 | break; | |
2261 | } | |
2262 | ||
2263 | return count; | |
2264 | } | |
2265 | ||
2266 | ||
/* SIGUSR1 handler: only counts receipt.  The actual report is produced
   later, outside signal context, by __mf_sigusr1_respond comparing
   this counter against __mf_sigusr1_handled.  */
void
__mf_sigusr1_handler (int num)
{
  __mf_sigusr1_received ++;
}
2272 | ||
/* Install or remove SIGUSR1 handler as necessary.
   Also, respond to a received pending SIGUSR1. */
void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler: track the sigusr1_report option, installing or
     restoring SIG_DFL as it toggles.  */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if(! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals: emit one report per signal received since
     the last response.  */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_get_state () == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
    }
}
2303 | ||
2304 | ||
2305 | /* XXX: provide an alternative __assert_fail function that cannot | |
2306 | fail due to libmudflap infinite recursion. */ | |
2307 | #ifndef NDEBUG | |
2308 | ||
/* Emit the decimal representation of N to file descriptor FD using
   only write() -- no stdio, no allocation -- so it can be used from
   the assertion-failure path without re-entering the runtime.  */
static void
write_itoa (int fd, unsigned n)
{
  enum x { bufsize = sizeof(n)*4 };
  char buf [bufsize];
  char *cursor = & buf [bufsize-1];

  /* Build the digit string right-to-left, least significant first,
     terminating at the buffer's final byte.  */
  *cursor = '\0';
  do
    {
      cursor --;
      *cursor = '0' + (n % 10);
      n /= 10;
    }
  while (n != 0 && cursor > buf);

  write (fd, cursor, strlen (cursor));
}
2330 | ||
2331 | ||
/* Replacement assertion-failure hook that uses only raw write() calls
   and write_itoa -- no stdio, no malloc -- so a failing assert inside
   the mudflap runtime cannot recurse back into instrumented code.
   Emits "mf[(tid)]: assertion failure: `MSG' in FUNC at FILE:LINE"
   on fd 2, then aborts.  */
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  /* Include the reporting thread's id in the banner.  */
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}
2354 | ||
2355 | ||
2356 | #endif | |
cfbd22d7 FCE |
2357 | |
2358 | ||
2359 | ||
fc5515a8 FCE |
2360 | /* Adapted splay tree code, originally from libiberty. It has been |
2361 | specialized for libmudflap as requested by RMS. */ | |
cfbd22d7 FCE |
2362 | |
/* Release P via the real (uninstrumented) libc free, bypassing the
   mudflap wrappers, since splay-tree bookkeeping must not itself be
   tracked.  */
static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}
2369 | ||
/* Allocate S bytes via the real (uninstrumented) libc malloc.
   NOTE(review): despite the "x" name, the result is NOT checked for
   NULL here; callers must cope with allocation failure -- confirm at
   each call site.  */
static void *
mfsplay_tree_xmalloc (size_t s)
{
  DECLARE (void *, malloc, size_t s);
  return CALL_REAL (malloc, s);
}
2376 | ||
fc5515a8 FCE |
2377 | |
2378 | static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key); | |
2379 | static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree, | |
2380 | mfsplay_tree_key, | |
2381 | mfsplay_tree_node *, | |
2382 | mfsplay_tree_node *, | |
2383 | mfsplay_tree_node *); | |
fc5515a8 FCE |
2384 | |
2385 | ||
/* Help splay SP around KEY.  PARENT and GRANDPARENT are the parent
   and grandparent, respectively, of NODE.  Recurses toward KEY, then
   rotates the found (or last-visited) node upward on the way back.
   If the recursion exceeds sp->max_depth, sets sp->rebalance_p and
   bails out so the caller can rebalance and retry.  */

static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  Single rotation.  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children (the "zig-zig" shapes).  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa (the "zig-zag" shapes).  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
2506 | ||
2507 | ||
2508 | ||
2509 | static int | |
2510 | mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr) | |
2511 | { | |
2512 | mfsplay_tree_node **p = array_ptr; | |
2513 | *(*p) = n; | |
2514 | (*p)++; | |
2515 | return 0; | |
2516 | } | |
2517 | ||
2518 | ||
2519 | static mfsplay_tree_node | |
2520 | mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low, | |
2521 | unsigned high) | |
2522 | { | |
2523 | unsigned middle = low + (high - low) / 2; | |
2524 | mfsplay_tree_node n = array[middle]; | |
2525 | ||
2526 | /* Note that since we're producing a balanced binary tree, it is not a problem | |
2527 | that this function is recursive. */ | |
2528 | if (low + 1 <= middle) | |
2529 | n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1); | |
2530 | else | |
2531 | n->left = NULL; | |
2532 | ||
2533 | if (middle + 1 <= high) | |
2534 | n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high); | |
2535 | else | |
2536 | n->right = NULL; | |
2537 | ||
2538 | return n; | |
2539 | } | |
2540 | ||
2541 | ||
2542 | /* Rebalance the entire tree. Do this by copying all the node | |
2543 | pointers into an array, then cleverly re-linking them. */ | |
2544 | static void | |
2545 | mfsplay_tree_rebalance (mfsplay_tree sp) | |
2546 | { | |
2547 | mfsplay_tree_node *all_nodes, *all_nodes_1; | |
2548 | ||
2549 | if (sp->num_keys <= 2) | |
2550 | return; | |
2551 | ||
2552 | all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys); | |
2553 | ||
2554 | /* Traverse all nodes to copy their addresses into this array. */ | |
2555 | all_nodes_1 = all_nodes; | |
2556 | mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1, | |
2557 | (void *) &all_nodes_1); | |
2558 | ||
2559 | /* Relink all the nodes. */ | |
2560 | sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1); | |
2561 | ||
2562 | mfsplay_tree_free (all_nodes); | |
2563 | } | |
2564 | ||
2565 | ||
/* Splay SP around KEY, bringing the node with that key (or the last
   node visited while searching for it) to the root.  Depth-limited:
   if the helper hits the recursion cap, the tree is rebalanced and the
   splay retried once.  */
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack. */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  if (sp->rebalance_p)
    {
      /* The helper bailed out at max_depth; rebalance to a shallow
         shape and retry once.  A second bail-out cannot happen on a
         freshly balanced tree, hence the abort.  */
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }


  /* Cache this splay key.  */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
2607 | ||
2608 | ||
2609 | ||
2610 | /* Allocate a new splay tree. */ | |
2611 | static mfsplay_tree | |
2612 | mfsplay_tree_new () | |
2613 | { | |
2614 | mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s)); | |
2615 | sp->root = NULL; | |
2616 | sp->last_splayed_key_p = 0; | |
2617 | sp->num_keys = 0; | |
2618 | ||
2619 | return sp; | |
2620 | } | |
2621 | ||
2622 | ||
2623 | ||
/* Insert a new node (associating KEY with DATA) into SP.  If a
   previous node with the indicated KEY exists, its data is replaced
   with the new value.  Returns the new node.  */
static mfsplay_tree_node
mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value)
{
  int comparison = 0;

  /* Bring KEY's node -- or, if absent, a nearest neighbor -- to the
     root.  */
  mfsplay_tree_splay (sp, key);

  /* comparison: +1 if root key > KEY, -1 if root key < KEY, 0 on match.  */
  if (sp->root)
    comparison = ((sp->root->key > key) ? 1 :
                  ((sp->root->key < key) ? -1 : 0));

  if (sp->root && comparison == 0)
    {
      /* If the root of the tree already has the indicated KEY, just
         replace the value with VALUE.  */
      sp->root->value = value;
    }
  else
    {
      /* Create a new node, and insert it at the root.  */
      mfsplay_tree_node node;

      node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s));
      node->key = key;
      node->value = value;
      sp->num_keys++;
      if (!sp->root)
        node->left = node->right = 0;
      else if (comparison < 0)
        {
          /* Old root's key < KEY: old root becomes the new root's
             left child, keeping its own left subtree and donating its
             right subtree (all keys > old root's key, and > KEY only
             on the right of the splay) to the new root's right.  */
          node->left = sp->root;
          node->right = node->left->right;
          node->left->right = 0;
        }
      else
        {
          /* Old root's key > KEY: mirror image of the case above.  */
          node->right = sp->root;
          node->left = node->right->left;
          node->right->left = 0;
        }

      sp->root = node;
      /* The root changed, so the cached splay key is stale.  */
      sp->last_splayed_key_p = 0;
    }

  return sp->root;
}
2674 | ||
/* Remove KEY from SP.  It is not an error if it did not exist.  */

static void
mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key)
{
  /* Bring KEY's node (if present) to the root.  */
  mfsplay_tree_splay (sp, key);
  /* The root is about to change (or already did via the splay), so
     invalidate the cached splay key.  */
  sp->last_splayed_key_p = 0;
  if (sp->root && (sp->root->key == key))
    {
      mfsplay_tree_node left, right;
      left = sp->root->left;
      right = sp->root->right;
      /* Delete the root node itself.  */
      mfsplay_tree_free (sp->root);
      sp->num_keys--;
      /* One of the children is now the root.  Doesn't matter much
         which, so long as we preserve the properties of the tree.  */
      if (left)
        {
          sp->root = left;
          /* If there was a right child as well, hang it off the
             right-most leaf of the left child.  */
          if (right)
            {
              while (left->right)
                left = left->right;
              left->right = right;
            }
        }
      else
        sp->root = right;
    }
}
2708 | ||
fb925a51 | 2709 | /* Lookup KEY in SP, returning VALUE if present, and NULL |
fc5515a8 FCE |
2710 | otherwise. */ |
2711 | ||
2712 | static mfsplay_tree_node | |
2713 | mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key) | |
2714 | { | |
2715 | mfsplay_tree_splay (sp, key); | |
73c3d568 | 2716 | if (sp->root && (sp->root->key == key)) |
fc5515a8 FCE |
2717 | return sp->root; |
2718 | else | |
2719 | return 0; | |
2720 | } | |
2721 | ||
2722 | ||
2723 | /* Return the immediate predecessor KEY, or NULL if there is no | |
2724 | predecessor. KEY need not be present in the tree. */ | |
2725 | ||
2726 | static mfsplay_tree_node | |
2727 | mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key) | |
2728 | { | |
2729 | int comparison; | |
2730 | mfsplay_tree_node node; | |
2731 | /* If the tree is empty, there is certainly no predecessor. */ | |
2732 | if (!sp->root) | |
2733 | return NULL; | |
2734 | /* Splay the tree around KEY. That will leave either the KEY | |
2735 | itself, its predecessor, or its successor at the root. */ | |
2736 | mfsplay_tree_splay (sp, key); | |
73c3d568 FCE |
2737 | comparison = ((sp->root->key > key) ? 1 : |
2738 | ((sp->root->key < key) ? -1 : 0)); | |
2739 | ||
fc5515a8 FCE |
2740 | /* If the predecessor is at the root, just return it. */ |
2741 | if (comparison < 0) | |
2742 | return sp->root; | |
2743 | /* Otherwise, find the rightmost element of the left subtree. */ | |
2744 | node = sp->root->left; | |
2745 | if (node) | |
2746 | while (node->right) | |
2747 | node = node->right; | |
2748 | return node; | |
2749 | } | |
2750 | ||
2751 | /* Return the immediate successor KEY, or NULL if there is no | |
2752 | successor. KEY need not be present in the tree. */ | |
2753 | ||
2754 | static mfsplay_tree_node | |
2755 | mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key) | |
2756 | { | |
2757 | int comparison; | |
2758 | mfsplay_tree_node node; | |
2759 | /* If the tree is empty, there is certainly no successor. */ | |
2760 | if (!sp->root) | |
2761 | return NULL; | |
2762 | /* Splay the tree around KEY. That will leave either the KEY | |
2763 | itself, its predecessor, or its successor at the root. */ | |
2764 | mfsplay_tree_splay (sp, key); | |
73c3d568 FCE |
2765 | comparison = ((sp->root->key > key) ? 1 : |
2766 | ((sp->root->key < key) ? -1 : 0)); | |
fc5515a8 FCE |
2767 | /* If the successor is at the root, just return it. */ |
2768 | if (comparison > 0) | |
2769 | return sp->root; | |
2770 | /* Otherwise, find the leftmost element of the right subtree. */ | |
2771 | node = sp->root->right; | |
2772 | if (node) | |
2773 | while (node->left) | |
2774 | node = node->left; | |
2775 | return node; | |
2776 | } | |
2777 | ||
2778 | /* Call FN, passing it the DATA, for every node in SP, following an | |
2779 | in-order traversal. If FN every returns a non-zero value, the | |
2780 | iteration ceases immediately, and the value is returned. | |
2781 | Otherwise, this function returns 0. | |
fb925a51 | 2782 | |
fc5515a8 FCE |
2783 | This function simulates recursion using dynamically allocated |
2784 | arrays, since it may be called from mfsplay_tree_rebalance(), which | |
2785 | in turn means that the tree is already uncomfortably deep for stack | |
2786 | space limits. */ | |
2787 | static int | |
2788 | mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data) | |
2789 | { | |
2790 | mfsplay_tree_node *stack1; | |
2791 | char *stack2; | |
2792 | unsigned sp; | |
2793 | int val = 0; | |
2794 | enum s { s_left, s_here, s_right, s_up }; | |
2795 | ||
2796 | if (st->root == NULL) /* => num_keys == 0 */ | |
2797 | return 0; | |
2798 | ||
2799 | stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys); | |
2800 | stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys); | |
2801 | ||
2802 | sp = 0; | |
2803 | stack1 [sp] = st->root; | |
2804 | stack2 [sp] = s_left; | |
2805 | ||
2806 | while (1) | |
2807 | { | |
2808 | mfsplay_tree_node n; | |
2809 | enum s s; | |
2810 | ||
2811 | n = stack1 [sp]; | |
2812 | s = stack2 [sp]; | |
2813 | ||
2814 | /* Handle each of the four possible states separately. */ | |
2815 | ||
2816 | /* 1: We're here to traverse the left subtree (if any). */ | |
2817 | if (s == s_left) | |
2818 | { | |
2819 | stack2 [sp] = s_here; | |
2820 | if (n->left != NULL) | |
2821 | { | |
2822 | sp ++; | |
2823 | stack1 [sp] = n->left; | |
2824 | stack2 [sp] = s_left; | |
2825 | } | |
2826 | } | |
2827 | ||
2828 | /* 2: We're here to traverse this node. */ | |
2829 | else if (s == s_here) | |
2830 | { | |
2831 | stack2 [sp] = s_right; | |
2832 | val = (*fn) (n, data); | |
2833 | if (val) break; | |
2834 | } | |
2835 | ||
2836 | /* 3: We're here to traverse the right subtree (if any). */ | |
2837 | else if (s == s_right) | |
2838 | { | |
2839 | stack2 [sp] = s_up; | |
2840 | if (n->right != NULL) | |
2841 | { | |
2842 | sp ++; | |
2843 | stack1 [sp] = n->right; | |
2844 | stack2 [sp] = s_left; | |
2845 | } | |
2846 | } | |
2847 | ||
2848 | /* 4: We're here after both subtrees (if any) have been traversed. */ | |
2849 | else if (s == s_up) | |
2850 | { | |
2851 | /* Pop the stack. */ | |
2852 | if (sp == 0) break; /* Popping off the root note: we're finished! */ | |
2853 | sp --; | |
2854 | } | |
2855 | ||
2856 | else | |
2857 | abort (); | |
2858 | } | |
2859 | ||
2860 | mfsplay_tree_free (stack1); | |
2861 | mfsplay_tree_free (stack2); | |
2862 | return val; | |
2863 | } |