]>
Commit | Line | Data |
---|---|---|
6de9cd9a | 1 | /* Mudflap: narrow-pointer bounds-checking by tree rewriting. |
0e5997c0 JJ |
2 | Copyright (C) 2002, 2003, 2004, 2005, 2006, 2008 |
3 | Free Software Foundation, Inc. | |
6de9cd9a DN |
4 | Contributed by Frank Ch. Eigler <fche@redhat.com> |
5 | and Graydon Hoare <graydon@redhat.com> | |
fc5515a8 FCE |
6 | Splay Tree code originally by Mark Mitchell <mark@markmitchell.com>, |
7 | adapted from libiberty. | |
6de9cd9a DN |
8 | |
9 | This file is part of GCC. | |
10 | ||
11 | GCC is free software; you can redistribute it and/or modify it under | |
12 | the terms of the GNU General Public License as published by the Free | |
13 | Software Foundation; either version 2, or (at your option) any later | |
14 | version. | |
15 | ||
16 | In addition to the permissions in the GNU General Public License, the | |
17 | Free Software Foundation gives you unlimited permission to link the | |
18 | compiled version of this file into combinations with other programs, | |
19 | and to distribute those combinations without any restriction coming | |
20 | from the use of this file. (The General Public License restrictions | |
21 | do apply in other respects; for example, they cover modification of | |
22 | the file, and distribution when not linked into a combine | |
23 | executable.) | |
24 | ||
25 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
26 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
27 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
28 | for more details. | |
29 | ||
30 | You should have received a copy of the GNU General Public License | |
31 | along with GCC; see the file COPYING. If not, write to the Free | |
f9d09c43 KC |
32 | Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA |
33 | 02110-1301, USA. */ | |
6de9cd9a DN |
34 | |
35 | #include "config.h" | |
36 | ||
37 | /* These attempt to coax various unix flavours to declare all our | |
38 | needed tidbits in the system headers. */ | |
029277b7 | 39 | #if !defined(__FreeBSD__) && !defined(__APPLE__) |
6de9cd9a DN |
40 | #define _POSIX_SOURCE |
41 | #endif /* Some BSDs break <sys/socket.h> if this is defined. */ | |
fb925a51 | 42 | #define _GNU_SOURCE |
6de9cd9a DN |
43 | #define _XOPEN_SOURCE |
44 | #define _BSD_TYPES | |
45 | #define __EXTENSIONS__ | |
46 | #define _ALL_SOURCE | |
47 | #define _LARGE_FILE_API | |
48 | #define _XOPEN_SOURCE_EXTENDED 1 | |
49 | ||
50 | #include <stdio.h> | |
51 | #include <stdlib.h> | |
52 | #include <sys/types.h> | |
53 | #include <sys/time.h> | |
54 | #include <time.h> | |
55 | #include <unistd.h> | |
56 | #ifdef HAVE_EXECINFO_H | |
57 | #include <execinfo.h> | |
58 | #endif | |
59 | #ifdef HAVE_SIGNAL_H | |
60 | #include <signal.h> | |
61 | #endif | |
62 | #include <assert.h> | |
63 | ||
64 | #include <string.h> | |
65 | #include <limits.h> | |
66 | #include <sys/types.h> | |
67 | #include <signal.h> | |
68 | #include <errno.h> | |
dc88d66f | 69 | #include <ctype.h> |
6de9cd9a DN |
70 | |
71 | #include "mf-runtime.h" | |
72 | #include "mf-impl.h" | |
73 | ||
74 | ||
fc5515a8 FCE |
/* ------------------------------------------------------------------------ */
/* Splay-tree implementation. */

/* Keys are addresses (object base addresses); values are opaque pointers. */
typedef uintptr_t mfsplay_tree_key;
typedef void *mfsplay_tree_value;

/* Forward declaration for a node in the tree. */
typedef struct mfsplay_tree_node_s *mfsplay_tree_node;

/* The type of a function used to iterate over the tree.  A nonzero
   return from the callback stops the traversal early.  */
typedef int (*mfsplay_tree_foreach_fn) (mfsplay_tree_node, void *);

/* The nodes in the splay tree. */
struct mfsplay_tree_node_s
{
  /* Data. */
  mfsplay_tree_key key;
  mfsplay_tree_value value;
  /* Children. */
  mfsplay_tree_node left;
  mfsplay_tree_node right;
  /* XXX: The addition of a parent pointer may eliminate some recursion. */
};

/* The splay tree itself. */
struct mfsplay_tree_s
{
  /* The root of the tree. */
  mfsplay_tree_node root;

  /* The last key value for which the tree has been splayed, but not
     since modified.  Used to avoid redundant splay operations.  */
  mfsplay_tree_key last_splayed_key;
  int last_splayed_key_p;

  /* Statistics. */
  unsigned num_keys;

  /* Traversal recursion control flags (guard against stack overflow
     on deep, unbalanced trees; see rebalance below).  */
  unsigned max_depth;
  unsigned depth;
  unsigned rebalance_p;
};
typedef struct mfsplay_tree_s *mfsplay_tree;

/* Forward declarations of the splay-tree operations, defined later in
   this file.  */
static mfsplay_tree mfsplay_tree_new (void);
static mfsplay_tree_node mfsplay_tree_insert (mfsplay_tree, mfsplay_tree_key, mfsplay_tree_value);
static void mfsplay_tree_remove (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_lookup (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_predecessor (mfsplay_tree, mfsplay_tree_key);
static mfsplay_tree_node mfsplay_tree_successor (mfsplay_tree, mfsplay_tree_key);
static int mfsplay_tree_foreach (mfsplay_tree, mfsplay_tree_foreach_fn, void *);
static void mfsplay_tree_rebalance (mfsplay_tree sp);
6de9cd9a DN |
/* ------------------------------------------------------------------------ */
/* Utility macros */

/* Mark functions for automatic execution at program startup/shutdown
   (GCC constructor/destructor attributes).  */
#define CTOR  __attribute__ ((constructor))
#define DTOR  __attribute__ ((destructor))


/* Codes to describe the context in which a violation occurs. */
#define __MF_VIOL_UNKNOWN 0
#define __MF_VIOL_READ 1
#define __MF_VIOL_WRITE 2
#define __MF_VIOL_REGISTER 3
#define __MF_VIOL_UNREGISTER 4
#define __MF_VIOL_WATCH 5
144 | /* Protect against recursive calls. */ | |
6de9cd9a | 145 | |
7544a87f RH |
146 | static void |
147 | begin_recursion_protect1 (const char *pf) | |
148 | { | |
149 | if (__mf_get_state () == reentrant) | |
150 | { | |
151 | write (2, "mf: erroneous reentrancy detected in `", 38); | |
152 | write (2, pf, strlen(pf)); | |
153 | write (2, "'\n", 2); \ | |
154 | abort (); | |
155 | } | |
156 | __mf_set_state (reentrant); | |
157 | } | |
6de9cd9a | 158 | |
7544a87f RH |
159 | #define BEGIN_RECURSION_PROTECT() \ |
160 | begin_recursion_protect1 (__PRETTY_FUNCTION__) | |
6de9cd9a | 161 | |
7544a87f RH |
162 | #define END_RECURSION_PROTECT() \ |
163 | __mf_set_state (active) | |
6de9cd9a DN |
164 | |
/* ------------------------------------------------------------------------ */
/* Required globals.  */

#define LOOKUP_CACHE_MASK_DFL 1023
#define LOOKUP_CACHE_SIZE_MAX 65536 /* Allows max CACHE_MASK 0xFFFF */
#define LOOKUP_CACHE_SHIFT_DFL 2

/* Cache of recently-checked address ranges, consulted before a full
   object-tree search.  Statically sized for the largest permitted
   mask; only the first (__mf_lc_mask + 1) slots are active.  */
struct __mf_cache __mf_lookup_cache [LOOKUP_CACHE_SIZE_MAX];
uintptr_t __mf_lc_mask = LOOKUP_CACHE_MASK_DFL;
unsigned char __mf_lc_shift = LOOKUP_CACHE_SHIFT_DFL;
#define LOOKUP_CACHE_SIZE (__mf_lc_mask + 1)

struct __mf_options __mf_opts;
/* Nonzero until __mf_init has completed its bootstrap phase.  */
int __mf_starting_p = 1;

/* Runtime state flag.  Starts out "reentrant" and is switched to
   "active" in __mf_init; per-thread when TLS is available.  */
#ifdef LIBMUDFLAPTH
#ifdef HAVE_TLS
__thread enum __mf_state_enum __mf_state_1 = reentrant;
#endif
#else
enum __mf_state_enum __mf_state_1 = reentrant;
#endif

/* Single global lock serializing all runtime entry points.  */
#ifdef LIBMUDFLAPTH
pthread_mutex_t __mf_biglock =
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
       PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
#else
       PTHREAD_MUTEX_INITIALIZER;
#endif
#endif

/* Use HAVE_PTHREAD_H here instead of LIBMUDFLAPTH, so that even
   the libmudflap.la (no threading support) can diagnose whether
   the application is linked with -lpthread.  See __mf_usage() below. */
#if HAVE_PTHREAD_H
#ifdef _POSIX_THREADS
#pragma weak pthread_join
#else
#define pthread_join NULL
#endif
#endif


/* ------------------------------------------------------------------------ */
/* stats-related globals.  */

static unsigned long __mf_count_check;
static unsigned long __mf_lookup_cache_reusecount [LOOKUP_CACHE_SIZE_MAX];
static unsigned long __mf_count_register;
static unsigned long __mf_total_register_size [__MF_TYPE_MAX+1];
static unsigned long __mf_count_unregister;
static unsigned long __mf_total_unregister_size;
static unsigned long __mf_count_violation [__MF_VIOL_WATCH+1];
static unsigned long __mf_sigusr1_received;
static unsigned long __mf_sigusr1_handled;
/* not static */ unsigned long __mf_reentrancy;
#ifdef LIBMUDFLAPTH
/* not static */ unsigned long __mf_lock_contention;
#endif
225 | ||
226 | ||
/* ------------------------------------------------------------------------ */
/* mode-check-related globals.  */

/* Descriptor for one registered memory object (checked region).  */
typedef struct __mf_object
{
  uintptr_t low, high; /* __mf_register parameters */
  const char *name;
  char type; /* __MF_TYPE_something */
  char watching_p; /* Trigger a VIOL_WATCH on access? */
  unsigned read_count; /* Number of times __mf_check/read was called on this object.  */
  unsigned write_count; /* Likewise for __mf_check/write.  */
  unsigned liveness; /* A measure of recent checking activity.  */
  unsigned description_epoch; /* Last epoch __mf_describe_object printed this.  */

  /* Provenance recorded at registration: PC, timestamp, and an
     optional backtrace.  */
  uintptr_t alloc_pc;
  struct timeval alloc_time;
  char **alloc_backtrace;
  size_t alloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t alloc_thread;
#endif

  /* The same data recorded again at unregistration, for diagnosing
     accesses made after the object was unregistered.  */
  int deallocated_p;
  uintptr_t dealloc_pc;
  struct timeval dealloc_time;
  char **dealloc_backtrace;
  size_t dealloc_backtrace_size;
#ifdef LIBMUDFLAPTH
  pthread_t dealloc_thread;
#endif
} __mf_object_t;

/* Live objects: splay trees, separated by type, ordered on .low (base address).  */
/* Actually stored as static vars within lookup function below.  */

/* Dead objects: circular arrays; _MIN_CEM .. _MAX_CEM only */
static unsigned __mf_object_dead_head[__MF_TYPE_MAX_CEM+1]; /* next empty spot */
static __mf_object_t *__mf_object_cemetary[__MF_TYPE_MAX_CEM+1][__MF_PERSIST_MAX];


/* ------------------------------------------------------------------------ */
/* Forward function declarations */

void __mf_init () CTOR;
static void __mf_sigusr1_respond ();
static unsigned __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                   __mf_object_t **objs, unsigned max_objs);
static unsigned __mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                                    __mf_object_t **objs, unsigned max_objs, int type);
static unsigned __mf_find_dead_objects (uintptr_t ptr_low, uintptr_t ptr_high,
                                        __mf_object_t **objs, unsigned max_objs);
static void __mf_adapt_cache ();
static void __mf_describe_object (__mf_object_t *obj);
static unsigned __mf_watch_or_not (void *ptr, size_t sz, char flag);
static mfsplay_tree __mf_object_tree (int type);
static void __mf_link_object (__mf_object_t *node);
static void __mf_unlink_object (__mf_object_t *node);
6de9cd9a DN |
284 | |
285 | ||
/* ------------------------------------------------------------------------ */
/* Configuration engine */

/* Reset __mf_opts to its compiled-in defaults.  Called from __mf_init
   before the MUDFLAP_OPTIONS environment variable is consulted.  */
static void
__mf_set_default_options ()
{
  memset (& __mf_opts, 0, sizeof (__mf_opts));

  __mf_opts.adapt_cache = 1000003;  /* lookup-cache retuning period; see "lc-adapt" */
  __mf_opts.abbreviate = 1;
  __mf_opts.verbose_violations = 1;
  __mf_opts.free_queue_length = 4;
  __mf_opts.persistent_count = 100;
  __mf_opts.crumple_zone = 32;
  __mf_opts.backtrace = 4;
  __mf_opts.timestamps = 1;
  __mf_opts.mudflap_mode = mode_check;
  __mf_opts.violation_mode = viol_nop;
#ifdef HAVE___LIBC_FREERES
  __mf_opts.call_libc_freeres = 1;
#endif
  __mf_opts.heur_std_data = 1;
#ifdef LIBMUDFLAPTH
  __mf_opts.thread_stack = 0;
#endif
}
312 | ||
/* Table mapping MUDFLAP_OPTIONS tokens to the __mf_opts fields they
   control; terminated by an all-zero sentinel entry.  Consulted by
   __mfu_set_options (parsing) and __mf_usage (help text).  */
static struct mudoption
{
  char *name;            /* option token as written in MUDFLAP_OPTIONS */
  char *description;     /* one-line help text */
  enum
    {
      set_option,          /* store `value' into *target (0 when negated) */
      read_integer_option, /* parse "name=N"; store N into *target */
    } type;
  unsigned value;
  unsigned *target;
}
options [] =
  {
    {"mode-nop",
     "mudflaps do nothing",
     set_option, (unsigned)mode_nop, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-populate",
     "mudflaps populate object tree",
     set_option, (unsigned)mode_populate, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-check",
     "mudflaps check for memory violations",
     set_option, (unsigned)mode_check, (unsigned *)&__mf_opts.mudflap_mode},
    {"mode-violate",
     "mudflaps always cause violations (diagnostic)",
     set_option, (unsigned)mode_violate, (unsigned *)&__mf_opts.mudflap_mode},

    {"viol-nop",
     "violations do not change program execution",
     set_option, (unsigned)viol_nop, (unsigned *)&__mf_opts.violation_mode},
    {"viol-abort",
     "violations cause a call to abort()",
     set_option, (unsigned)viol_abort, (unsigned *)&__mf_opts.violation_mode},
    {"viol-segv",
     "violations are promoted to SIGSEGV signals",
     set_option, (unsigned)viol_segv, (unsigned *)&__mf_opts.violation_mode},
    {"viol-gdb",
     "violations fork a gdb process attached to current program",
     set_option, (unsigned)viol_gdb, (unsigned *)&__mf_opts.violation_mode},
    {"trace-calls",
     "trace calls to mudflap runtime library",
     set_option, 1, &__mf_opts.trace_mf_calls},
    {"verbose-trace",
     "trace internal events within mudflap runtime library",
     set_option, 1, &__mf_opts.verbose_trace},
    {"collect-stats",
     "collect statistics on mudflap's operation",
     set_option, 1, &__mf_opts.collect_stats},
#ifdef SIGUSR1
    {"sigusr1-report",
     "print report upon SIGUSR1",
     set_option, 1, &__mf_opts.sigusr1_report},
#endif
    {"internal-checking",
     "perform more expensive internal checking",
     set_option, 1, &__mf_opts.internal_checking},
    {"print-leaks",
     "print any memory leaks at program shutdown",
     set_option, 1, &__mf_opts.print_leaks},
#ifdef HAVE___LIBC_FREERES
    {"libc-freeres",
     "call glibc __libc_freeres at shutdown for better leak data",
     set_option, 1, &__mf_opts.call_libc_freeres},
#endif
    {"check-initialization",
     "detect uninitialized object reads",
     set_option, 1, &__mf_opts.check_initialization},
    {"verbose-violations",
     "print verbose messages when memory violations occur",
     set_option, 1, &__mf_opts.verbose_violations},
    {"abbreviate",
     "abbreviate repetitive listings",
     set_option, 1, &__mf_opts.abbreviate},
    {"timestamps",
     "track object lifetime timestamps",
     set_option, 1, &__mf_opts.timestamps},
    {"ignore-reads",
     "ignore read accesses - assume okay",
     set_option, 1, &__mf_opts.ignore_reads},
    {"wipe-stack",
     "wipe stack objects at unwind",
     set_option, 1, &__mf_opts.wipe_stack},
    {"wipe-heap",
     "wipe heap objects at free",
     set_option, 1, &__mf_opts.wipe_heap},
    {"heur-proc-map",
     "support /proc/self/map heuristics",
     set_option, 1, &__mf_opts.heur_proc_map},
    {"heur-stack-bound",
     "enable a simple upper stack bound heuristic",
     set_option, 1, &__mf_opts.heur_stack_bound},
    {"heur-start-end",
     "support _start.._end heuristics",
     set_option, 1, &__mf_opts.heur_start_end},
    {"heur-stdlib",
     "register standard library data (argv, errno, stdin, ...)",
     set_option, 1, &__mf_opts.heur_std_data},
    {"free-queue-length",
     "queue N deferred free() calls before performing them",
     read_integer_option, 0, &__mf_opts.free_queue_length},
    {"persistent-count",
     "keep a history of N unregistered regions",
     read_integer_option, 0, &__mf_opts.persistent_count},
    {"crumple-zone",
     "surround allocations with crumple zones of N bytes",
     read_integer_option, 0, &__mf_opts.crumple_zone},
    /* XXX: not type-safe.
    {"lc-mask",
     "set lookup cache size mask to N (2**M - 1)",
     read_integer_option, 0, (int *)(&__mf_lc_mask)},
    {"lc-shift",
     "set lookup cache pointer shift",
     read_integer_option, 0, (int *)(&__mf_lc_shift)},
    */
    {"lc-adapt",
     "adapt mask/shift parameters after N cache misses",
     read_integer_option, 1, &__mf_opts.adapt_cache},
    {"backtrace",
     "keep an N-level stack trace of each call context",
     read_integer_option, 0, &__mf_opts.backtrace},
#ifdef LIBMUDFLAPTH
    {"thread-stack",
     "override thread stacks allocation: N kB",
     read_integer_option, 0, &__mf_opts.thread_stack},
#endif
    {0, 0, set_option, 0, NULL}  /* sentinel */
  };
440 | ||
441 | static void | |
442 | __mf_usage () | |
443 | { | |
3b088eb0 | 444 | struct mudoption *opt; |
6de9cd9a | 445 | |
fb925a51 | 446 | fprintf (stderr, |
cfbd22d7 | 447 | "This is a %s%sGCC \"mudflap\" memory-checked binary.\n" |
0e5997c0 | 448 | "Mudflap is Copyright (C) 2002-2008 Free Software Foundation, Inc.\n" |
cfbd22d7 FCE |
449 | "\n" |
450 | "The mudflap code can be controlled by an environment variable:\n" | |
451 | "\n" | |
452 | "$ export MUDFLAP_OPTIONS='<options>'\n" | |
453 | "$ <mudflapped_program>\n" | |
454 | "\n" | |
455 | "where <options> is a space-separated list of \n" | |
456 | "any of the following options. Use `-no-OPTION' to disable options.\n" | |
457 | "\n", | |
6de9cd9a | 458 | #if HAVE_PTHREAD_H |
7544a87f | 459 | (pthread_join ? "multi-threaded " : "single-threaded "), |
6de9cd9a | 460 | #else |
cfbd22d7 | 461 | "", |
6de9cd9a DN |
462 | #endif |
463 | #if LIBMUDFLAPTH | |
cfbd22d7 | 464 | "thread-aware " |
6de9cd9a | 465 | #else |
cfbd22d7 | 466 | "thread-unaware " |
6de9cd9a | 467 | #endif |
cfbd22d7 | 468 | ); |
6de9cd9a DN |
469 | /* XXX: The multi-threaded thread-unaware combination is bad. */ |
470 | ||
471 | for (opt = options; opt->name; opt++) | |
472 | { | |
473 | int default_p = (opt->value == * opt->target); | |
474 | ||
475 | switch (opt->type) | |
cfbd22d7 FCE |
476 | { |
477 | char buf[128]; | |
478 | case set_option: | |
479 | fprintf (stderr, "-%-23.23s %s", opt->name, opt->description); | |
480 | if (default_p) | |
481 | fprintf (stderr, " [active]\n"); | |
482 | else | |
483 | fprintf (stderr, "\n"); | |
484 | break; | |
485 | case read_integer_option: | |
486 | strncpy (buf, opt->name, 128); | |
487 | strncpy (buf + strlen (opt->name), "=N", 2); | |
488 | fprintf (stderr, "-%-23.23s %s", buf, opt->description); | |
489 | fprintf (stderr, " [%d]\n", * opt->target); | |
fb925a51 | 490 | break; |
cfbd22d7 FCE |
491 | default: abort(); |
492 | } | |
6de9cd9a DN |
493 | } |
494 | ||
495 | fprintf (stderr, "\n"); | |
496 | } | |
497 | ||
498 | ||
/* Public, locked entry point for changing runtime options at run time.
   Takes the big lock and the recursion guard, then delegates to
   __mfu_set_options.  Returns 0 on success, -1 on parse error or an
   explicit help request.  */
int
__mf_set_options (const char *optstr)
{
  int rc;
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  rc = __mfu_set_options (optstr);
  /* XXX: It's not really that easy.  A change to a bunch of parameters
     can require updating auxiliary state or risk crashing:
     free_queue_length, crumple_zone ... */
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
  return rc;
}
513 | ||
514 | ||
fb925a51 | 515 | int |
6de9cd9a DN |
516 | __mfu_set_options (const char *optstr) |
517 | { | |
3b088eb0 | 518 | struct mudoption *opts = 0; |
6de9cd9a DN |
519 | char *nxt = 0; |
520 | long tmp = 0; | |
521 | int rc = 0; | |
522 | const char *saved_optstr = optstr; | |
523 | ||
524 | /* XXX: bounds-check for optstr! */ | |
525 | ||
526 | while (*optstr) | |
527 | { | |
528 | switch (*optstr) { | |
529 | case ' ': | |
530 | case '\t': | |
531 | case '\n': | |
cfbd22d7 FCE |
532 | optstr++; |
533 | break; | |
6de9cd9a DN |
534 | |
535 | case '-': | |
cfbd22d7 | 536 | if (*optstr+1) |
fb925a51 | 537 | { |
cfbd22d7 FCE |
538 | int negate = 0; |
539 | optstr++; | |
540 | ||
fb925a51 | 541 | if (*optstr == '?' || |
cfbd22d7 FCE |
542 | strncmp (optstr, "help", 4) == 0) |
543 | { | |
544 | /* Caller will print help and exit. */ | |
545 | return -1; | |
546 | } | |
fb925a51 | 547 | |
cfbd22d7 FCE |
548 | if (strncmp (optstr, "no-", 3) == 0) |
549 | { | |
550 | negate = 1; | |
551 | optstr = & optstr[3]; | |
552 | } | |
fb925a51 | 553 | |
cfbd22d7 FCE |
554 | for (opts = options; opts->name; opts++) |
555 | { | |
556 | if (strncmp (optstr, opts->name, strlen (opts->name)) == 0) | |
557 | { | |
558 | optstr += strlen (opts->name); | |
559 | assert (opts->target); | |
fb925a51 | 560 | switch (opts->type) |
cfbd22d7 FCE |
561 | { |
562 | case set_option: | |
563 | if (negate) | |
564 | *(opts->target) = 0; | |
565 | else | |
566 | *(opts->target) = opts->value; | |
567 | break; | |
568 | case read_integer_option: | |
569 | if (! negate && (*optstr == '=' && *(optstr+1))) | |
570 | { | |
571 | optstr++; | |
572 | tmp = strtol (optstr, &nxt, 10); | |
573 | if ((optstr != nxt) && (tmp != LONG_MAX)) | |
574 | { | |
fb925a51 | 575 | optstr = nxt; |
cfbd22d7 FCE |
576 | *(opts->target) = (int)tmp; |
577 | } | |
578 | } | |
579 | else if (negate) | |
580 | * opts->target = 0; | |
581 | break; | |
582 | } | |
583 | } | |
584 | } | |
585 | } | |
586 | break; | |
fb925a51 | 587 | |
6de9cd9a | 588 | default: |
fb925a51 | 589 | fprintf (stderr, |
cfbd22d7 FCE |
590 | "warning: unrecognized string '%s' in mudflap options\n", |
591 | optstr); | |
592 | optstr += strlen (optstr); | |
593 | rc = -1; | |
594 | break; | |
6de9cd9a DN |
595 | } |
596 | } | |
597 | ||
598 | /* Special post-processing: bound __mf_lc_mask and free_queue_length for security. */ | |
599 | __mf_lc_mask &= (LOOKUP_CACHE_SIZE_MAX - 1); | |
600 | __mf_opts.free_queue_length &= (__MF_FREEQ_MAX - 1); | |
601 | ||
602 | /* Clear the lookup cache, in case the parameters got changed. */ | |
603 | /* XXX: race */ | |
604 | memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache)); | |
605 | /* void slot 0 */ | |
606 | __mf_lookup_cache[0].low = MAXPTR; | |
607 | ||
608 | TRACE ("set options from `%s'\n", saved_optstr); | |
609 | ||
610 | /* Call this unconditionally, in case -sigusr1-report was toggled. */ | |
611 | __mf_sigusr1_respond (); | |
612 | ||
613 | return rc; | |
614 | } | |
615 | ||
616 | ||
617 | #ifdef PIC | |
618 | ||
/* Resolve one interposed libc entry point: fill in E->pointer with the
   next definition of E->name after this library (RTLD_NEXT), using
   dlvsym when a specific symbol version is requested.  Aborts on any
   resolution failure.  Idempotent: returns immediately if already
   resolved.  */
void
__mf_resolve_single_dynamic (struct __mf_dynamic_entry *e)
{
  char *err;

  assert (e);
  if (e->pointer) return;  /* already resolved */

#if HAVE_DLVSYM
  if (e->version != NULL && e->version[0] != '\0') /* non-null/empty */
    e->pointer = dlvsym (RTLD_NEXT, e->name, e->version);
  else
#endif
    e->pointer = dlsym (RTLD_NEXT, e->name);

  /* dlerror() distinguishes "resolved to NULL" from "lookup failed".  */
  err = dlerror ();

  if (err)
    {
      fprintf (stderr, "mf: error in dlsym(\"%s\"): %s\n",
               e->name, err);
      abort ();
    }
  if (! e->pointer)
    {
      fprintf (stderr, "mf: dlsym(\"%s\") = NULL\n", e->name);
      abort ();
    }
}
648 | ||
649 | ||
fb925a51 MS |
650 | static void |
651 | __mf_resolve_dynamics () | |
6de9cd9a DN |
652 | { |
653 | int i; | |
654 | for (i = 0; i < dyn_INITRESOLVE; i++) | |
655 | __mf_resolve_single_dynamic (& __mf_dynamic[i]); | |
656 | } | |
657 | ||
658 | ||
/* Table of libc functions this library interposes; each entry is
   resolved lazily by __mf_resolve_single_dynamic.  Entries before the
   "DUMMY" marker are resolved eagerly at startup.  */
/* NB: order must match enums in mf-impl.h */
struct __mf_dynamic_entry __mf_dynamic [] =
{
  {NULL, "calloc", NULL},
  {NULL, "free", NULL},
  {NULL, "malloc", NULL},
  {NULL, "mmap", NULL},
  {NULL, "munmap", NULL},
  {NULL, "realloc", NULL},
  {NULL, "DUMMY", NULL}, /* dyn_INITRESOLVE */
#ifdef LIBMUDFLAPTH
  {NULL, "pthread_create", PTHREAD_CREATE_VERSION},
  {NULL, "pthread_join", NULL},
  {NULL, "pthread_exit", NULL}
#endif
};
675 | ||
676 | #endif /* PIC */ | |
677 | ||
678 | ||
679 | ||
680 | /* ------------------------------------------------------------------------ */ | |
681 | ||
cfbd22d7 | 682 | /* Lookup & manage automatic initialization of the five or so splay trees. */ |
fc5515a8 | 683 | static mfsplay_tree |
cfbd22d7 FCE |
684 | __mf_object_tree (int type) |
685 | { | |
fc5515a8 | 686 | static mfsplay_tree trees [__MF_TYPE_MAX+1]; |
cfbd22d7 FCE |
687 | assert (type >= 0 && type <= __MF_TYPE_MAX); |
688 | if (UNLIKELY (trees[type] == NULL)) | |
fc5515a8 | 689 | trees[type] = mfsplay_tree_new (); |
cfbd22d7 FCE |
690 | return trees[type]; |
691 | } | |
6de9cd9a | 692 | |
cfbd22d7 | 693 | |
/* Runtime initializer, run as a constructor (and callable directly):
   resolves interposed libc symbols (PIC builds), sets defaults, applies
   MUDFLAP_OPTIONS, and registers the runtime's own data plus the NULL
   page as no-access regions.  Safe to call more than once.  */
/* not static */void
__mf_init ()
{
  char *ov = 0;

  /* Return if initialization has already been done. */
  if (LIKELY (__mf_starting_p == 0))
    return;

  /* This initial bootstrap phase requires that __mf_starting_p = 1. */
#ifdef PIC
  __mf_resolve_dynamics ();
#endif
  __mf_starting_p = 0;

  __mf_set_state (active);

  __mf_set_default_options ();

  /* Apply user overrides; a parse error or help request prints usage
     and exits.  */
  ov = getenv ("MUDFLAP_OPTIONS");
  if (ov)
    {
      int rc = __mfu_set_options (ov);
      if (rc < 0)
        {
          __mf_usage ();
          exit (1);
        }
    }

  /* Initialize to a non-zero description epoch. */
  __mf_describe_object (NULL);

  /* Register the runtime's own control data as no-access so the
     instrumented program cannot touch it.  */
#define REG_RESERVED(obj) \
  __mf_register (& obj, sizeof(obj), __MF_TYPE_NOACCESS, # obj)

  REG_RESERVED (__mf_lookup_cache);
  REG_RESERVED (__mf_lc_mask);
  REG_RESERVED (__mf_lc_shift);
  /* XXX: others of our statics?  */

  /* Prevent access to *NULL. */
  __mf_register (MINPTR, 1, __MF_TYPE_NOACCESS, "NULL");
  __mf_lookup_cache[0].low = (uintptr_t) -1;
}
739 | ||
740 | ||
741 | ||
/* Wrapper interposed around the program's main() (linker --wrap
   convention; symbol preemption in PIC builds).  On first entry, when
   the heur-stdlib option is enabled, registers standard process data
   -- argv, environ, errno, the stdio streams -- as __MF_TYPE_STATIC
   objects, then calls the real main.  */
int
__wrap_main (int argc, char* argv[])
{
  extern char **environ;
  extern int main ();
  extern int __real_main ();
  static int been_here = 0;

  if (__mf_opts.heur_std_data && ! been_here)
    {
      unsigned i;

      been_here = 1;
      /* The argv[] vector (argc+1 slots, incl. NULL) and each string.  */
      __mf_register (argv, sizeof(char *)*(argc+1), __MF_TYPE_STATIC, "argv[]");
      for (i=0; i<argc; i++)
        {
          unsigned j = strlen (argv[i]);
          __mf_register (argv[i], j+1, __MF_TYPE_STATIC, "argv element");
        }

      /* Each environment string, then the environ[] vector itself
         (i+1 covers the terminating NULL slot).  */
      for (i=0; ; i++)
        {
          char *e = environ[i];
          unsigned j;
          if (e == NULL) break;
          j = strlen (environ[i]);
          __mf_register (environ[i], j+1, __MF_TYPE_STATIC, "environ element");
        }
      __mf_register (environ, sizeof(char *)*(i+1), __MF_TYPE_STATIC, "environ[]");

      __mf_register (& errno, sizeof (errno), __MF_TYPE_STATIC, "errno area");

      __mf_register (stdin, sizeof (*stdin), __MF_TYPE_STATIC, "stdin");
      __mf_register (stdout, sizeof (*stdout), __MF_TYPE_STATIC, "stdout");
      __mf_register (stderr, sizeof (*stderr), __MF_TYPE_STATIC, "stderr");

      /* Make some effort to register ctype.h static arrays.  */
      /* XXX: e.g., on Solaris, may need to register __ctype, _ctype, __ctype_mask, __toupper, etc. */
      /* On modern Linux GLIBC, these are thread-specific and changeable, and are dealt
         with in mf-hooks2.c. */
    }

#ifdef PIC
  return main (argc, argv, environ);
#else
  return __real_main (argc, argv, environ);
#endif
}
790 | ||
791 | ||
792 | ||
/* Shutdown hook, run as a destructor: emit the final report, and for
   statically linked programs disable further checking (see comment).  */
extern void __mf_fini () DTOR;
void __mf_fini ()
{
  TRACE ("__mf_fini\n");
  __mfu_report ();

#ifndef PIC
/* Since we didn't populate the tree for allocations in constructors
   before __mf_init, we cannot check destructors after __mf_fini.  */
  __mf_opts.mudflap_mode = mode_nop;
#endif
}
805 | ||
806 | ||
807 | ||
808 | /* ------------------------------------------------------------------------ */ | |
809 | /* __mf_check */ | |
810 | ||
/* Public entry point for memory-access checks emitted by instrumented
   code: take the big lock and the recursion guard, then delegate to
   the unlocked __mfu_check.  PTR/SZ describe the accessed region,
   TYPE distinguishes read from write, LOCATION is a source label for
   diagnostics.  */
void __mf_check (void *ptr, size_t sz, int type, const char *location)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_check (ptr, sz, type, location);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
819 | ||
820 | ||
/* Unlocked core of the access check: decide whether the access
   [ptr, ptr+sz) of the given TYPE (0 = read — matches the
   __MF_CHECK_READ comparisons below; otherwise write) is legal under
   the current mudflap mode, updating the lookup cache on success and
   raising a violation on failure.  LOCATION is a source-location
   string used only for tracing/diagnostics.  */
void __mfu_check (void *ptr, size_t sz, int type, const char *location)
{
  unsigned entry_idx = __MF_CACHE_INDEX (ptr);
  struct __mf_cache *entry = & __mf_lookup_cache [entry_idx];
  int judgement = 0; /* 0=undecided; <0=violation; >0=okay */
  uintptr_t ptr_low = (uintptr_t) ptr;
  uintptr_t ptr_high = CLAMPSZ (ptr, sz);  /* Inclusive upper bound, clamped against overflow.  */
  struct __mf_cache old_entry = *entry;    /* Snapshot for the reuse-count statistic below.  */

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();
  /* Optionally skip checking reads entirely.  */
  if (UNLIKELY (__mf_opts.ignore_reads && type == 0))
    return;

  TRACE ("check ptr=%p b=%u size=%lu %s location=`%s'\n",
         ptr, entry_idx, (unsigned long)sz,
         (type == 0 ? "read" : "write"), location);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      /* It is tempting to poison the cache here similarly to
         mode_populate.  However that eliminates a valuable
         distinction between these two modes.  mode_nop is useful to
         let a user count & trace every single check / registration
         call.  mode_populate is useful to let a program run fast
         while unchecked.
      */
      judgement = 1;
      break;

    case mode_populate:
      /* Blindly record the access in the cache so subsequent inline
         checks hit; no validation is performed.  */
      entry->low = ptr_low;
      entry->high = ptr_high;
      judgement = 1;
      break;

    case mode_check:
      {
        unsigned heuristics = 0;

        /* Advance aging/adaptation counters. */
        static unsigned adapt_count;
        adapt_count ++;
        if (UNLIKELY (__mf_opts.adapt_cache > 0 &&
                      adapt_count > __mf_opts.adapt_cache))
          {
            adapt_count = 0;
            __mf_adapt_cache ();
          }

        /* Looping only occurs if heuristics were triggered. */
        while (judgement == 0)
          {
            DECLARE (void, free, void *p);
            __mf_object_t* ovr_obj[1];
            unsigned obj_count;
            __mf_object_t** all_ovr_obj = NULL;
            __mf_object_t** dealloc_me = NULL;
            unsigned i;

            /* Find all overlapping objects.  Be optimistic that there is just one. */
            obj_count = __mf_find_objects (ptr_low, ptr_high, ovr_obj, 1);
            if (UNLIKELY (obj_count > 1))
              {
                /* Allocate a real buffer and do the search again. */
                DECLARE (void *, malloc, size_t c);
                unsigned n;
                all_ovr_obj = CALL_REAL (malloc, (sizeof (__mf_object_t *) *
                                                  obj_count));
                if (all_ovr_obj == NULL) abort ();
                n = __mf_find_objects (ptr_low, ptr_high, all_ovr_obj, obj_count);
                assert (n == obj_count);
                dealloc_me = all_ovr_obj;
              }
            else
              {
                /* Zero or one overlap: the stack buffer suffices.  */
                all_ovr_obj = ovr_obj;
                dealloc_me = NULL;
              }

            /* Update object statistics. */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];
                assert (obj != NULL);
                if (type == __MF_CHECK_READ)
                  obj->read_count ++;
                else
                  obj->write_count ++;
                obj->liveness ++;
              }

            /* Iterate over the various objects.  There are a number of special cases. */
            for (i = 0; i < obj_count; i++)
              {
                __mf_object_t *obj = all_ovr_obj[i];

                /* Any __MF_TYPE_NOACCESS hit is bad. */
                if (UNLIKELY (obj->type == __MF_TYPE_NOACCESS))
                  judgement = -1;

                /* Any object with a watch flag is bad. */
                if (UNLIKELY (obj->watching_p))
                  judgement = -2; /* trigger VIOL_WATCH */

                /* A read from an uninitialized object is bad. */
                if (UNLIKELY (__mf_opts.check_initialization
                              /* reading */
                              && type == __MF_CHECK_READ
                              /* not written */
                              && obj->write_count == 0
                              /* uninitialized (heap) */
                              && obj->type == __MF_TYPE_HEAP))
                  judgement = -1;
              }

            /* We now know that the access spans no invalid objects. */
            if (LIKELY (judgement >= 0))
              for (i = 0; i < obj_count; i++)
                {
                  __mf_object_t *obj = all_ovr_obj[i];

                  /* Is this access entirely contained within this object? */
                  if (LIKELY (ptr_low >= obj->low && ptr_high <= obj->high))
                    {
                      /* Valid access: cache the containing object's bounds.  */
                      entry->low = obj->low;
                      entry->high = obj->high;
                      judgement = 1;
                    }
                }

            /* This access runs off the end of one valid object.  That
               could be okay, if other valid objects fill in all the
               holes.  We allow this only for HEAP and GUESS type
               objects.  Accesses to STATIC and STACK variables
               should not be allowed to span. */
            if (UNLIKELY ((judgement == 0) && (obj_count > 1)))
              {
                unsigned uncovered = 0;
                for (i = 0; i < obj_count; i++)
                  {
                    __mf_object_t *obj = all_ovr_obj[i];
                    int j, uncovered_low_p, uncovered_high_p;
                    uintptr_t ptr_lower, ptr_higher;

                    /* Does the access spill past this object on either side?  */
                    uncovered_low_p = ptr_low < obj->low;
                    ptr_lower = CLAMPSUB (obj->low, 1);
                    uncovered_high_p = ptr_high > obj->high;
                    ptr_higher = CLAMPADD (obj->high, 1);

                    for (j = 0; j < obj_count; j++)
                      {
                        __mf_object_t *obj2 = all_ovr_obj[j];

                        if (i == j) continue;

                        /* Filter out objects that cannot be spanned across. */
                        if (obj2->type == __MF_TYPE_STACK
                            || obj2->type == __MF_TYPE_STATIC)
                          continue;

                        /* Consider a side "covered" if obj2 includes
                           the next byte on that side. */
                        if (uncovered_low_p
                            && (ptr_lower >= obj2->low && ptr_lower <= obj2->high))
                          uncovered_low_p = 0;
                        /* NOTE(review): the low side tests ptr_lower twice but
                           this side tests ptr_high vs obj2->low — equivalent
                           here only because obj2 already overlaps the access;
                           confirm before touching.  */
                        if (uncovered_high_p
                            && (ptr_high >= obj2->low && ptr_higher <= obj2->high))
                          uncovered_high_p = 0;
                      }

                    if (uncovered_low_p || uncovered_high_p)
                      uncovered ++;
                  }

                /* Success if no overlapping objects are uncovered. */
                if (uncovered == 0)
                  judgement = 1;
              }


            if (dealloc_me != NULL)
              CALL_REAL (free, dealloc_me);

            /* If the judgment is still unknown at this stage, loop
               around at most one more time. */
            if (judgement == 0)
              {
                if (heuristics++ < 2) /* XXX parametrize this number? */
                  judgement = __mf_heuristic_check (ptr_low, ptr_high);
                else
                  judgement = -1;
              }
          }

      }
      break;

    case mode_violate:
      judgement = -1;
      break;
    }

  if (__mf_opts.collect_stats)
    {
      __mf_count_check ++;

      /* Count cache-slot turnover: the entry now holds different bounds.  */
      if (LIKELY (old_entry.low != entry->low || old_entry.high != entry->high))
        /* && (old_entry.low != 0) && (old_entry.high != 0)) */
        __mf_lookup_cache_reusecount [entry_idx] ++;
    }

  if (UNLIKELY (judgement < 0))
    __mf_violation (ptr, sz,
                    (uintptr_t) __builtin_return_address (0), location,
                    ((judgement == -1) ?
                     (type == __MF_CHECK_READ ? __MF_VIOL_READ : __MF_VIOL_WRITE) :
                     __MF_VIOL_WATCH));
}
1042 | ||
1043 | ||
cfbd22d7 | 1044 | static __mf_object_t * |
fb925a51 | 1045 | __mf_insert_new_object (uintptr_t low, uintptr_t high, int type, |
cfbd22d7 | 1046 | const char *name, uintptr_t pc) |
6de9cd9a DN |
1047 | { |
1048 | DECLARE (void *, calloc, size_t c, size_t n); | |
1049 | ||
cfbd22d7 FCE |
1050 | __mf_object_t *new_obj; |
1051 | new_obj = CALL_REAL (calloc, 1, sizeof(__mf_object_t)); | |
1052 | new_obj->low = low; | |
1053 | new_obj->high = high; | |
1054 | new_obj->type = type; | |
1055 | new_obj->name = name; | |
1056 | new_obj->alloc_pc = pc; | |
6de9cd9a | 1057 | #if HAVE_GETTIMEOFDAY |
a082fc7a FCE |
1058 | if (__mf_opts.timestamps) |
1059 | gettimeofday (& new_obj->alloc_time, NULL); | |
6de9cd9a DN |
1060 | #endif |
1061 | #if LIBMUDFLAPTH | |
cfbd22d7 | 1062 | new_obj->alloc_thread = pthread_self (); |
6de9cd9a DN |
1063 | #endif |
1064 | ||
1065 | if (__mf_opts.backtrace > 0 && (type == __MF_TYPE_HEAP || type == __MF_TYPE_HEAP_I)) | |
fb925a51 | 1066 | new_obj->alloc_backtrace_size = |
cfbd22d7 FCE |
1067 | __mf_backtrace (& new_obj->alloc_backtrace, |
1068 | (void *) pc, 2); | |
fb925a51 | 1069 | |
6de9cd9a DN |
1070 | __mf_link_object (new_obj); |
1071 | return new_obj; | |
1072 | } | |
1073 | ||
1074 | ||
fb925a51 | 1075 | static void |
6de9cd9a DN |
1076 | __mf_uncache_object (__mf_object_t *old_obj) |
1077 | { | |
1078 | /* Remove any low/high pointers for this object from the lookup cache. */ | |
fb925a51 | 1079 | |
6de9cd9a DN |
1080 | /* Can it possibly exist in the cache? */ |
1081 | if (LIKELY (old_obj->read_count + old_obj->write_count)) | |
1082 | { | |
1083 | uintptr_t low = old_obj->low; | |
1084 | uintptr_t high = old_obj->high; | |
84174531 | 1085 | struct __mf_cache *entry; |
6de9cd9a | 1086 | unsigned i; |
84174531 FCE |
1087 | if ((high - low) >= (__mf_lc_mask << __mf_lc_shift)) |
1088 | { | |
1089 | /* For large objects (>= cache size - 1) check the whole cache. */ | |
1090 | entry = & __mf_lookup_cache [0]; | |
1091 | for (i = 0; i <= __mf_lc_mask; i++, entry++) | |
cfbd22d7 | 1092 | { |
84174531 FCE |
1093 | /* NB: the "||" in the following test permits this code to |
1094 | tolerate the situation introduced by __mf_check over | |
1095 | contiguous objects, where a cache entry spans several | |
1096 | objects. */ | |
1097 | if (entry->low == low || entry->high == high) | |
1098 | { | |
1099 | entry->low = MAXPTR; | |
1100 | entry->high = MINPTR; | |
1101 | } | |
cfbd22d7 FCE |
1102 | } |
1103 | } | |
84174531 FCE |
1104 | else |
1105 | { | |
1106 | /* Object is now smaller then cache size. */ | |
1107 | unsigned entry_low_idx = __MF_CACHE_INDEX (low); | |
1108 | unsigned entry_high_idx = __MF_CACHE_INDEX (high); | |
1109 | if (entry_low_idx <= entry_high_idx) | |
1110 | { | |
1111 | entry = & __mf_lookup_cache [entry_low_idx]; | |
1112 | for (i = entry_low_idx; i <= entry_high_idx; i++, entry++) | |
1113 | { | |
1114 | /* NB: the "||" in the following test permits this code to | |
1115 | tolerate the situation introduced by __mf_check over | |
1116 | contiguous objects, where a cache entry spans several | |
1117 | objects. */ | |
1118 | if (entry->low == low || entry->high == high) | |
1119 | { | |
1120 | entry->low = MAXPTR; | |
1121 | entry->high = MINPTR; | |
1122 | } | |
1123 | } | |
1124 | } | |
1125 | else | |
1126 | { | |
1127 | /* Object wrapped around the end of the cache. First search | |
1128 | from low to end of cache and then from 0 to high. */ | |
1129 | entry = & __mf_lookup_cache [entry_low_idx]; | |
1130 | for (i = entry_low_idx; i <= __mf_lc_mask; i++, entry++) | |
1131 | { | |
1132 | /* NB: the "||" in the following test permits this code to | |
1133 | tolerate the situation introduced by __mf_check over | |
1134 | contiguous objects, where a cache entry spans several | |
1135 | objects. */ | |
1136 | if (entry->low == low || entry->high == high) | |
1137 | { | |
1138 | entry->low = MAXPTR; | |
1139 | entry->high = MINPTR; | |
1140 | } | |
1141 | } | |
1142 | entry = & __mf_lookup_cache [0]; | |
1143 | for (i = 0; i <= entry_high_idx; i++, entry++) | |
1144 | { | |
1145 | /* NB: the "||" in the following test permits this code to | |
1146 | tolerate the situation introduced by __mf_check over | |
1147 | contiguous objects, where a cache entry spans several | |
1148 | objects. */ | |
1149 | if (entry->low == low || entry->high == high) | |
1150 | { | |
1151 | entry->low = MAXPTR; | |
1152 | entry->high = MINPTR; | |
1153 | } | |
1154 | } | |
1155 | } | |
1156 | } | |
6de9cd9a DN |
1157 | } |
1158 | } | |
1159 | ||
1160 | ||
/* Public entry point for object registration: take the global lock
   and the recursion guard, then delegate to the unlocked back-end
   __mfu_register.  */
void
__mf_register (void *ptr, size_t sz, int type, const char *name)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_register (ptr, sz, type, name);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
1170 | ||
1171 | ||
/* Unlocked back-end of __mf_register: record [ptr, ptr+sz) as a live
   object of the given TYPE, labelled NAME, according to the current
   mudflap mode.  Duplicate registrations of identical STATIC/GUESS
   regions are tolerated; other overlaps of the same type raise a
   __MF_VIOL_REGISTER violation.  */
void
__mfu_register (void *ptr, size_t sz, int type, const char *name)
{
  TRACE ("register ptr=%p size=%lu type=%x name='%s'\n",
         ptr, (unsigned long) sz, type, name ? name : "");

  if (__mf_opts.collect_stats)
    {
      __mf_count_register ++;
      /* Out-of-range types are folded into bucket 0.  */
      __mf_total_register_size [(type < 0) ? 0 :
                                (type > __MF_TYPE_MAX) ? 0 :
                                type] += sz;
    }

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz, (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_REGISTER);
      break;

    case mode_populate:
      /* Clear the cache. */
      /* XXX: why the entire cache? */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *ovr_objs [1];
        unsigned num_overlapping_objs;
        uintptr_t low = (uintptr_t) ptr;
        uintptr_t high = CLAMPSZ (ptr, sz);
        uintptr_t pc = (uintptr_t) __builtin_return_address (0);

        /* Treat unknown size indication as 1. */
        /* NOTE(review): `high` above was computed from the original
           sz==0 — confirm the off-by-one is intended.  */
        if (UNLIKELY (sz == 0)) sz = 1;

        /* Look for objects only of the same type.  This will e.g. permit a registration
           of a STATIC overlapping with a GUESS, and a HEAP with a NOACCESS.  At
           __mf_check time however harmful overlaps will be detected. */
        num_overlapping_objs = __mf_find_objects2 (low, high, ovr_objs, 1, type);

        /* Handle overlaps. */
        if (UNLIKELY (num_overlapping_objs > 0))
          {
            __mf_object_t *ovr_obj = ovr_objs[0];

            /* Accept certain specific duplication pairs. */
            if (((type == __MF_TYPE_STATIC) || (type == __MF_TYPE_GUESS))
                && ovr_obj->low == low
                && ovr_obj->high == high
                && ovr_obj->type == type)
              {
                /* Duplicate registration for static objects may come
                   from distinct compilation units. */
                VERBOSE_TRACE ("harmless duplicate reg %p-%p `%s'\n",
                               (void *) low, (void *) high,
                               (ovr_obj->name ? ovr_obj->name : ""));
                /* Leave the existing record in place.  */
                break;
              }

            /* Alas, a genuine violation. */
            else
              {
                /* Two or more *real* mappings here. */
                __mf_violation ((void *) ptr, sz,
                                (uintptr_t) __builtin_return_address (0), NULL,
                                __MF_VIOL_REGISTER);
              }
          }
        else /* No overlapping objects: AOK. */
          __mf_insert_new_object (low, high, type, name, pc);

        /* We could conceivably call __mf_check() here to prime the cache,
           but then the read_count/write_count field is not reliable. */
        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */
}
1261 | ||
1262 | ||
/* Public entry point for object unregistration: take the global lock
   and the recursion guard, then delegate to the unlocked back-end
   __mfu_unregister.  */
void
__mf_unregister (void *ptr, size_t sz, int type)
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_unregister (ptr, sz, type);
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
1272 | ||
1273 | ||
/* Unlocked back-end of __mf_unregister: retire the object record for
   [ptr, ptr+sz) of the given TYPE.  In mode_check the region must
   match a single registered object exactly (base and, when sz != 0,
   size), otherwise a __MF_VIOL_UNREGISTER violation is raised.
   Retired objects may be parked in the "cemetery" ring buffer so that
   later violations near freed memory can still be explained.  */
void
__mfu_unregister (void *ptr, size_t sz, int type)
{
  DECLARE (void, free, void *ptr);

  if (UNLIKELY (__mf_opts.sigusr1_report))
    __mf_sigusr1_respond ();

  TRACE ("unregister ptr=%p size=%lu type=%x\n", ptr, (unsigned long) sz, type);

  switch (__mf_opts.mudflap_mode)
    {
    case mode_nop:
      break;

    case mode_violate:
      __mf_violation (ptr, sz,
                      (uintptr_t) __builtin_return_address (0), NULL,
                      __MF_VIOL_UNREGISTER);
      break;

    case mode_populate:
      /* Clear the cache. */
      /* XXX: race */
      memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache));
      /* void slot 0 */
      __mf_lookup_cache[0].low = MAXPTR;
      break;

    case mode_check:
      {
        __mf_object_t *old_obj = NULL;
        __mf_object_t *del_obj = NULL; /* Object to actually delete. */
        __mf_object_t *objs[1] = {NULL};
        unsigned num_overlapping_objs;

        num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                   CLAMPSZ (ptr, sz), objs, 1, type);

        /* Special case for HEAP_I - see free & realloc hook.  They don't
           know whether the input region was HEAP or HEAP_I before
           unmapping it.  Here we give HEAP a try in case HEAP_I
           failed. */
        if ((type == __MF_TYPE_HEAP_I) && (num_overlapping_objs == 0))
          {
            num_overlapping_objs = __mf_find_objects2 ((uintptr_t) ptr,
                                                       CLAMPSZ (ptr, sz), objs, 1, __MF_TYPE_HEAP);
          }

        /* NB: old_obj is only dereferenced below when exactly one
           overlap was found, so it is non-NULL there (the || chain
           short-circuits on num_overlapping_objs != 1).  */
        old_obj = objs[0];
        if (UNLIKELY ((num_overlapping_objs != 1) /* more than one overlap */
                      || ((sz == 0) ? 0 : (sz != (old_obj->high - old_obj->low + 1))) /* size mismatch */
                      || ((uintptr_t) ptr != old_obj->low))) /* base mismatch */
          {
            __mf_violation (ptr, sz,
                            (uintptr_t) __builtin_return_address (0), NULL,
                            __MF_VIOL_UNREGISTER);
            break;
          }

        /* Detach from the splay tree and purge the lookup cache.  */
        __mf_unlink_object (old_obj);
        __mf_uncache_object (old_obj);

        /* Wipe buffer contents if desired. */
        if ((__mf_opts.wipe_stack && old_obj->type == __MF_TYPE_STACK)
            || (__mf_opts.wipe_heap && (old_obj->type == __MF_TYPE_HEAP
                                        || old_obj->type == __MF_TYPE_HEAP_I)))
          {
            memset ((void *) old_obj->low,
                    0,
                    (size_t) (old_obj->high - old_obj->low + 1));
          }

        /* Manage the object cemetery. */
        if (__mf_opts.persistent_count > 0
            && (unsigned) old_obj->type <= __MF_TYPE_MAX_CEM)
          {
            old_obj->deallocated_p = 1;
            old_obj->dealloc_pc = (uintptr_t) __builtin_return_address (0);
#if HAVE_GETTIMEOFDAY
            if (__mf_opts.timestamps)
              gettimeofday (& old_obj->dealloc_time, NULL);
#endif
#ifdef LIBMUDFLAPTH
            old_obj->dealloc_thread = pthread_self ();
#endif

            if (__mf_opts.backtrace > 0 && old_obj->type == __MF_TYPE_HEAP)
              old_obj->dealloc_backtrace_size =
                __mf_backtrace (& old_obj->dealloc_backtrace,
                                NULL, 2);

            /* Encourage this object to be displayed again in current epoch. */
            old_obj->description_epoch --;

            /* Put this object into the cemetery.  This may require this plot to
               be recycled, and the previous resident to be designated del_obj. */
            {
              unsigned row = old_obj->type;
              unsigned plot = __mf_object_dead_head [row];

              /* Rotate the circular buffer: evict the prior occupant.  */
              del_obj = __mf_object_cemetary [row][plot];
              __mf_object_cemetary [row][plot] = old_obj;
              plot ++;
              if (plot == __mf_opts.persistent_count) plot = 0;
              __mf_object_dead_head [row] = plot;
            }
          }
        else
          del_obj = old_obj;

        if (__mf_opts.print_leaks)
          {
            if ((old_obj->read_count + old_obj->write_count) == 0 &&
                (old_obj->type == __MF_TYPE_HEAP
                 || old_obj->type == __MF_TYPE_HEAP_I))
              {
                /* The problem with a warning message here is that we may not
                   be privy to accesses to such objects that occur within
                   uninstrumented libraries. */
#if 0
                fprintf (stderr,
                         "*******\n"
                         "mudflap warning: unaccessed registered object:\n");
                __mf_describe_object (old_obj);
#endif
              }
          }

        if (del_obj != NULL) /* May or may not equal old_obj. */
          {
            if (__mf_opts.backtrace > 0)
              {
                CALL_REAL(free, del_obj->alloc_backtrace);
                if (__mf_opts.persistent_count > 0)
                  {
                    CALL_REAL(free, del_obj->dealloc_backtrace);
                  }
              }
            CALL_REAL(free, del_obj);
          }

        break;
      }
    } /* end switch (__mf_opts.mudflap_mode) */


  if (__mf_opts.collect_stats)
    {
      __mf_count_unregister ++;
      __mf_total_unregister_size += sz;
    }
}
1427 | ||
6de9cd9a DN |
1428 | |
1429 | ||
/* Accumulator filled in by __mf_adapt_cache_fn while walking the
   object trees; __mf_adapt_cache uses it to choose new lookup-cache
   parameters.  */
struct tree_stats
{
  unsigned obj_count;        /* Objects accessed at least once.  */
  unsigned long total_size;  /* Total bytes spanned by those objects.  */
  unsigned live_obj_count;   /* Subset with a nonzero liveness counter.  */
  double total_weight;       /* Sum of liveness counters.  */
  double weighted_size;      /* Object sizes weighted by liveness.  */
  /* Per address bit, liveness-weighted tallies of that bit being 0 or 1
     in object base addresses: [bit index][bit value].  */
  unsigned long weighted_address_bits [sizeof (uintptr_t) * 8][2];
};
1439 | ||
1440 | ||
6de9cd9a | 1441 | |
cfbd22d7 | 1442 | static int |
fc5515a8 | 1443 | __mf_adapt_cache_fn (mfsplay_tree_node n, void *param) |
cfbd22d7 FCE |
1444 | { |
1445 | __mf_object_t *obj = (__mf_object_t *) n->value; | |
1446 | struct tree_stats *s = (struct tree_stats *) param; | |
6de9cd9a | 1447 | |
cfbd22d7 | 1448 | assert (obj != NULL && s != NULL); |
fb925a51 | 1449 | |
6de9cd9a | 1450 | /* Exclude never-accessed objects. */ |
cfbd22d7 | 1451 | if (obj->read_count + obj->write_count) |
6de9cd9a DN |
1452 | { |
1453 | s->obj_count ++; | |
cfbd22d7 FCE |
1454 | s->total_size += (obj->high - obj->low + 1); |
1455 | ||
1456 | if (obj->liveness) | |
1457 | { | |
1458 | unsigned i; | |
1459 | uintptr_t addr; | |
1460 | ||
1461 | /* VERBOSE_TRACE ("analyze low=%p live=%u name=`%s'\n", | |
1462 | (void *) obj->low, obj->liveness, obj->name); */ | |
1463 | ||
1464 | s->live_obj_count ++; | |
1465 | s->total_weight += (double) obj->liveness; | |
1466 | s->weighted_size += | |
1467 | (double) (obj->high - obj->low + 1) * | |
1468 | (double) obj->liveness; | |
1469 | ||
1470 | addr = obj->low; | |
1471 | for (i=0; i<sizeof(uintptr_t) * 8; i++) | |
1472 | { | |
1473 | unsigned bit = addr & 1; | |
1474 | s->weighted_address_bits[i][bit] += obj->liveness; | |
1475 | addr = addr >> 1; | |
1476 | } | |
1477 | ||
1478 | /* Age the liveness value. */ | |
1479 | obj->liveness >>= 1; | |
1480 | } | |
6de9cd9a DN |
1481 | } |
1482 | ||
cfbd22d7 | 1483 | return 0; |
6de9cd9a DN |
1484 | } |
1485 | ||
1486 | ||
1487 | static void | |
1488 | __mf_adapt_cache () | |
1489 | { | |
1490 | struct tree_stats s; | |
1491 | uintptr_t new_mask = 0; | |
1492 | unsigned char new_shift; | |
1493 | float cache_utilization; | |
1494 | float max_value; | |
1495 | static float smoothed_new_shift = -1.0; | |
1496 | unsigned i; | |
1497 | ||
1498 | memset (&s, 0, sizeof (s)); | |
cfbd22d7 | 1499 | |
fc5515a8 FCE |
1500 | mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), __mf_adapt_cache_fn, (void *) & s); |
1501 | mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), __mf_adapt_cache_fn, (void *) & s); | |
1502 | mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STACK), __mf_adapt_cache_fn, (void *) & s); | |
1503 | mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_STATIC), __mf_adapt_cache_fn, (void *) & s); | |
1504 | mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_GUESS), __mf_adapt_cache_fn, (void *) & s); | |
6de9cd9a DN |
1505 | |
1506 | /* Maybe we're dealing with funny aging/adaptation parameters, or an | |
1507 | empty tree. Just leave the cache alone in such cases, rather | |
1508 | than risk dying by division-by-zero. */ | |
1509 | if (! (s.obj_count > 0) && (s.live_obj_count > 0) && (s.total_weight > 0.0)) | |
1510 | return; | |
1511 | ||
1512 | /* Guess a good value for the shift parameter by finding an address bit that is a | |
1513 | good discriminant of lively objects. */ | |
1514 | max_value = 0.0; | |
1515 | for (i=0; i<sizeof (uintptr_t)*8; i++) | |
1516 | { | |
1517 | float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1]; | |
1518 | if (max_value < value) max_value = value; | |
1519 | } | |
1520 | for (i=0; i<sizeof (uintptr_t)*8; i++) | |
1521 | { | |
1522 | float shoulder_factor = 0.7; /* Include slightly less popular bits too. */ | |
1523 | float value = (float) s.weighted_address_bits[i][0] * (float) s.weighted_address_bits[i][1]; | |
1524 | if (value >= max_value * shoulder_factor) | |
cfbd22d7 | 1525 | break; |
6de9cd9a DN |
1526 | } |
1527 | if (smoothed_new_shift < 0) smoothed_new_shift = __mf_lc_shift; | |
fb925a51 | 1528 | /* Converge toward this slowly to reduce flapping. */ |
6de9cd9a DN |
1529 | smoothed_new_shift = 0.9*smoothed_new_shift + 0.1*i; |
1530 | new_shift = (unsigned) (smoothed_new_shift + 0.5); | |
1531 | assert (new_shift < sizeof (uintptr_t)*8); | |
1532 | ||
1533 | /* Count number of used buckets. */ | |
1534 | cache_utilization = 0.0; | |
1535 | for (i = 0; i < (1 + __mf_lc_mask); i++) | |
1536 | if (__mf_lookup_cache[i].low != 0 || __mf_lookup_cache[i].high != 0) | |
1537 | cache_utilization += 1.0; | |
1538 | cache_utilization /= (1 + __mf_lc_mask); | |
1539 | ||
ddfabf89 | 1540 | new_mask |= 0xffff; /* XXX: force a large cache. */ |
6de9cd9a DN |
1541 | new_mask &= (LOOKUP_CACHE_SIZE_MAX - 1); |
1542 | ||
1543 | VERBOSE_TRACE ("adapt cache obj=%u/%u sizes=%lu/%.0f/%.0f => " | |
cfbd22d7 FCE |
1544 | "util=%u%% m=%p s=%u\n", |
1545 | s.obj_count, s.live_obj_count, s.total_size, s.total_weight, s.weighted_size, | |
1546 | (unsigned)(cache_utilization*100.0), (void *) new_mask, new_shift); | |
6de9cd9a DN |
1547 | |
1548 | /* We should reinitialize cache if its parameters have changed. */ | |
1549 | if (new_mask != __mf_lc_mask || | |
1550 | new_shift != __mf_lc_shift) | |
1551 | { | |
1552 | __mf_lc_mask = new_mask; | |
1553 | __mf_lc_shift = new_shift; | |
1554 | /* XXX: race */ | |
1555 | memset (__mf_lookup_cache, 0, sizeof(__mf_lookup_cache)); | |
1556 | /* void slot 0 */ | |
1557 | __mf_lookup_cache[0].low = MAXPTR; | |
1558 | } | |
1559 | } | |
1560 | ||
1561 | ||
1562 | ||
6de9cd9a DN |
1563 | /* __mf_find_object[s] */ |
1564 | ||
1565 | /* Find overlapping live objects between [low,high]. Return up to | |
1566 | max_objs of their pointers in objs[]. Return total count of | |
1567 | overlaps (may exceed max_objs). */ | |
1568 | ||
fb925a51 MS |
/* Search the splay tree for TYPE for live objects overlapping
   [ptr_low, ptr_high].  Store up to max_objs of their pointers in
   objs[] and return the total number of overlaps found (which may
   exceed max_objs).  */
unsigned
__mf_find_objects2 (uintptr_t ptr_low, uintptr_t ptr_high,
                    __mf_object_t **objs, unsigned max_objs, int type)
{
  unsigned count = 0;
  mfsplay_tree t = __mf_object_tree (type);
  mfsplay_tree_key k = (mfsplay_tree_key) ptr_low;
  int direction;

  mfsplay_tree_node n = mfsplay_tree_lookup (t, k);
  /* An exact match for base address implies a hit. */
  if (n != NULL)
    {
      if (count < max_objs)
        objs[count] = (__mf_object_t *) n->value;
      count ++;
    }

  /* Iterate left then right near this key value to find all overlapping objects. */
  for (direction = 0; direction < 2; direction ++)
    {
      /* Reset search origin. */
      k = (mfsplay_tree_key) ptr_low;

      while (1)
        {
          __mf_object_t *obj;

          /* direction 0: walk upward via successors; direction 1:
             walk downward via predecessors.  */
          n = (direction == 0 ? mfsplay_tree_successor (t, k) : mfsplay_tree_predecessor (t, k));
          if (n == NULL) break;
          obj = (__mf_object_t *) n->value;

          /* Objects are keyed by base address, so the first
             non-overlapping neighbor ends the walk in this direction.  */
          if (! (obj->low <= ptr_high && obj->high >= ptr_low)) /* No overlap? */
            break;

          if (count < max_objs)
            objs[count] = (__mf_object_t *) n->value;
          count ++;

          /* Advance the key to this object's base so the next
             successor/predecessor step moves past it.  */
          k = (mfsplay_tree_key) obj->low;
        }
    }

  return count;
}
1614 | ||
1615 | ||
1616 | unsigned | |
1617 | __mf_find_objects (uintptr_t ptr_low, uintptr_t ptr_high, | |
cfbd22d7 | 1618 | __mf_object_t **objs, unsigned max_objs) |
6de9cd9a | 1619 | { |
cfbd22d7 FCE |
1620 | int type; |
1621 | unsigned count = 0; | |
6de9cd9a | 1622 | |
cfbd22d7 FCE |
1623 | /* Search each splay tree for overlaps. */ |
1624 | for (type = __MF_TYPE_NOACCESS; type <= __MF_TYPE_GUESS; type++) | |
6de9cd9a | 1625 | { |
cfbd22d7 FCE |
1626 | unsigned c = __mf_find_objects2 (ptr_low, ptr_high, objs, max_objs, type); |
1627 | if (c > max_objs) | |
1628 | { | |
1629 | max_objs = 0; | |
1630 | objs = NULL; | |
1631 | } | |
1632 | else /* NB: C may equal 0 */ | |
1633 | { | |
1634 | max_objs -= c; | |
1635 | objs += c; | |
1636 | } | |
1637 | count += c; | |
6de9cd9a DN |
1638 | } |
1639 | ||
cfbd22d7 | 1640 | return count; |
6de9cd9a DN |
1641 | } |
1642 | ||
1643 | ||
6de9cd9a | 1644 | |
cfbd22d7 | 1645 | /* __mf_link_object */ |
6de9cd9a DN |
1646 | |
1647 | static void | |
cfbd22d7 | 1648 | __mf_link_object (__mf_object_t *node) |
6de9cd9a | 1649 | { |
fc5515a8 FCE |
1650 | mfsplay_tree t = __mf_object_tree (node->type); |
1651 | mfsplay_tree_insert (t, (mfsplay_tree_key) node->low, (mfsplay_tree_value) node); | |
6de9cd9a DN |
1652 | } |
1653 | ||
cfbd22d7 FCE |
1654 | /* __mf_unlink_object */ |
1655 | ||
6de9cd9a | 1656 | static void |
cfbd22d7 | 1657 | __mf_unlink_object (__mf_object_t *node) |
6de9cd9a | 1658 | { |
fc5515a8 FCE |
1659 | mfsplay_tree t = __mf_object_tree (node->type); |
1660 | mfsplay_tree_remove (t, (mfsplay_tree_key) node->low); | |
6de9cd9a DN |
1661 | } |
1662 | ||
1663 | /* __mf_find_dead_objects */ | |
1664 | ||
1665 | /* Find overlapping dead objects between [low,high]. Return up to | |
1666 | max_objs of their pointers in objs[]. Return total count of | |
1667 | overlaps (may exceed max_objs). */ | |
1668 | ||
/* Search the cemetery for dead objects overlapping [low, high].
   Store up to max_objs of their pointers in objs[] and return the
   total count of overlaps (may exceed max_objs).  Returns 0 when the
   cemetery is disabled (persistent_count == 0).  */
static unsigned
__mf_find_dead_objects (uintptr_t low, uintptr_t high,
                        __mf_object_t **objs, unsigned max_objs)
{
  if (__mf_opts.persistent_count > 0)
    {
      unsigned count = 0;
      unsigned recollection = 0;  /* How far back in time to look, in plots per row.  */
      unsigned row = 0;

      assert (low <= high);
      assert (max_objs == 0 || objs != NULL);

      /* Widen the search from the most recent plots in each row, looking
         backward in time. */
      recollection = 0;
      while (recollection < __mf_opts.persistent_count)
        {
          count = 0;

          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            {
              unsigned plot;
              unsigned i;

              plot = __mf_object_dead_head [row];
              for (i = 0; i <= recollection; i ++)
                {
                  __mf_object_t *obj;

                  /* Look backward through row: it's a circular buffer. */
                  if (plot > 0) plot --;
                  else plot = __mf_opts.persistent_count - 1;

                  obj = __mf_object_cemetary [row][plot];
                  if (obj && obj->low <= high && obj->high >= low)
                    {
                      /* Found an overlapping dead object! */
                      if (count < max_objs)
                        objs [count] = obj;
                      count ++;
                    }
                }
            }

          if (count)
            break;

          /* Look farther back in time: exponentially widen the window.  */
          recollection = (recollection * 2) + 1;
        }

      return count;
    } else {
      return 0;
    }
}
1726 | ||
1727 | /* __mf_describe_object */ | |
1728 | ||
/* Print a human-readable description of OBJ to stderr.  Passing NULL
   instead resets the description epoch, so that previously-described
   objects will be described in full again (used at report time).  */
static void
__mf_describe_object (__mf_object_t *obj)
{
  static unsigned epoch = 0;
  if (obj == NULL)
    {
      epoch ++;
      return;
    }

  /* In abbreviated mode, an object already described in this epoch
     gets only a one-line summary.  */
  if (__mf_opts.abbreviate && obj->description_epoch == epoch)
    {
      fprintf (stderr,
               "mudflap %sobject %p: name=`%s'\n",
               (obj->deallocated_p ? "dead " : ""),
               (void *) obj, (obj->name ? obj->name : ""));
      return;
    }
  else
    obj->description_epoch = epoch;

  fprintf (stderr,
           "mudflap %sobject %p: name=`%s'\n"
           "bounds=[%p,%p] size=%lu area=%s check=%ur/%uw liveness=%u%s\n"
           "alloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
           " thread=%u"
#endif
           "\n",
           (obj->deallocated_p ? "dead " : ""),
           (void *) obj, (obj->name ? obj->name : ""),
           (void *) obj->low, (void *) obj->high,
           (unsigned long) (obj->high - obj->low + 1),
           (obj->type == __MF_TYPE_NOACCESS ? "no-access" :
            obj->type == __MF_TYPE_HEAP ? "heap" :
            obj->type == __MF_TYPE_HEAP_I ? "heap-init" :
            obj->type == __MF_TYPE_STACK ? "stack" :
            obj->type == __MF_TYPE_STATIC ? "static" :
            obj->type == __MF_TYPE_GUESS ? "guess" :
            "unknown"),
           obj->read_count, obj->write_count, obj->liveness,
           obj->watching_p ? " watching" : "",
           obj->alloc_time.tv_sec, obj->alloc_time.tv_usec,
           (void *) obj->alloc_pc
#ifdef LIBMUDFLAPTH
           , (unsigned) obj->alloc_thread
#endif
           );

  /* Show where the object was allocated, if backtraces were recorded.  */
  if (__mf_opts.backtrace > 0)
    {
      unsigned i;
      for (i=0; i<obj->alloc_backtrace_size; i++)
        fprintf (stderr, " %s\n", obj->alloc_backtrace[i]);
    }

  /* Deallocation details exist only when dead objects persist.  */
  if (__mf_opts.persistent_count > 0)
    {
      if (obj->deallocated_p)
        {
          fprintf (stderr, "dealloc time=%lu.%06lu pc=%p"
#ifdef LIBMUDFLAPTH
                   " thread=%u"
#endif
                   "\n",
                   obj->dealloc_time.tv_sec, obj->dealloc_time.tv_usec,
                   (void *) obj->dealloc_pc
#ifdef LIBMUDFLAPTH
                   , (unsigned) obj->dealloc_thread
#endif
                   );


          if (__mf_opts.backtrace > 0)
            {
              unsigned i;
              for (i=0; i<obj->dealloc_backtrace_size; i++)
                fprintf (stderr, " %s\n", obj->dealloc_backtrace[i]);
            }
        }
    }
}
1811 | ||
cfbd22d7 FCE |
1812 | |
1813 | static int | |
fc5515a8 | 1814 | __mf_report_leaks_fn (mfsplay_tree_node n, void *param) |
6de9cd9a | 1815 | { |
cfbd22d7 FCE |
1816 | __mf_object_t *node = (__mf_object_t *) n->value; |
1817 | unsigned *count = (unsigned *) param; | |
6de9cd9a | 1818 | |
cfbd22d7 FCE |
1819 | if (count != NULL) |
1820 | (*count) ++; | |
6de9cd9a | 1821 | |
cfbd22d7 FCE |
1822 | fprintf (stderr, "Leaked object %u:\n", (*count)); |
1823 | __mf_describe_object (node); | |
1824 | ||
1825 | return 0; | |
1826 | } | |
1827 | ||
1828 | ||
1829 | static unsigned | |
1830 | __mf_report_leaks () | |
1831 | { | |
1832 | unsigned count = 0; | |
1833 | ||
fc5515a8 | 1834 | (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP), |
cfbd22d7 | 1835 | __mf_report_leaks_fn, & count); |
fc5515a8 | 1836 | (void) mfsplay_tree_foreach (__mf_object_tree (__MF_TYPE_HEAP_I), |
cfbd22d7 | 1837 | __mf_report_leaks_fn, & count); |
6de9cd9a DN |
1838 | |
1839 | return count; | |
1840 | } | |
1841 | ||
1842 | /* ------------------------------------------------------------------------ */ | |
1843 | /* __mf_report */ | |
1844 | ||
/* Public entry point: produce the mudflap report.  Takes the runtime
   lock and enables recursion protection so the reporting code's own
   memory accesses do not re-enter the checker, then delegates to the
   unlocked worker __mfu_report.  */
void
__mf_report ()
{
  LOCKTH ();
  BEGIN_RECURSION_PROTECT ();
  __mfu_report ();
  END_RECURSION_PROTECT ();
  UNLOCKTH ();
}
1854 | ||
/* Unlocked report worker: dump runtime statistics, lookup-cache usage,
   live/dead object counts, and (when configured) a heap leak listing to
   stderr.  Caller must hold the runtime lock / recursion protection.  */
void
__mfu_report ()
{
  if (__mf_opts.collect_stats)
    {
      /* Call/byte counters accumulated by the check/register hooks.  */
      fprintf (stderr,
               "*******\n"
               "mudflap stats:\n"
               "calls to __mf_check: %lu\n"
               " __mf_register: %lu [%luB, %luB, %luB, %luB, %luB]\n"
               " __mf_unregister: %lu [%luB]\n"
               " __mf_violation: [%lu, %lu, %lu, %lu, %lu]\n",
               __mf_count_check,
               __mf_count_register,
               __mf_total_register_size[0], __mf_total_register_size[1],
               __mf_total_register_size[2], __mf_total_register_size[3],
               __mf_total_register_size[4], /* XXX */
               __mf_count_unregister, __mf_total_unregister_size,
               __mf_count_violation[0], __mf_count_violation[1],
               __mf_count_violation[2], __mf_count_violation[3],
               __mf_count_violation[4]);

      fprintf (stderr,
               "calls with reentrancy: %lu\n", __mf_reentrancy);
#ifdef LIBMUDFLAPTH
      fprintf (stderr,
               " lock contention: %lu\n", __mf_lock_contention);
#endif

      /* Lookup cache stats.  */
      {
        unsigned i;
        unsigned max_reuse = 0;
        unsigned num_used = 0;
        unsigned num_unused = 0;

        for (i = 0; i < LOOKUP_CACHE_SIZE; i++)
          {
            if (__mf_lookup_cache_reusecount[i])
              num_used ++;
            else
              num_unused ++;
            if (max_reuse < __mf_lookup_cache_reusecount[i])
              max_reuse = __mf_lookup_cache_reusecount[i];
          }
        fprintf (stderr, "lookup cache slots used: %u unused: %u peak-reuse: %u\n",
                 num_used, num_unused, max_reuse);
      }

      {
        /* Count every registered object by scanning the full address range.  */
        unsigned live_count;
        live_count = __mf_find_objects (MINPTR, MAXPTR, NULL, 0);
        fprintf (stderr, "number of live objects: %u\n", live_count);
      }

      if (__mf_opts.persistent_count > 0)
        {
          /* Count occupied plots across all cemetary rows.  */
          unsigned dead_count = 0;
          unsigned row, plot;
          for (row = 0; row <= __MF_TYPE_MAX_CEM; row ++)
            for (plot = 0 ; plot < __mf_opts.persistent_count; plot ++)
              if (__mf_object_cemetary [row][plot] != 0)
                dead_count ++;
          fprintf (stderr, " zombie objects: %u\n", dead_count);
        }
    }
  if (__mf_opts.print_leaks && (__mf_opts.mudflap_mode == mode_check))
    {
      unsigned l;
      extern void * __mf_wrap_alloca_indirect (size_t c);

      /* Free up any remaining alloca()'d blocks.  */
      __mf_wrap_alloca_indirect (0);
#ifdef HAVE___LIBC_FREERES
      if (__mf_opts.call_libc_freeres)
        {
          /* Let glibc release its internal caches so they are not
             reported as leaks.  */
          extern void __libc_freeres (void);
          __libc_freeres ();
        }
#endif

      __mf_describe_object (NULL); /* Reset description epoch.  */
      l = __mf_report_leaks ();
      fprintf (stderr, "number of leaked objects: %u\n", l);
    }
}
1941 | ||
1942 | /* __mf_backtrace */ | |
1943 | ||
/* Capture a stack backtrace into *SYMBOLS (a heap-allocated array of
   strings; caller frees with the real free).  GUESS_PC, if found in the
   trace, marks the boundary of libmudflap-internal frames to trim;
   otherwise the first GUESS_OMIT_LEVELS frames are dropped.  Returns
   the number of symbol strings produced.  */
size_t
__mf_backtrace (char ***symbols, void *guess_pc, unsigned guess_omit_levels)
{
  void ** pc_array;
  unsigned pc_array_size = __mf_opts.backtrace + guess_omit_levels;
  unsigned remaining_size;
  unsigned omitted_size = 0;
  unsigned i;
  DECLARE (void, free, void *ptr);
  DECLARE (void *, calloc, size_t c, size_t n);
  DECLARE (void *, malloc, size_t n);

  /* NOTE(review): calloc result is passed to backtrace() unchecked —
     a NULL return on OOM would fault here; confirm acceptable.  */
  pc_array = CALL_REAL (calloc, pc_array_size, sizeof (void *) );
#ifdef HAVE_BACKTRACE
  pc_array_size = backtrace (pc_array, pc_array_size);
#else
#define FETCH(n) do { if (pc_array_size >= n) { \
pc_array[n] = __builtin_return_address(n); \
if (pc_array[n] == 0) pc_array_size = n; } } while (0)

  /* Unroll some calls __builtin_return_address because this function
     only takes a literal integer parameter. */
  FETCH (0);
#if 0
  /* XXX: __builtin_return_address sometimes crashes (!) on >0 arguments,
     rather than simply returning 0.  :-( */
  FETCH (1);
  FETCH (2);
  FETCH (3);
  FETCH (4);
  FETCH (5);
  FETCH (6);
  FETCH (7);
  FETCH (8);
  if (pc_array_size > 8) pc_array_size = 9;
#else
  if (pc_array_size > 0) pc_array_size = 1;
#endif

#undef FETCH
#endif

  /* We want to trim the first few levels of the stack traceback,
     since they contain libmudflap wrappers and junk.  If pc_array[]
     ends up containing a non-NULL guess_pc, then trim everything
     before that.  Otherwise, omit the first guess_omit_levels
     entries. */

  /* NOTE(review): this loop does not break on a match, so the LAST
     occurrence of guess_pc wins; and a match at index 0 leaves
     omitted_size == 0, falling through to the guess_omit_levels
     fallback below — confirm both are intended.  */
  if (guess_pc != NULL)
    for (i=0; i<pc_array_size; i++)
      if (pc_array [i] == guess_pc)
        omitted_size = i;

  if (omitted_size == 0) /* No match? */
    if (pc_array_size > guess_omit_levels)
      omitted_size = guess_omit_levels;

  remaining_size = pc_array_size - omitted_size;

#ifdef HAVE_BACKTRACE_SYMBOLS
  *symbols = backtrace_symbols (pc_array + omitted_size, remaining_size);
#else
  {
    /* Let's construct a buffer by hand.  It will have <remaining_size>
       char*'s at the front, pointing at individual strings immediately
       afterwards. */
    void *buffer;
    char *chars;
    char **pointers;
    enum { perline = 30 }; /* bytes reserved per formatted address */
    buffer = CALL_REAL (malloc, remaining_size * (perline + sizeof(char *)));
    pointers = (char **) buffer;
    chars = (char *)buffer + (remaining_size * sizeof (char *));
    for (i = 0; i < remaining_size; i++)
      {
        pointers[i] = chars;
        sprintf (chars, "[0x%p]", pc_array [omitted_size + i]);
        chars = chars + perline;
      }
    *symbols = pointers;
  }
#endif
  CALL_REAL (free, pc_array);

  return remaining_size;
}
2030 | ||
2031 | /* ------------------------------------------------------------------------ */ | |
2032 | /* __mf_violation */ | |
2033 | ||
/* Report a mudflap violation of kind TYPE at access [PTR, PTR+SZ),
   detected at program counter PC / source LOCATION, then carry out the
   configured violation_mode action (nothing, SIGSEGV, abort, or attach
   gdb).  */
void
__mf_violation (void *ptr, size_t sz, uintptr_t pc,
                const char *location, int type)
{
  char buf [128];
  static unsigned violation_number;
  DECLARE(void, free, void *ptr);

  TRACE ("violation pc=%p location=%s type=%d ptr=%p size=%lu\n",
         (void *) pc,
         (location != NULL ? location : ""), type, ptr, (unsigned long) sz);

  /* Out-of-range TYPE values are folded into counter slot 0.  */
  if (__mf_opts.collect_stats)
    __mf_count_violation [(type < 0) ? 0 :
                          (type > __MF_VIOL_WATCH) ? 0 :
                          type] ++;

  /* Print out a basic warning message. */
  if (__mf_opts.verbose_violations)
    {
      unsigned dead_p;
      unsigned num_helpful = 0;
      struct timeval now = { 0, 0 };
#if HAVE_GETTIMEOFDAY
      gettimeofday (& now, NULL);
#endif

      violation_number ++;
      fprintf (stderr,
               "*******\n"
               "mudflap violation %u (%s): time=%lu.%06lu "
               "ptr=%p size=%lu\npc=%p%s%s%s\n",
               violation_number,
               ((type == __MF_VIOL_READ) ? "check/read" :
                (type == __MF_VIOL_WRITE) ? "check/write" :
                (type == __MF_VIOL_REGISTER) ? "register" :
                (type == __MF_VIOL_UNREGISTER) ? "unregister" :
                (type == __MF_VIOL_WATCH) ? "watch" : "unknown"),
               now.tv_sec, now.tv_usec,
               (void *) ptr, (unsigned long)sz, (void *) pc,
               (location != NULL ? " location=`" : ""),
               (location != NULL ? location : ""),
               (location != NULL ? "'" : ""));

      if (__mf_opts.backtrace > 0)
        {
          char ** symbols;
          unsigned i, num;

          num = __mf_backtrace (& symbols, (void *) pc, 2);
          /* Note: backtrace_symbols calls malloc().  But since we're in
             __mf_violation and presumably __mf_check, it'll detect
             recursion, and not put the new string into the database.  */

          for (i=0; i<num; i++)
            fprintf (stderr, " %s\n", symbols[i]);

          /* Calling free() here would trigger a violation.  */
          CALL_REAL(free, symbols);
        }


      /* Look for nearby objects.  For this, we start with s_low/s_high
         pointing to the given area, looking for overlapping objects.
         If none show up, widen the search area and keep looking. */

      if (sz == 0) sz = 1;

      /* First pass scans live objects, second pass dead ones.  */
      for (dead_p = 0; dead_p <= 1; dead_p ++) /* for dead_p in 0 1 */
        {
          enum {max_objs = 3}; /* magic */
          __mf_object_t *objs[max_objs];
          unsigned num_objs = 0;
          uintptr_t s_low, s_high;
          unsigned tries = 0;
          unsigned i;

          s_low = (uintptr_t) ptr;
          s_high = CLAMPSZ (ptr, sz);

          while (tries < 16) /* magic */
            {
              if (dead_p)
                num_objs = __mf_find_dead_objects (s_low, s_high, objs, max_objs);
              else
                num_objs = __mf_find_objects (s_low, s_high, objs, max_objs);

              if (num_objs) /* good enough */
                break;

              tries ++;

              /* XXX: tune this search strategy.  It's too dependent on
                 sz, which can vary from 1 to very big (when array index
                 checking) numbers. */
              s_low = CLAMPSUB (s_low, (sz * tries * tries));
              s_high = CLAMPADD (s_high, (sz * tries * tries));
            }

          for (i = 0; i < min (num_objs, max_objs); i++)
            {
              __mf_object_t *obj = objs[i];
              uintptr_t low = (uintptr_t) ptr;
              uintptr_t high = CLAMPSZ (ptr, sz);
              /* Distances of each end of the checked region relative to
                 the object's bounds: before it, after it, or inside.  */
              unsigned before1 = (low < obj->low) ? obj->low - low : 0;
              unsigned after1 = (low > obj->high) ? low - obj->high : 0;
              unsigned into1 = (high >= obj->low && low <= obj->high) ? low - obj->low : 0;
              unsigned before2 = (high < obj->low) ? obj->low - high : 0;
              unsigned after2 = (high > obj->high) ? high - obj->high : 0;
              unsigned into2 = (high >= obj->low && low <= obj->high) ? high - obj->low : 0;

              fprintf (stderr, "Nearby object %u: checked region begins %uB %s and ends %uB %s\n",
                       num_helpful + i + 1,
                       (before1 ? before1 : after1 ? after1 : into1),
                       (before1 ? "before" : after1 ? "after" : "into"),
                       (before2 ? before2 : after2 ? after2 : into2),
                       (before2 ? "before" : after2 ? "after" : "into"));
              __mf_describe_object (obj);
            }
          num_helpful += num_objs;
        }

      fprintf (stderr, "number of nearby objects: %u\n", num_helpful);
    }

  /* How to finally handle this violation? */
  switch (__mf_opts.violation_mode)
    {
    case viol_nop:
      break;
    case viol_segv:
      kill (getpid(), SIGSEGV);
      break;
    case viol_abort:
      abort ();
      break;
    case viol_gdb:

      snprintf (buf, 128, "gdb --pid=%u", (unsigned) getpid ());
      system (buf);
      /* XXX: should probably fork() && sleep(GDB_WAIT_PARAMETER)
         instead, and let the forked child execlp() gdb.  That way, this
         subject process can be resumed under the supervision of gdb.
         This can't happen now, since system() only returns when gdb
         dies.  In that case, we need to beware of starting a second
         concurrent gdb child upon the next violation.  (But if the first
         gdb dies, then starting a new one is appropriate.)  */
      break;
    }
}
2184 | ||
2185 | /* ------------------------------------------------------------------------ */ | |
2186 | ||
2187 | ||
2188 | unsigned __mf_watch (void *ptr, size_t sz) | |
2189 | { | |
2190 | unsigned rc; | |
2191 | LOCKTH (); | |
2192 | BEGIN_RECURSION_PROTECT (); | |
2193 | rc = __mf_watch_or_not (ptr, sz, 1); | |
2194 | END_RECURSION_PROTECT (); | |
2195 | UNLOCKTH (); | |
2196 | return rc; | |
2197 | } | |
2198 | ||
2199 | unsigned __mf_unwatch (void *ptr, size_t sz) | |
2200 | { | |
2201 | unsigned rc; | |
2202 | LOCKTH (); | |
2203 | rc = __mf_watch_or_not (ptr, sz, 0); | |
2204 | UNLOCKTH (); | |
2205 | return rc; | |
2206 | } | |
2207 | ||
2208 | ||
2209 | static unsigned | |
2210 | __mf_watch_or_not (void *ptr, size_t sz, char flag) | |
2211 | { | |
2212 | uintptr_t ptr_high = CLAMPSZ (ptr, sz); | |
2213 | uintptr_t ptr_low = (uintptr_t) ptr; | |
2214 | unsigned count = 0; | |
2215 | ||
2216 | TRACE ("%s ptr=%p size=%lu\n", | |
cfbd22d7 | 2217 | (flag ? "watch" : "unwatch"), ptr, (unsigned long) sz); |
fb925a51 | 2218 | |
6de9cd9a DN |
2219 | switch (__mf_opts.mudflap_mode) |
2220 | { | |
2221 | case mode_nop: | |
2222 | case mode_populate: | |
2223 | case mode_violate: | |
2224 | count = 0; | |
2225 | break; | |
2226 | ||
2227 | case mode_check: | |
2228 | { | |
cfbd22d7 FCE |
2229 | __mf_object_t **all_ovr_objs; |
2230 | unsigned obj_count; | |
2231 | unsigned n; | |
2232 | DECLARE (void *, malloc, size_t c); | |
2233 | DECLARE (void, free, void *p); | |
2234 | ||
2235 | obj_count = __mf_find_objects (ptr_low, ptr_high, NULL, 0); | |
2236 | VERBOSE_TRACE (" %u:", obj_count); | |
2237 | ||
2238 | all_ovr_objs = CALL_REAL (malloc, (sizeof (__mf_object_t *) * obj_count)); | |
2239 | if (all_ovr_objs == NULL) abort (); | |
2240 | n = __mf_find_objects (ptr_low, ptr_high, all_ovr_objs, obj_count); | |
2241 | assert (n == obj_count); | |
2242 | ||
2243 | for (n = 0; n < obj_count; n ++) | |
2244 | { | |
2245 | __mf_object_t *obj = all_ovr_objs[n]; | |
2246 | ||
2247 | VERBOSE_TRACE (" [%p]", (void *) obj); | |
2248 | if (obj->watching_p != flag) | |
2249 | { | |
2250 | obj->watching_p = flag; | |
2251 | count ++; | |
2252 | ||
2253 | /* Remove object from cache, to ensure next access | |
2254 | goes through __mf_check(). */ | |
2255 | if (flag) | |
2256 | __mf_uncache_object (obj); | |
2257 | } | |
2258 | } | |
2259 | CALL_REAL (free, all_ovr_objs); | |
6de9cd9a DN |
2260 | } |
2261 | break; | |
2262 | } | |
2263 | ||
2264 | return count; | |
2265 | } | |
2266 | ||
2267 | ||
2268 | void | |
2269 | __mf_sigusr1_handler (int num) | |
2270 | { | |
2271 | __mf_sigusr1_received ++; | |
2272 | } | |
2273 | ||
2274 | /* Install or remove SIGUSR1 handler as necessary. | |
2275 | Also, respond to a received pending SIGUSR1. */ | |
/* Install or remove the SIGUSR1 handler to track the sigusr1_report
   option, and emit a report for any SIGUSR1 received since the last
   call.  Called from within the runtime, already under recursion
   protection (asserted below).  */
void
__mf_sigusr1_respond ()
{
  static int handler_installed;

#ifdef SIGUSR1
  /* Manage handler */
  if (__mf_opts.sigusr1_report && ! handler_installed)
    {
      signal (SIGUSR1, __mf_sigusr1_handler);
      handler_installed = 1;
    }
  else if(! __mf_opts.sigusr1_report && handler_installed)
    {
      signal (SIGUSR1, SIG_DFL);
      handler_installed = 0;
    }
#endif

  /* Manage enqueued signals */
  if (__mf_sigusr1_received > __mf_sigusr1_handled)
    {
      __mf_sigusr1_handled ++;
      assert (__mf_get_state () == reentrant);
      __mfu_report ();
      handler_installed = 0; /* We may need to re-enable signal; this might be a SysV library. */
    }
}
2304 | ||
2305 | ||
2306 | /* XXX: provide an alternative __assert_fail function that cannot | |
2307 | fail due to libmudflap infinite recursion. */ | |
2308 | #ifndef NDEBUG | |
2309 | ||
/* Emit the decimal representation of N to file descriptor FD, using
   only write() — no printf, no heap — so it is safe to call from the
   assertion-failure path below.  */
static void
write_itoa (int fd, unsigned n)
{
  enum { bufsize = sizeof (n) * 4 };
  char buf[bufsize];
  char *p = &buf[bufsize - 1];

  *p = '\0';
  /* Peel off digits least-significant first, filling the buffer
     backwards; P ends up at the most significant digit.  */
  do
    {
      *--p = (char) ('0' + (n % 10));
      n /= 10;
    }
  while (n != 0 && p > buf);

  write (fd, p, strlen (p));
}
2331 | ||
2332 | ||
/* Replacement for libc's __assert_fail that cannot recurse into
   libmudflap: it writes directly to fd 2 via write()/write_itoa and
   then aborts, never touching stdio or the heap.  */
void
__assert_fail (const char *msg, const char *file, unsigned line, const char *func)
{
#define write2(string) write (2, (string), strlen ((string)));
  write2("mf");
#ifdef LIBMUDFLAPTH
  /* Identify the failing thread when threading support is built in.  */
  write2("(");
  write_itoa (2, (unsigned) pthread_self ());
  write2(")");
#endif
  write2(": assertion failure: `");
  write (2, msg, strlen (msg));
  write2("' in ");
  write (2, func, strlen (func));
  write2(" at ");
  write (2, file, strlen (file));
  write2(":");
  write_itoa (2, line);
  write2("\n");
#undef write2
  abort ();
}
2355 | ||
2356 | ||
2357 | #endif | |
cfbd22d7 FCE |
2358 | |
2359 | ||
2360 | ||
fc5515a8 FCE |
2361 | /* Adapted splay tree code, originally from libiberty. It has been |
2362 | specialized for libmudflap as requested by RMS. */ | |
cfbd22d7 FCE |
2363 | |
/* Release P with the real (unwrapped) free, so that freeing splay-tree
   internals does not re-enter the mudflap allocation hooks.  */
static void
mfsplay_tree_free (void *p)
{
  DECLARE (void, free, void *p);
  CALL_REAL (free, p);
}
2370 | ||
2371 | static void * | |
fc5515a8 | 2372 | mfsplay_tree_xmalloc (size_t s) |
cfbd22d7 FCE |
2373 | { |
2374 | DECLARE (void *, malloc, size_t s); | |
2375 | return CALL_REAL (malloc, s); | |
2376 | } | |
2377 | ||
fc5515a8 FCE |
2378 | |
2379 | static void mfsplay_tree_splay (mfsplay_tree, mfsplay_tree_key); | |
2380 | static mfsplay_tree_node mfsplay_tree_splay_helper (mfsplay_tree, | |
2381 | mfsplay_tree_key, | |
2382 | mfsplay_tree_node *, | |
2383 | mfsplay_tree_node *, | |
2384 | mfsplay_tree_node *); | |
fc5515a8 FCE |
2385 | |
2386 | ||
2387 | /* Help splay SP around KEY. PARENT and GRANDPARENT are the parent | |
2388 | and grandparent, respectively, of NODE. */ | |
2389 | ||
static mfsplay_tree_node
mfsplay_tree_splay_helper (mfsplay_tree sp,
                           mfsplay_tree_key key,
                           mfsplay_tree_node * node,
                           mfsplay_tree_node * parent,
                           mfsplay_tree_node * grandparent)
{
  mfsplay_tree_node *next;
  mfsplay_tree_node n;
  int comparison;

  n = *node;

  if (!n)
    return *parent;

  comparison = ((key > n->key) ? 1 : ((key < n->key) ? -1 : 0));

  if (comparison == 0)
    /* We've found the target.  */
    next = 0;
  else if (comparison < 0)
    /* The target is to the left.  */
    next = &n->left;
  else
    /* The target is to the right.  */
    next = &n->right;

  if (next)
    {
      /* Check whether our recursion depth is too high.  Abort this search,
         and signal that a rebalance is required to continue.  */
      if (sp->depth > sp->max_depth)
        {
          sp->rebalance_p = 1;
          return n;
        }

      /* Continue down the tree.  */
      sp->depth ++;
      n = mfsplay_tree_splay_helper (sp, key, next, node, parent);
      sp->depth --;

      /* The recursive call will change the place to which NODE
         points.  */
      if (*node != n || sp->rebalance_p)
        return n;
    }

  if (!parent)
    /* NODE is the root.  We are done.  */
    return n;

  /* First, handle the case where there is no grandparent (i.e.,
     *PARENT is the root of the tree.)  This is a single "zig"
     rotation.  */
  if (!grandparent)
    {
      if (n == (*parent)->left)
        {
          *node = n->right;
          n->right = *parent;
        }
      else
        {
          *node = n->left;
          n->left = *parent;
        }
      *parent = n;
      return n;
    }

  /* Next handle the cases where both N and *PARENT are left children,
     or where both are right children (the "zig-zig" rotations).  */
  if (n == (*parent)->left && *parent == (*grandparent)->left)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->left = p->right;
      p->right = *grandparent;
      p->left = n->right;
      n->right = p;
      *grandparent = n;
      return n;
    }
  else if (n == (*parent)->right && *parent == (*grandparent)->right)
    {
      mfsplay_tree_node p = *parent;

      (*grandparent)->right = p->left;
      p->left = *grandparent;
      p->right = n->left;
      n->left = p;
      *grandparent = n;
      return n;
    }

  /* Finally, deal with the case where N is a left child, but *PARENT
     is a right child, or vice versa (the "zig-zag" rotations).  */
  if (n == (*parent)->left)
    {
      (*parent)->left = n->right;
      n->right = *parent;
      (*grandparent)->right = n->left;
      n->left = *grandparent;
      *grandparent = n;
      return n;
    }
  else
    {
      (*parent)->right = n->left;
      n->left = *parent;
      (*grandparent)->left = n->right;
      n->right = *grandparent;
      *grandparent = n;
      return n;
    }
}
2507 | ||
2508 | ||
2509 | ||
2510 | static int | |
2511 | mfsplay_tree_rebalance_helper1 (mfsplay_tree_node n, void *array_ptr) | |
2512 | { | |
2513 | mfsplay_tree_node **p = array_ptr; | |
2514 | *(*p) = n; | |
2515 | (*p)++; | |
2516 | return 0; | |
2517 | } | |
2518 | ||
2519 | ||
/* Rebuild a balanced subtree from ARRAY (node pointers in key order)
   over the inclusive index range [LOW, HIGH]: the middle element
   becomes the subtree root.  Returns that root.  */
static mfsplay_tree_node
mfsplay_tree_rebalance_helper2 (mfsplay_tree_node * array, unsigned low,
                                unsigned high)
{
  unsigned middle = low + (high - low) / 2;
  mfsplay_tree_node n = array[middle];

  /* Note that since we're producing a balanced binary tree, it is not a problem
     that this function is recursive.  */
  /* The `low + 1 <= middle' guard also prevents unsigned underflow in
     `middle - 1' when middle == 0.  */
  if (low + 1 <= middle)
    n->left = mfsplay_tree_rebalance_helper2 (array, low, middle - 1);
  else
    n->left = NULL;

  if (middle + 1 <= high)
    n->right = mfsplay_tree_rebalance_helper2 (array, middle + 1, high);
  else
    n->right = NULL;

  return n;
}
2541 | ||
2542 | ||
2543 | /* Rebalance the entire tree. Do this by copying all the node | |
2544 | pointers into an array, then cleverly re-linking them. */ | |
static void
mfsplay_tree_rebalance (mfsplay_tree sp)
{
  mfsplay_tree_node *all_nodes, *all_nodes_1;

  /* A tree of one or two nodes is already as balanced as it gets.  */
  if (sp->num_keys <= 2)
    return;

  /* NOTE(review): the xmalloc result is used unchecked below — a NULL
     return on OOM would crash in the foreach; confirm xmalloc policy.  */
  all_nodes = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * sp->num_keys);

  /* Traverse all nodes to copy their addresses into this array.  */
  all_nodes_1 = all_nodes;
  mfsplay_tree_foreach (sp, mfsplay_tree_rebalance_helper1,
                        (void *) &all_nodes_1);

  /* Relink all the nodes.  */
  sp->root = mfsplay_tree_rebalance_helper2 (all_nodes, 0, sp->num_keys - 1);

  mfsplay_tree_free (all_nodes);
}
2565 | ||
2566 | ||
2567 | /* Splay SP around KEY. */ | |
static void
mfsplay_tree_splay (mfsplay_tree sp, mfsplay_tree_key key)
{
  if (sp->root == 0)
    return;

  /* If we just splayed the tree with the same key, do nothing.  */
  if (sp->last_splayed_key_p &&
      (sp->last_splayed_key == key))
    return;

  /* Compute a maximum recursion depth for a splay tree with NUM nodes.
     The idea is to limit excessive stack usage if we're facing
     degenerate access patterns.  Unfortunately such patterns can occur
     e.g. during static initialization, where many static objects might
     be registered in increasing address sequence, or during a case where
     large tree-like heap data structures are allocated quickly.

     On x86, this corresponds to roughly 200K of stack usage.
     XXX: For libmudflapth, this could be a function of __mf_opts.thread_stack.  */
  sp->max_depth = 2500;
  sp->rebalance_p = sp->depth = 0;

  mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);
  /* If the helper bailed out at max_depth, rebalance the whole tree
     and retry once; a second bail-out should be impossible.  */
  if (sp->rebalance_p)
    {
      mfsplay_tree_rebalance (sp);

      sp->rebalance_p = sp->depth = 0;
      mfsplay_tree_splay_helper (sp, key, &sp->root, NULL, NULL);

      if (sp->rebalance_p)
        abort ();
    }


  /* Cache this splay key.  */
  sp->last_splayed_key = key;
  sp->last_splayed_key_p = 1;
}
2608 | ||
2609 | ||
2610 | ||
2611 | /* Allocate a new splay tree. */ | |
2612 | static mfsplay_tree | |
2613 | mfsplay_tree_new () | |
2614 | { | |
2615 | mfsplay_tree sp = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_s)); | |
2616 | sp->root = NULL; | |
2617 | sp->last_splayed_key_p = 0; | |
2618 | sp->num_keys = 0; | |
2619 | ||
2620 | return sp; | |
2621 | } | |
2622 | ||
2623 | ||
2624 | ||
2625 | /* Insert a new node (associating KEY with DATA) into SP. If a | |
2626 | previous node with the indicated KEY exists, its data is replaced | |
2627 | with the new value. Returns the new node. */ | |
2628 | static mfsplay_tree_node | |
2629 | mfsplay_tree_insert (mfsplay_tree sp, mfsplay_tree_key key, mfsplay_tree_value value) | |
2630 | { | |
2631 | int comparison = 0; | |
2632 | ||
2633 | mfsplay_tree_splay (sp, key); | |
2634 | ||
2635 | if (sp->root) | |
73c3d568 FCE |
2636 | comparison = ((sp->root->key > key) ? 1 : |
2637 | ((sp->root->key < key) ? -1 : 0)); | |
fc5515a8 FCE |
2638 | |
2639 | if (sp->root && comparison == 0) | |
2640 | { | |
2641 | /* If the root of the tree already has the indicated KEY, just | |
2642 | replace the value with VALUE. */ | |
2643 | sp->root->value = value; | |
2644 | } | |
2645 | else | |
2646 | { | |
2647 | /* Create a new node, and insert it at the root. */ | |
2648 | mfsplay_tree_node node; | |
2649 | ||
2650 | node = mfsplay_tree_xmalloc (sizeof (struct mfsplay_tree_node_s)); | |
2651 | node->key = key; | |
2652 | node->value = value; | |
2653 | sp->num_keys++; | |
2654 | if (!sp->root) | |
2655 | node->left = node->right = 0; | |
2656 | else if (comparison < 0) | |
2657 | { | |
2658 | node->left = sp->root; | |
2659 | node->right = node->left->right; | |
2660 | node->left->right = 0; | |
2661 | } | |
2662 | else | |
2663 | { | |
2664 | node->right = sp->root; | |
2665 | node->left = node->right->left; | |
2666 | node->right->left = 0; | |
2667 | } | |
2668 | ||
2669 | sp->root = node; | |
2670 | sp->last_splayed_key_p = 0; | |
2671 | } | |
2672 | ||
2673 | return sp->root; | |
2674 | } | |
2675 | ||
2676 | /* Remove KEY from SP. It is not an error if it did not exist. */ | |
2677 | ||
2678 | static void | |
2679 | mfsplay_tree_remove (mfsplay_tree sp, mfsplay_tree_key key) | |
2680 | { | |
2681 | mfsplay_tree_splay (sp, key); | |
2682 | sp->last_splayed_key_p = 0; | |
73c3d568 | 2683 | if (sp->root && (sp->root->key == key)) |
fc5515a8 FCE |
2684 | { |
2685 | mfsplay_tree_node left, right; | |
2686 | left = sp->root->left; | |
2687 | right = sp->root->right; | |
2688 | /* Delete the root node itself. */ | |
2689 | mfsplay_tree_free (sp->root); | |
2690 | sp->num_keys--; | |
2691 | /* One of the children is now the root. Doesn't matter much | |
2692 | which, so long as we preserve the properties of the tree. */ | |
2693 | if (left) | |
2694 | { | |
2695 | sp->root = left; | |
fb925a51 | 2696 | /* If there was a right child as well, hang it off the |
fc5515a8 FCE |
2697 | right-most leaf of the left child. */ |
2698 | if (right) | |
2699 | { | |
2700 | while (left->right) | |
2701 | left = left->right; | |
2702 | left->right = right; | |
2703 | } | |
2704 | } | |
2705 | else | |
2706 | sp->root = right; | |
2707 | } | |
2708 | } | |
2709 | ||
fb925a51 | 2710 | /* Lookup KEY in SP, returning VALUE if present, and NULL |
fc5515a8 FCE |
2711 | otherwise. */ |
2712 | ||
2713 | static mfsplay_tree_node | |
2714 | mfsplay_tree_lookup (mfsplay_tree sp, mfsplay_tree_key key) | |
2715 | { | |
2716 | mfsplay_tree_splay (sp, key); | |
73c3d568 | 2717 | if (sp->root && (sp->root->key == key)) |
fc5515a8 FCE |
2718 | return sp->root; |
2719 | else | |
2720 | return 0; | |
2721 | } | |
2722 | ||
2723 | ||
2724 | /* Return the immediate predecessor KEY, or NULL if there is no | |
2725 | predecessor. KEY need not be present in the tree. */ | |
2726 | ||
2727 | static mfsplay_tree_node | |
2728 | mfsplay_tree_predecessor (mfsplay_tree sp, mfsplay_tree_key key) | |
2729 | { | |
2730 | int comparison; | |
2731 | mfsplay_tree_node node; | |
2732 | /* If the tree is empty, there is certainly no predecessor. */ | |
2733 | if (!sp->root) | |
2734 | return NULL; | |
2735 | /* Splay the tree around KEY. That will leave either the KEY | |
2736 | itself, its predecessor, or its successor at the root. */ | |
2737 | mfsplay_tree_splay (sp, key); | |
73c3d568 FCE |
2738 | comparison = ((sp->root->key > key) ? 1 : |
2739 | ((sp->root->key < key) ? -1 : 0)); | |
2740 | ||
fc5515a8 FCE |
2741 | /* If the predecessor is at the root, just return it. */ |
2742 | if (comparison < 0) | |
2743 | return sp->root; | |
2744 | /* Otherwise, find the rightmost element of the left subtree. */ | |
2745 | node = sp->root->left; | |
2746 | if (node) | |
2747 | while (node->right) | |
2748 | node = node->right; | |
2749 | return node; | |
2750 | } | |
2751 | ||
2752 | /* Return the immediate successor KEY, or NULL if there is no | |
2753 | successor. KEY need not be present in the tree. */ | |
2754 | ||
2755 | static mfsplay_tree_node | |
2756 | mfsplay_tree_successor (mfsplay_tree sp, mfsplay_tree_key key) | |
2757 | { | |
2758 | int comparison; | |
2759 | mfsplay_tree_node node; | |
2760 | /* If the tree is empty, there is certainly no successor. */ | |
2761 | if (!sp->root) | |
2762 | return NULL; | |
2763 | /* Splay the tree around KEY. That will leave either the KEY | |
2764 | itself, its predecessor, or its successor at the root. */ | |
2765 | mfsplay_tree_splay (sp, key); | |
73c3d568 FCE |
2766 | comparison = ((sp->root->key > key) ? 1 : |
2767 | ((sp->root->key < key) ? -1 : 0)); | |
fc5515a8 FCE |
2768 | /* If the successor is at the root, just return it. */ |
2769 | if (comparison > 0) | |
2770 | return sp->root; | |
2771 | /* Otherwise, find the leftmost element of the right subtree. */ | |
2772 | node = sp->root->right; | |
2773 | if (node) | |
2774 | while (node->left) | |
2775 | node = node->left; | |
2776 | return node; | |
2777 | } | |
2778 | ||
2779 | /* Call FN, passing it the DATA, for every node in SP, following an | |
2780 | in-order traversal. If FN every returns a non-zero value, the | |
2781 | iteration ceases immediately, and the value is returned. | |
2782 | Otherwise, this function returns 0. | |
fb925a51 | 2783 | |
fc5515a8 FCE |
2784 | This function simulates recursion using dynamically allocated |
2785 | arrays, since it may be called from mfsplay_tree_rebalance(), which | |
2786 | in turn means that the tree is already uncomfortably deep for stack | |
2787 | space limits. */ | |
2788 | static int | |
2789 | mfsplay_tree_foreach (mfsplay_tree st, mfsplay_tree_foreach_fn fn, void *data) | |
2790 | { | |
2791 | mfsplay_tree_node *stack1; | |
2792 | char *stack2; | |
2793 | unsigned sp; | |
2794 | int val = 0; | |
2795 | enum s { s_left, s_here, s_right, s_up }; | |
2796 | ||
2797 | if (st->root == NULL) /* => num_keys == 0 */ | |
2798 | return 0; | |
2799 | ||
2800 | stack1 = mfsplay_tree_xmalloc (sizeof (mfsplay_tree_node) * st->num_keys); | |
2801 | stack2 = mfsplay_tree_xmalloc (sizeof (char) * st->num_keys); | |
2802 | ||
2803 | sp = 0; | |
2804 | stack1 [sp] = st->root; | |
2805 | stack2 [sp] = s_left; | |
2806 | ||
2807 | while (1) | |
2808 | { | |
2809 | mfsplay_tree_node n; | |
2810 | enum s s; | |
2811 | ||
2812 | n = stack1 [sp]; | |
2813 | s = stack2 [sp]; | |
2814 | ||
2815 | /* Handle each of the four possible states separately. */ | |
2816 | ||
2817 | /* 1: We're here to traverse the left subtree (if any). */ | |
2818 | if (s == s_left) | |
2819 | { | |
2820 | stack2 [sp] = s_here; | |
2821 | if (n->left != NULL) | |
2822 | { | |
2823 | sp ++; | |
2824 | stack1 [sp] = n->left; | |
2825 | stack2 [sp] = s_left; | |
2826 | } | |
2827 | } | |
2828 | ||
2829 | /* 2: We're here to traverse this node. */ | |
2830 | else if (s == s_here) | |
2831 | { | |
2832 | stack2 [sp] = s_right; | |
2833 | val = (*fn) (n, data); | |
2834 | if (val) break; | |
2835 | } | |
2836 | ||
2837 | /* 3: We're here to traverse the right subtree (if any). */ | |
2838 | else if (s == s_right) | |
2839 | { | |
2840 | stack2 [sp] = s_up; | |
2841 | if (n->right != NULL) | |
2842 | { | |
2843 | sp ++; | |
2844 | stack1 [sp] = n->right; | |
2845 | stack2 [sp] = s_left; | |
2846 | } | |
2847 | } | |
2848 | ||
2849 | /* 4: We're here after both subtrees (if any) have been traversed. */ | |
2850 | else if (s == s_up) | |
2851 | { | |
2852 | /* Pop the stack. */ | |
2853 | if (sp == 0) break; /* Popping off the root note: we're finished! */ | |
2854 | sp --; | |
2855 | } | |
2856 | ||
2857 | else | |
2858 | abort (); | |
2859 | } | |
2860 | ||
2861 | mfsplay_tree_free (stack1); | |
2862 | mfsplay_tree_free (stack2); | |
2863 | return val; | |
2864 | } |