Int VG_(fd_soft_limit) = -1;
Int VG_(fd_hard_limit) = -1;
+/* Useful addresses extracted from the client */
+/* Where is the __libc_freeres_wrapper routine we made? */
+Addr VG_(client___libc_freeres_wrapper) = 0;
+
/*--------------------------------------------------------------------*/
/*--- end ---*/
/* A structure which contains information pertaining to one mapped
- text segment. (typedef in tool.h) */
+ text segment. This type is exported only abstractly - in
+ pub_tool_debuginfo.h. */
struct _SegInfo {
struct _SegInfo* next; /* list of SegInfos */
The GNU General Public License is contained in the file COPYING.
*/
+/*
+ Stabs reader greatly improved by Nick Nethercote, Apr 02.
+*/
+
+
#include "pub_core_basics.h"
#include "pub_core_threadstate.h"
#include "pub_core_debuginfo.h"
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
-#include "pub_core_redir.h"
-#include "pub_core_tooliface.h" // For VG_(needs).data_syms
+#include "pub_core_redir.h" // VG_(redir_notify_{new,delete}_SegInfo)
+#include "pub_core_tooliface.h" // VG_(needs).data_syms
#include "pub_core_aspacemgr.h"
}
-/*------------------------------------------------------------*/
-/*--- ---*/
-/*------------------------------------------------------------*/
-
-/* Majorly rewritten Sun 3 Feb 02 to enable loading symbols from
- dlopen()ed libraries, which is something that KDE3 does a lot.
-
- Stabs reader greatly improved by Nick Nethercote, Apr 02.
-*/
-
-static void freeSegInfo ( SegInfo* si )
-{
- struct strchunk *chunk, *next;
- vg_assert(si != NULL);
- if (si->filename) VG_(arena_free)(VG_AR_SYMTAB, si->filename);
- if (si->symtab) VG_(arena_free)(VG_AR_SYMTAB, si->symtab);
- if (si->loctab) VG_(arena_free)(VG_AR_SYMTAB, si->loctab);
- if (si->scopetab) VG_(arena_free)(VG_AR_SYMTAB, si->scopetab);
- if (si->cfisi) VG_(arena_free)(VG_AR_SYMTAB, si->cfisi);
-
- for(chunk = si->strchunks; chunk != NULL; chunk = next) {
- next = chunk->next;
- VG_(arena_free)(VG_AR_SYMTAB, chunk);
- }
- VG_(arena_free)(VG_AR_SYMTAB, si);
-}
-
-
/*------------------------------------------------------------*/
/*--- Adding stuff ---*/
/*------------------------------------------------------------*/
TRACE_SYMTAB("choosing between '%s' and '%s'\n", a->name, b->name);
+ /* MPI hack: prefer PMPI_Foo over MPI_Foo */
+ if (0==VG_(strncmp)(a->name, "MPI_", 4)
+ && 0==VG_(strncmp)(b->name, "PMPI_", 5)
+ && 0==VG_(strcmp)(a->name, 1+b->name))
+ return b;
+ else
+ if (0==VG_(strncmp)(b->name, "MPI_", 4)
+ && 0==VG_(strncmp)(a->name, "PMPI_", 5)
+ && 0==VG_(strcmp)(b->name, 1+a->name))
+ return a;
+
/* Select the shortest unversioned name */
if (vlena < vlenb)
return a;
# endif
name = ML_(addStr) ( si, sym_name, -1 );
vg_assert(name != NULL);
-
- /*
- * Is this symbol a magic valgrind-intercept symbol? If so,
- * hand this off to the redir module.
- *
- * Note: this function can change the symbol name just added to
- * the string table. Importantly, it never makes it bigger.
- */
- if (do_intercepts) {
- VG_(maybe_redir_or_notify)( name, sym_addr_really );
- }
-
risym.addr = sym_addr_really;
risym.size = sym->st_size;
risym.name = name;
struct vki_stat stat_buf;
oimage = (Addr)NULL;
- if (VG_(clo_verbosity) > 1)
+ if (VG_(clo_verbosity) > 1 || VG_(clo_trace_redir))
VG_(message)(Vg_DebugMsg, "Reading syms from %s (%p)",
si->filename, si->start );
o_phdr = &((ElfXX_Phdr *)(oimage + ehdr->e_phoff))[i];
- // Try to get the soname.
+ /* Try to get the soname. If there isn't one, use "NONE".
+ The seginfo needs to have some kind of soname in order to
+ facilitate writing redirect functions, since all redirect
+ specifications require a soname (pattern). */
if (o_phdr->p_type == PT_DYNAMIC && si->soname == NULL) {
const ElfXX_Dyn *dyn = (const ElfXX_Dyn *)(oimage + o_phdr->p_offset);
Int stroff = -1;
for(j = 0; dyn[j].d_tag != DT_NULL; j++) {
switch(dyn[j].d_tag) {
case DT_SONAME:
- stroff = dyn[j].d_un.d_val;
+ stroff = dyn[j].d_un.d_val;
break;
case DT_STRTAB:
}
}
+ /* If, after looking at all the program headers, we still didn't
+ find a soname, add a fake one. */
+ if (si->soname == NULL) {
+ TRACE_SYMTAB("soname(fake)=\"NONE\"\n");
+ si->soname = "NONE";
+ }
+
TRACE_SYMTAB("shoff = %d, shnum = %d, size = %d, n_vg_oimage = %d\n",
ehdr->e_shoff, ehdr->e_shnum, sizeof(ElfXX_Shdr), n_oimage );
return si;
}
+/* Release all heap storage owned by 'si' -- its filename, the symbol,
+   location and scope tables, the CFI table, and the chain of string
+   chunks -- and finally 'si' itself.  All of it lives in the
+   VG_AR_SYMTAB arena.  'si' must be non-NULL and must already have
+   been unlinked from any list by the caller. */
+static void freeSegInfo ( SegInfo* si )
+{
+   struct strchunk *chunk, *next;
+   vg_assert(si != NULL);
+   if (si->filename) VG_(arena_free)(VG_AR_SYMTAB, si->filename);
+   if (si->symtab) VG_(arena_free)(VG_AR_SYMTAB, si->symtab);
+   if (si->loctab) VG_(arena_free)(VG_AR_SYMTAB, si->loctab);
+   if (si->scopetab) VG_(arena_free)(VG_AR_SYMTAB, si->scopetab);
+   if (si->cfisi) VG_(arena_free)(VG_AR_SYMTAB, si->cfisi);
+
+   /* Grab each chunk's 'next' pointer before freeing the chunk that
+      contains it. */
+   for(chunk = si->strchunks; chunk != NULL; chunk = next) {
+      next = chunk->next;
+      VG_(arena_free)(VG_AR_SYMTAB, chunk);
+   }
+   VG_(arena_free)(VG_AR_SYMTAB, si);
+}
+
+
SegInfo *VG_(read_seg_symbols) ( Addr seg_addr, SizeT seg_len,
OffT seg_offset, const Char* seg_filename)
{
canonicaliseScopetab ( si );
canonicaliseCfiSI ( si );
- /* do redirects */
- VG_(resolve_existing_redirs_with_seginfo)( si );
+ /* notify m_redir about it */
+ VG_(redir_notify_new_SegInfo)( si );
}
return si;
while (curr) {
if (start == curr->start) {
// Found it; remove from list and free it.
- if (VG_(clo_verbosity) > 1)
+ if (VG_(clo_verbosity) > 1 || VG_(clo_trace_redir))
VG_(message)(Vg_DebugMsg,
- "discard syms at %p-%p in %s due to munmap()",
+ "Discarding syms at %p-%p in %s due to munmap()",
start, start+length,
curr->filename ? curr->filename : (Char *)"???");
vg_assert(*prev_next_ptr == curr);
*prev_next_ptr = curr->next;
+ VG_(redir_notify_delete_SegInfo)( curr );
freeSegInfo(curr);
return;
}
}
-/* SLOW (Linear search). Try and map a symbol name to an address.
- Since this is searching in the direction opposite to which the
- table is designed we have no option but to do a complete linear
- scan of the table. Returns NULL if not found. */
-
-Addr VG_(reverse_search_one_symtab) ( const SegInfo* si, const Char* name )
-{
- UInt i;
- for (i = 0; i < si->symtab_used; i++) {
- if (0)
- VG_(printf)("%p %s\n", si->symtab[i].addr, si->symtab[i].name);
- if (0 == VG_(strcmp)(name, si->symtab[i].name))
- return si->symtab[i].addr;
- }
- return (Addr)NULL;
-}
-
-
/* Search all symtabs that we know about to locate ptr. If found, set
*psi to the relevant SegInfo, and *symno to the symtab entry number
within that. If not found, *psi is set to NULL. */
for(si = segInfo_list; si != NULL; si = si->next) {
if (a >= si->start && a < (si->start + si->size)) {
+
if (0)
- VG_(printf)("addr=%p si=%p %s got=%p %d plt=%p %d data=%p %d bss=%p %d\n",
- a, si, si->filename,
- si->got_start_vma, si->got_size,
- si->plt_start_vma, si->plt_size,
- si->data_start_vma, si->data_size,
- si->bss_start_vma, si->bss_size);
+ VG_(printf)(
+ "addr=%p si=%p %s got=%p %d plt=%p %d data=%p %d bss=%p %d\n",
+ a, si, si->filename,
+ si->got_start_vma, si->got_size,
+ si->plt_start_vma, si->plt_size,
+ si->data_start_vma, si->data_size,
+ si->bss_start_vma, si->bss_size);
+
ret = Vg_SectText;
if (a >= si->data_start_vma && a < (si->data_start_vma + si->data_size))
return ret;
}
+/* Return the number of entries in 'si's symbol table. */
+Int VG_(seginfo_syms_howmany) ( const SegInfo *si )
+{
+   return si->symtab_used;
+}
+
+/* Copy out details of symbol number 'idx' in 'si's symbol table,
+   where 0 <= idx < VG_(seginfo_syms_howmany)(si).  Any of the
+   out-parameters may be NULL, in which case that field is simply not
+   returned. */
+void VG_(seginfo_syms_getidx) ( const SegInfo *si,
+                                Int idx,
+                                /*OUT*/Addr* addr,
+                                /*OUT*/UInt* size,
+                                /*OUT*/HChar** name )
+{
+   vg_assert(idx >= 0 && idx < si->symtab_used);
+   if (addr) *addr = si->symtab[idx].addr;
+   if (size) *size = si->symtab[idx].size;
+   /* NOTE(review): the cast presumably drops a const or Char/HChar
+      mismatch on the stored name; callers should not modify the
+      returned string -- confirm intended contract. */
+   if (name) *name = (HChar*)si->symtab[idx].name;
+}
+
+
/*--------------------------------------------------------------------*/
/*--- end ---*/
/*--------------------------------------------------------------------*/
#include "pub_core_libcbase.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
+#include "pub_core_libcassert.h"
#include "demangle.h"
+#include "pub_core_libcprint.h"
+
+/* The demangler's job is to take a raw symbol name and turn it into
+ something a Human Bean can understand. There are two levels of
+ mangling.
+
+ 1. First, C++ names are mangled by the compiler. So we'll have to
+ undo that.
+
+ 2. Optionally, in relatively rare cases, the resulting name is then
+ itself encoded using Z-escaping (see pub_core_redir.h) so as to
+ become part of a redirect-specification.
+
+ Therefore, VG_(demangle) first tries to undo (2). If successful,
+ the soname part is discarded (humans don't want to see that).
+ Then, it tries to undo (1) (using demangling code from GNU/FSF).
+*/
+
+/* This is the main, standard demangler entry point. */
void VG_(demangle) ( Char* orig, Char* result, Int result_size )
{
- Char* demangled = NULL;
+# define N_ZBUF 4096
+ HChar* demangled = NULL;
+ HChar z_demangled[N_ZBUF];
- if (VG_(clo_demangle))
- demangled = VG_(cplus_demangle) ( orig, DMGL_ANSI | DMGL_PARAMS );
+ if (!VG_(clo_demangle)) {
+ VG_(strncpy_safely)(result, orig, result_size);
+ return;
+ }
+
+ /* Demangling was requested. First see if it's a Z-mangled
+ intercept specification. The fastest way is just to attempt a
+ Z-demangling (with NULL soname buffer, since we're not
+ interested in that). */
+ if (VG_(maybe_Z_demangle)( orig, NULL,0,/*soname*/
+ z_demangled, N_ZBUF, NULL)) {
+ orig = z_demangled;
+ }
+
+ demangled = VG_(cplus_demangle) ( orig, DMGL_ANSI | DMGL_PARAMS );
if (demangled) {
VG_(strncpy_safely)(result, demangled, result_size);
// very rarely (ie. I've heard of it twice in 3 years), the demangler
// does leak. But, we can't do much about it, and it's not a disaster,
// so we just let it slide without aborting or telling the user.
+
+# undef N_ZBUF
+}
+
+
+/*------------------------------------------------------------*/
+/*--- DEMANGLE Z-ENCODED NAMES ---*/
+/*------------------------------------------------------------*/
+
+/* Demangle a Z-encoded name as described in pub_tool_redir.h.
+ Z-encoded names are used by Valgrind for doing function
+ interception/wrapping.
+
+ Demangle 'sym' into its soname and fnname parts, putting them in
+ the specified buffers. Returns a Bool indicating whether the
+   demangling failed or not. A failure can occur because the prefix
+ isn't recognised, the internal Z-escaping is wrong, or because one
+ or the other (or both) of the output buffers becomes full. Passing
+ 'so' as NULL is acceptable if the caller is only interested in the
+ function name part. */
+
+Bool VG_(maybe_Z_demangle) ( const HChar* sym,
+                             /*OUT*/HChar* so, Int soLen,
+                             /*OUT*/HChar* fn, Int fnLen,
+                             /*OUT*/Bool* isWrap )
+{
+   /* Append 'ch' to the soname buffer (a no-op when the caller passed
+      so == NULL).  The buffer is kept NUL-terminated after every
+      write; running out of room sets 'oflow' rather than writing past
+      the end. */
+#  define EMITSO(ch) \
+      do { \
+         if (so) { \
+            if (soi >= soLen) { \
+               so[soLen-1] = 0; oflow = True; \
+            } else { \
+               so[soi++] = ch; so[soi] = 0; \
+            } \
+         } \
+      } while (0)
+   /* Same, but for the function-name buffer, which is mandatory. */
+#  define EMITFN(ch) \
+      do { \
+         if (fni >= fnLen) { \
+            fn[fnLen-1] = 0; oflow = True; \
+         } else { \
+            fn[fni++] = ch; fn[fni] = 0; \
+         } \
+      } while (0)
+
+   Bool error, oflow, valid, fn_is_encoded;
+   Int soi, fni, i;
+
+   /* so/soLen must be supplied together or not at all; an fnname
+      buffer is always required. */
+   vg_assert(soLen > 0 || (soLen == 0 && so == NULL));
+   vg_assert(fnLen > 0);
+   error = False;
+   oflow = False;
+   soi = 0;
+   fni = 0;
+
+   /* The name must start "_vg[rwn]Z[ZU]_".  Byte 3 gives the kind of
+      intercept ('w' marks a wrap; 'r' and 'n' are also accepted but
+      not distinguished here).  Byte 5 says whether the fnname part is
+      itself Z-encoded ('Z') or plain text ('U'). */
+   valid = sym[0] == '_'
+           && sym[1] == 'v'
+           && sym[2] == 'g'
+           && (sym[3] == 'r' || sym[3] == 'w' || sym[3] == 'n')
+           && sym[4] == 'Z'
+           && (sym[5] == 'Z' || sym[5] == 'U')
+           && sym[6] == '_';
+   if (!valid)
+      return False;
+
+   fn_is_encoded = sym[5] == 'Z';
+
+   if (isWrap)
+      *isWrap = sym[3] == 'w';
+
+   /* Now scan the Z-encoded soname. */
+   i = 7;
+   while (True) {
+
+      if (sym[i] == '_')
+         /* Found the delimiter.  Move on to the fnname loop. */
+         break;
+
+      if (sym[i] == 0) {
+         /* Ran off the end of the string before finding the '_'
+            delimiter: malformed. */
+         error = True;
+         goto out;
+      }
+
+      if (sym[i] != 'Z') {
+         EMITSO(sym[i]);
+         i++;
+         continue;
+      }
+
+      /* We've got a Z-escape. */
+      i++;
+      switch (sym[i]) {
+         case 'a': EMITSO('*'); break;
+         case 'p': EMITSO('+'); break;
+         case 'c': EMITSO(':'); break;
+         case 'd': EMITSO('.'); break;
+         case 'u': EMITSO('_'); break;
+         case 'h': EMITSO('-'); break;
+         case 's': EMITSO(' '); break;
+         case 'Z': EMITSO('Z'); break;
+         case 'A': EMITSO('@'); break;
+         default: error = True; goto out;
+      }
+      i++;
+   }
+
+   vg_assert(sym[i] == '_');
+   i++;
+
+   /* Now deal with the function name part. */
+   if (!fn_is_encoded) {
+
+      /* simple; just copy. */
+      while (True) {
+         if (sym[i] == 0)
+            break;
+         EMITFN(sym[i]);
+         i++;
+      }
+      goto out;
+
+   }
+
+   /* else use a Z-decoding loop like with soname */
+   while (True) {
+
+      if (sym[i] == 0)
+         break;
+
+      if (sym[i] != 'Z') {
+         EMITFN(sym[i]);
+         i++;
+         continue;
+      }
+
+      /* We've got a Z-escape. */
+      i++;
+      switch (sym[i]) {
+         case 'a': EMITFN('*'); break;
+         case 'p': EMITFN('+'); break;
+         case 'c': EMITFN(':'); break;
+         case 'd': EMITFN('.'); break;
+         case 'u': EMITFN('_'); break;
+         case 'h': EMITFN('-'); break;
+         case 's': EMITFN(' '); break;
+         case 'Z': EMITFN('Z'); break;
+         case 'A': EMITFN('@'); break;
+         default: error = True; goto out;
+      }
+      i++;
+   }
+
+  out:
+   /* Terminate both buffers.  EMITSO is still a no-op here if the
+      caller passed so == NULL. */
+   EMITSO(0);
+   EMITFN(0);
+
+   if (error) {
+      /* Something's wrong.  Give up. */
+      VG_(message)(Vg_UserMsg, "m_demangle: error Z-demangling: %s", sym);
+      return False;
+   }
+   if (oflow) {
+      /* It didn't fit.  Give up. */
+      VG_(message)(Vg_UserMsg, "m_demangle: oflow Z-demangling: %s", sym);
+      return False;
+   }
+
+   return True;
}
+
/*--------------------------------------------------------------------*/
/*--- end ---*/
/*--------------------------------------------------------------------*/
/*------------------------------------------------------------*/
-/*--- The dispatch loop. ---*/
+/*--- ---*/
+/*--- The dispatch loop. VG_(run_innerloop) is used to ---*/
+/*--- run all translations except no-redir ones. ---*/
+/*--- ---*/
/*------------------------------------------------------------*/
/*----------------------------------------------------*/
popl %ebx
ret
-
+
+/*------------------------------------------------------------*/
+/*--- ---*/
+/*--- A special dispatcher, for running no-redir ---*/
+/*--- translations. Just runs the given translation once. ---*/
+/*--- ---*/
+/*------------------------------------------------------------*/
+
+/* signature:
+void VG_(run_a_noredir_translation) ( UWord* argblock );
+*/
+
+/* Run a no-redir translation. argblock points to 4 UWords, 2 to carry args
+ and 2 to carry results:
+ 0: input: ptr to translation
+ 1: input: ptr to guest state
+ 2: output: next guest PC
+ 3: output: guest state pointer afterwards (== thread return code)
+*/
+.align 16
+.global VG_(run_a_noredir_translation)
+VG_(run_a_noredir_translation):
+	/* Save callee-saves regs */
+	pushl %esi
+	pushl %edi
+	pushl %ebp
+	pushl %ebx
+
+	movl 20(%esp), %edi	/* %edi = argblock (20 = 4 pushes + retaddr) */
+	movl 4(%edi), %ebp	/* argblock[1]: guest state ptr -> %ebp */
+	jmp *0(%edi)		/* argblock[0]: jump to the translation */
+	/*NOTREACHED*/
+	ud2
+	/* If the translation has been correctly constructed, we
+	   should resume at the following label. */
+.global VG_(run_a_noredir_translation__return_point)
+VG_(run_a_noredir_translation__return_point):
+	movl 20(%esp), %edi	/* reload argblock ptr */
+	movl %eax, 8(%edi)	/* argblock[2] <- next guest PC */
+	movl %ebp, 12(%edi)	/* argblock[3] <- guest state ptr */
+
+	popl %ebx
+	popl %ebp
+	popl %edi
+	popl %esi
+	ret
+
+
/* Let the linker know we don't need an executable stack */
.section .note.GNU-stack,"",@progbits
StackTrace ips = VG_(extract_StackTrace)(p_min->where);
VG_(translate) ( 0 /* dummy ThreadId; irrelevant due to debugging*/,
ips[0], /*debugging*/True, 0xFE/*verbosity*/,
- /*bbs_done*/0);
+ /*bbs_done*/0,
+ /*allow redir?*/True);
}
p_min->count = 1 << 30;
" --trace-signals=no|yes show signal handling details? [no]\n"
" --trace-symtab=no|yes show symbol table details? [no]\n"
" --trace-cfi=no|yes show call-frame-info details? [no]\n"
+" --trace-redir=no|yes show redirection details? [no]\n"
" --trace-sched=no|yes show thread scheduler details? [no]\n"
" --wait-for-gdb=yes|no pause on startup to wait for gdb attach\n"
#if 0
score_cumul, buf_cumul,
score_here, buf_here, tops[r].addr, name );
VG_(printf)("\n");
- VG_(translate)(0, tops[r].addr, True, VG_(clo_profile_flags), 0);
+ VG_(translate)(0, tops[r].addr, True, VG_(clo_profile_flags), 0, True);
VG_(printf)("=-=-=-=-=-=-=-=-=-=-=-=-=-= end BB rank %d "
"=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n", r);
}
// p: aspacem [so can change ownership of sysinfo pages]
//--------------------------------------------------------------
VG_(debugLog)(1, "main", "Initialise redirects\n");
- VG_(setup_code_redirect_table)();
+ VG_(redir_initialise)();
//--------------------------------------------------------------
// Allow GDB attach
*/
static void final_tidyup(ThreadId tid)
{
- Addr __libc_freeres_wrapper;
+ Addr __libc_freeres_wrapper = VG_(client___libc_freeres_wrapper);
vg_assert(VG_(is_running_thread)(tid));
if ( !VG_(needs).libc_freeres ||
!VG_(clo_run_libc_freeres) ||
- 0 == (__libc_freeres_wrapper = VG_(get_libc_freeres_wrapper)()) )
+ 0 == __libc_freeres_wrapper )
return; /* can't/won't do it */
if (VG_(clo_verbosity) > 2 ||
VG_(message)(Vg_DebugMsg,
"Caught __NR_exit; running __libc_freeres()");
- /* point thread context to point to libc_freeres_wrapper */
+ /* set thread context to point to libc_freeres_wrapper */
VG_(set_IP)(tid, __libc_freeres_wrapper);
// XXX should we use a special stack?
#include "pub_core_transtab.h"
#include "pub_core_tooliface.h" // VG_(needs).malloc_replacement
#include "pub_tool_machine.h" // VG_(fnptr_to_fnentry)
+#include "pub_core_aspacemgr.h" // VG_(am_find_nsegment)
+#include "pub_core_clientstate.h" // VG_(client___libc_freeres_wrapper)
+#include "pub_core_demangle.h" // VG_(maybe_Z_demangle)
+
+
+/* This module is a critical part of the redirection/intercept system.
+ It keeps track of the current intercept state, cleans up the
+ translation caches when that state changes, and finally, answers
+ queries about the whether an address is currently redirected or
+ not. It doesn't do any of the control-flow trickery needed to put
+ the redirections into practice. That is the job of m_translate,
+ which calls here to find out which translations need to be
+ redirected.
+
+ The interface is simple. VG_(redir_initialise) initialises and
+ loads some hardwired redirects which never disappear; this is
+ platform-specific.
+
+ The module is notified of redirection state changes by m_debuginfo.
+ That calls VG_(redir_notify_new_SegInfo) when a new SegInfo (shared
+ object symbol table, basically) appears. Appearance of new symbols
+ can cause new (active) redirections to appear for two reasons: the
+ symbols in the new table may match existing redirection
+ specifications (see comments below), and because the symbols in the
+ new table may themselves supply new redirect specifications which
+ match existing symbols (or ones in the new table).
+
+ Redirect specifications are really symbols with "funny" prefixes
+ (_vgrZU_ and _vgrZZ_). These names tell m_redir that the
+ associated code should replace the standard entry point for some
+ set of functions. The set of functions is specified by a (soname
+ pattern, function name pattern) pair which is encoded in the symbol
+ name following the prefix. The names use a Z-encoding scheme so
+ that they may contain punctuation characters and wildcards (*).
+ The encoding scheme is described in pub_tool_redir.h and is decoded
+ by VG_(maybe_Z_demangle).
+
+ When a shared object is unloaded, this module learns of it via a
+ call to VG_(redir_notify_delete_SegInfo). It then removes from its
+ tables all active redirections in any way associated with that
+ object, and tidies up the translation caches accordingly.
+
+ That takes care of tracking the redirection state. When a
+ translation is actually to be made, m_translate calls to
+ VG_(redir_do_lookup) in this module to find out if the
+ translation's address should be redirected.
+*/
+
+/*------------------------------------------------------------*/
+/*--- Semantics ---*/
+/*------------------------------------------------------------*/
+
+/* The redirector holds two pieces of state:
+
+ Specs - a set of (soname pattern, fnname pattern) -> redir addr
+ Active - a set of orig addr -> (bool, redir addr)
+
+ Active is the currently active set of bindings that the translator
+ consults. Specs is the current set of specifications as harvested
+ from reading symbol tables of the currently loaded objects.
+
+ Active is a pure function of Specs and the current symbol table
+ state (maintained by m_debuginfo). Call the latter SyminfoState.
+
+ Therefore whenever either Specs or SyminfoState changes, Active
+ must be recomputed. [Inefficient if done naively, but this is a
+ spec].
+
+ Active is computed as follows:
+
+ Active = empty
+ for spec in Specs {
+ sopatt = spec.soname pattern
+ fnpatt = spec.fnname pattern
+ redir = spec.redir addr
+ for so matching sopatt in SyminfoState {
+ for fn matching fnpatt in fnnames_of(so) {
+ &fn -> redir is added to Active
+ }
+ }
+ }
+
+ [as an implementation detail, when a binding (orig -> redir) is
+ deleted from Active as a result of recomputing it, then all
+ translations intersecting redir must be deleted. However, this is
+ not part of the spec].
+
+ [Active also depends on where the aspacemgr has decided to put all
+ the pieces of code -- that affects the "orig addr" and "redir addr"
+ values.]
+
+ ---------------------
+
+ That completes the spec, apart from one difficult issue: duplicates.
+
+ Clearly we must impose the requirement that domain(Active) contains
+ no duplicates. The difficulty is how to constrain Specs enough to
+ avoid getting into that situation. It's easy to write specs which
+ could cause conflicting bindings in Active, eg:
+
+ (libpthread.so, pthread_mutex_lock) -> a1
+ (libpthread.so, pthread_*) -> a2
+
+ for a1 != a2. Or even hairier:
+
+ (libpthread.so, pthread_mutex_*) -> a1
+ (libpthread.so, pthread_*_lock) -> a2
+
+ I can't think of any sane way of detecting when an addition to
+ Specs would generate conflicts. However, considering we don't
+ actually want to have a system that allows this, I propose this:
+ all changes to Specs are acceptable. But, when recomputing Active
+ following the change, if the same orig is bound to more than one
+ redir, then the first binding for orig is retained, and all the
+ rest ignored.
+
+ ===========================================================
+ ===========================================================
+ Incremental implementation:
+
+ When a new SegInfo appears:
+ - it may be the source of new specs
+ - it may be the source of new matches for existing specs
+ Therefore:
+
+ - (new Specs x existing SegInfos): scan all symbols in the new
+ SegInfo to find new specs. Each of these needs to be compared
+ against all symbols in all the existing SegInfos to generate
+ new actives.
+
+ - (existing Specs x new SegInfo): scan all symbols in the SegInfo,
+ trying to match them to any existing specs, also generating
+ new actives.
+
+ - (new Specs x new SegInfo): scan all symbols in the new SegInfo,
+ trying to match them against the new specs, to generate new
+ actives.
+
+   - Finally, add the new specs to the current set of specs.
+
+ When adding a new active (s,d) to the Actives:
+ lookup s in Actives
+ if already bound to d, ignore
+ if already bound to something other than d, complain loudly and ignore
+ else add (s,d) to Actives
+ and discard (s,1) and (d,1) (maybe overly conservative)
+
+ When a SegInfo disappears:
+ - delete all specs acquired from the seginfo
+ - delete all actives derived from the just-deleted specs
+ - if each active (s,d) deleted, discard (s,1) and (d,1)
+*/
/*------------------------------------------------------------*/
-/*--- General purpose redirection. ---*/
+/*--- REDIRECTION SPECIFICATIONS ---*/
/*------------------------------------------------------------*/
-#define TRACE_REDIR(format, args...) \
- if (VG_(clo_trace_redir)) { VG_(message)(Vg_DebugMsg, format, ## args); }
+/* A specification of a redirection we want to do. Note that because
+ both the "from" soname and function name may contain wildcards, the
+ spec can match an arbitrary number of times. */
+typedef
+ struct _Spec {
+ struct _Spec* next; /* linked list */
+ HChar* from_sopatt; /* from soname pattern */
+ HChar* from_fnpatt; /* from fnname pattern */
+ Addr to_addr; /* where redirecting to */
+ Bool isWrap; /* wrap or replacement? */
+ Bool mark; /* transient temporary used during matching */
+ }
+ Spec;
+
+/* Top-level data structure. It contains a pointer to a SegInfo and
+ also a list of the specs harvested from that SegInfo. Note that
+ seginfo is allowed to be NULL, meaning that the specs are
+ pre-loaded ones at startup and are not associated with any
+ particular seginfo. */
+typedef
+ struct _TopSpec {
+ struct _TopSpec* next; /* linked list */
+ SegInfo* seginfo; /* symbols etc */
+ Spec* specs; /* specs pulled out of seginfo */
+ Bool mark; /* transient temporary used during deletion */
+ }
+ TopSpec;
-/*
- wraps and redirections, indexed by from_addr
+/* This is the top level list of redirections. m_debuginfo maintains
+ a list of SegInfos, and the idea here is to maintain a list with
+ the same number of elements (in fact, with one more element, so as
+   to record the abovementioned preloaded specifications.)
+static TopSpec* topSpecs = NULL;
- Redirection and wrapping are two distinct mechanisms which Valgrind
- can use to change the client's control flow.
- Redirection intercepts a call to a client function, and re-points it
- to a new piece of code (presumably functionally equivalent). The
- original code is never run.
+/*------------------------------------------------------------*/
+/*--- CURRENTLY ACTIVE REDIRECTIONS ---*/
+/*------------------------------------------------------------*/
- Wrapping does call the client's original code, but calls "before"
- and "after" functions which can inspect (and perhaps modify) the
- function's arguments and return value.
- */
-struct _CodeRedirect {
- Addr from_addr; /* old addr -- MUST BE THE FIRST WORD! */
+/* Represents a currently active binding. If either parent_spec or
+ parent_sym is NULL, then this binding was hardwired at startup and
+ should not be deleted. Same is true if either parent's seginfo
+ field is NULL. */
+typedef
+ struct {
+ Addr from_addr; /* old addr -- MUST BE THE FIRST WORD! */
+ Addr to_addr; /* where redirecting to */
+ TopSpec* parent_spec; /* the TopSpec which supplied the Spec */
+ TopSpec* parent_sym; /* the TopSpec which supplied the symbol */
+ Bool isWrap; /* wrap or replacement? */
+ }
+ Active;
- enum redir_type {
- R_REDIRECT, /* plain redirection */
- R_WRAPPER, /* wrap with valgrind-internal code */
- R_CLIENT_WRAPPER, /* wrap with client-side code */
- } type;
-
- const Char *from_lib; /* library qualifier pattern */
- const Char *from_sym; /* symbol */
+/* The active set is a fast lookup table */
+static OSet* activeSet = NULL;
- Addr to_addr; /* used for redirection -- new addr */
- const FuncWrapper *wrapper; /* used for wrapping */
- CodeRedirect *next; /* next pointer on unresolved list */
-};
+/*------------------------------------------------------------*/
+/*--- FWDses ---*/
+/*------------------------------------------------------------*/
-static OSet* resolved_redirs;
+static void maybe_add_active ( Active /*by value; callee copies*/ );
-// We use a linked list here rather than an OSet, because we want to
-// traverse it and possibly remove elements as we look at them. OSet
-// doesn't support this very well.
-static CodeRedirect *unresolved_redirs = NULL;
+static void* symtab_alloc(SizeT);
+static void symtab_free(void*);
+static HChar* symtab_strdup(HChar*);
+static Bool is_plausible_guest_addr(Addr);
-static Bool soname_matches(const Char *pattern, const Char* soname)
-{
- // pattern must start with "soname:"
- vg_assert(NULL != pattern);
- vg_assert(0 == VG_(strncmp)(pattern, "soname:", 7));
-
- if (NULL == soname)
- return False;
-
- return VG_(string_match)(pattern + 7, soname);
-}
+static void show_redir_state ( HChar* who );
+static void show_active ( HChar* left, Active* act );
-Bool VG_(is_resolved)(const CodeRedirect *redir)
-{
- return redir->from_addr != 0;
-}
+static void handle_maybe_load_notifier( HChar* symbol, Addr addr );
-// Prepends redir to the unresolved list.
-static void add_redir_to_unresolved_list(CodeRedirect *redir)
-{
- redir->next = unresolved_redirs;
- unresolved_redirs = redir;
-}
-static void add_redir_to_resolved_list(CodeRedirect *redir, Bool need_discard)
-{
- vg_assert(redir->from_addr);
-
- switch (redir->type) {
- case R_REDIRECT: {
- TRACE_REDIR(" redir resolved (%s:%s=%p -> %p)",
- redir->from_lib, redir->from_sym, redir->from_addr,
- redir->to_addr);
-
- vg_assert(redir->to_addr != 0);
-
- if (need_discard) {
- /* For some given (from, to) redir, the "from" function got
- loaded before the .so containing "to" became available so
- we need to discard any existing translations involving
- the "from" function.
-
- Note, we only really need to discard the first bb of the
- old entry point, and so we avoid the problem of having to
- figure out how big that bb was -- since it is at least 1
- byte of original code, we can just pass 1 as the original
- size to invalidate_translations() and it will indeed get
- rid of the translation.
-
- Note, this is potentially expensive -- discarding
- translations requires a complete search through all of
- them.
- */
- TRACE_REDIR("Discarding translation due to redirect of already loaded function" );
- TRACE_REDIR(" %s:%s(%p) -> %p)", redir->from_lib, redir->from_sym,
- redir->from_addr, redir->to_addr );
- VG_(discard_translations)((Addr64)redir->from_addr, 1,
- "add_redir_to_resolved_list");
- }
+/*------------------------------------------------------------*/
+/*--- NOTIFICATIONS ---*/
+/*------------------------------------------------------------*/
- // This entails a possible double OSet lookup -- one for Contains(),
- // one for Insert(). If we had OSet_InsertIfNonDup() we could do it
- // with one lookup.
- if ( ! VG_(OSet_Contains)(resolved_redirs, &redir->from_addr) ) {
- VG_(OSet_Insert)(resolved_redirs, redir);
- } else {
- TRACE_REDIR(" redir %s:%s:%p->%p duplicated\n",
- redir->from_lib, redir->from_sym, redir->from_addr,
- redir->to_addr);
- // jrs 20 Nov 05: causes this: m_mallocfree.c:170
- // (mk_plain_bszB): Assertion 'bszB != 0' failed.
- // Perhaps it is an invalid free? Disable for now
- // XXX leak?
- //VG_(arena_free)(VG_AR_SYMTAB, redir);
+static
+void generate_and_add_actives (
+ /* spec list and the owning TopSpec */
+ Spec* specs,
+ TopSpec* parent_spec,
+ /* seginfo and the owning TopSpec */
+ SegInfo* si,
+ TopSpec* parent_sym
+ );
+
+/* Notify m_redir of the arrival of a new SegInfo. This is fairly
+ complex, but the net effect is to (1) add a new entry to the
+   topspecs list, and (2) figure out what new bindings are now active,
+ and, as a result, add them to the actives mapping. */
+
+#define N_DEMANGLED 256
+
+void VG_(redir_notify_new_SegInfo)( SegInfo* newsi )
+{
+ Bool ok, isWrap;
+ Int i, nsyms;
+ Spec* specList;
+ Spec* spec;
+ TopSpec* ts;
+ TopSpec* newts;
+ HChar* sym_name;
+ Addr sym_addr;
+ HChar demangled_sopatt[N_DEMANGLED];
+ HChar demangled_fnpatt[N_DEMANGLED];
+
+ vg_assert(newsi);
+ vg_assert(VG_(seginfo_soname)(newsi) != NULL);
+
+ /* stay sane: we don't already have this. */
+ for (ts = topSpecs; ts; ts = ts->next)
+ vg_assert(ts->seginfo != newsi);
+
+ /* scan this SegInfo's symbol table, pulling out and demangling
+ any specs found */
+
+ specList = NULL; /* the spec list we're building up */
+
+ nsyms = VG_(seginfo_syms_howmany)( newsi );
+ for (i = 0; i < nsyms; i++) {
+ VG_(seginfo_syms_getidx)( newsi, i, &sym_addr, NULL, &sym_name );
+ ok = VG_(maybe_Z_demangle)( sym_name, demangled_sopatt, N_DEMANGLED,
+ demangled_fnpatt, N_DEMANGLED, &isWrap );
+ if (!ok) {
+ /* It's not a full-scale redirect, but perhaps it is a load-notify
+ fn? Let the load-notify department see it. */
+ handle_maybe_load_notifier( sym_name, sym_addr );
+ continue;
}
- break;
+ spec = symtab_alloc(sizeof(Spec));
+ vg_assert(spec);
+ spec->from_sopatt = symtab_strdup(demangled_sopatt);
+ spec->from_fnpatt = symtab_strdup(demangled_fnpatt);
+ vg_assert(spec->from_sopatt);
+ vg_assert(spec->from_fnpatt);
+ spec->to_addr = sym_addr;
+ spec->isWrap = isWrap;
+ /* check we're not adding manifestly stupid destinations */
+ vg_assert(is_plausible_guest_addr(sym_addr));
+ spec->next = specList;
+ spec->mark = False; /* not significant */
+ specList = spec;
}
- case R_WRAPPER:
- TRACE_REDIR(" wrapper resolved (%s:%s=%p -> wrapper)",
- redir->from_lib, redir->from_sym, redir->from_addr);
+ /* Ok. Now specList holds the list of specs from the SegInfo.
+ Build a new TopSpec, but don't add it to topSpecs yet. */
+ newts = symtab_alloc(sizeof(TopSpec));
+ vg_assert(newts);
+ newts->next = NULL; /* not significant */
+ newts->seginfo = newsi;
+ newts->specs = specList;
+ newts->mark = False; /* not significant */
+
+ /* We now need to augment the active set with the following partial
+ cross product:
+
+ (1) actives formed by matching the new specs in specList against
+ all symbols currently listed in topSpecs
+
+ (2) actives formed by matching the new symbols in newsi against
+ all specs currently listed in topSpecs
+
+ (3) actives formed by matching the new symbols in newsi against
+ the new specs in specList
+
+ This is necessary in order to maintain the invariant that
+ Actives contains all bindings generated by matching ALL specs in
+ topSpecs against ALL symbols in topSpecs (that is, a cross
+ product of ALL known specs against ALL known symbols).
+ */
+ /* Case (1) */
+ for (ts = topSpecs; ts; ts = ts->next) {
+ if (ts->seginfo)
+ generate_and_add_actives( specList, newts,
+ ts->seginfo, ts );
+ }
+
+ /* Case (2) */
+ for (ts = topSpecs; ts; ts = ts->next) {
+ generate_and_add_actives( ts->specs, ts,
+ newsi, newts );
+ }
- vg_assert(redir->wrapper);
+ /* Case (3) */
+ generate_and_add_actives( specList, newts,
+ newsi, newts );
- /* XXX redir leaked */
- //VG_(wrap_function)(redir->from_addr, redir->wrapper);
- break;
+ /* Finally, add the new TopSpec. */
+ newts->next = topSpecs;
+ topSpecs = newts;
- case R_CLIENT_WRAPPER:
- vg_assert(redir->wrapper);
- VG_(core_panic)("not implemented");
- break;
- }
+ if (VG_(clo_trace_redir))
+ show_redir_state("after VG_(redir_notify_new_SegInfo)");
}
-// Resolve a redir using si if possible. Returns True if it succeeded.
-static Bool resolve_redir_with_seginfo(CodeRedirect *redir, const SegInfo *si)
-{
- Bool ok;
+#undef N_DEMANGLED
- vg_assert(si != NULL);
- vg_assert(redir->from_addr == 0 );
- vg_assert(redir->from_sym != NULL);
- // Resolved if the soname matches and we find the symbol.
- ok = soname_matches(redir->from_lib, VG_(seginfo_soname)(si));
- if (ok) {
- redir->from_addr = VG_(reverse_search_one_symtab)(si, redir->from_sym);
- ok = ( redir->from_addr == 0 ? False : True );
- }
- return ok;
-}
+/* Do one element of the basic cross product: add to the active set,
+ all matches resulting from comparing all the given specs against
+ all the symbols in the given seginfo. If a conflicting binding
+ would thereby arise, don't add it, but do complain. */
-// Resolve a redir using any SegInfo if possible. This is called whenever
-// a new sym-to-addr redir is created. It covers the case where a
-// replacement function is loaded after its replacee.
-static Bool resolve_redir_with_existing_seginfos(CodeRedirect *redir)
+static
+void generate_and_add_actives (
+ /* spec list and the owning TopSpec */
+ Spec* specs,
+ TopSpec* parent_spec,
+ /* seginfo and the owning TopSpec */
+ SegInfo* si,
+ TopSpec* parent_sym
+ )
{
- const SegInfo *si;
-
- for (si = VG_(next_seginfo)(NULL);
- si != NULL;
- si = VG_(next_seginfo)(si))
- {
- if (resolve_redir_with_seginfo(redir, si))
- return True;
+ Spec* sp;
+ Bool anyMark;
+ Active act;
+ Int nsyms, i;
+ Addr sym_addr;
+ HChar* sym_name;
+
+ /* First figure out which of the specs match the seginfo's
+ soname. */
+ anyMark = False;
+ for (sp = specs; sp; sp = sp->next) {
+ sp->mark = VG_(string_match)( sp->from_sopatt,
+ VG_(seginfo_soname)(si) );
+ anyMark = anyMark || sp->mark;
+ }
+
+ /* shortcut: if none of the sonames match, there will be no bindings. */
+ if (!anyMark)
+ return;
+
+ /* Iterate outermost over the symbols in the seginfo, in the hope
+ of trashing the caches less. */
+ nsyms = VG_(seginfo_syms_howmany)( si );
+ for (i = 0; i < nsyms; i++) {
+ VG_(seginfo_syms_getidx)( si, i, &sym_addr, NULL, &sym_name );
+ for (sp = specs; sp; sp = sp->next) {
+ if (!sp->mark)
+ continue; /* soname doesn't match */
+ if (VG_(string_match)( sp->from_fnpatt, sym_name )) {
+ /* got a new binding. Add to collection. */
+ act.from_addr = sym_addr;
+ act.to_addr = sp->to_addr;
+ act.parent_spec = parent_spec;
+ act.parent_sym = parent_sym;
+ act.isWrap = sp->isWrap;
+ maybe_add_active( act );
+ }
+ }
}
- return False;
}
-// Resolve as many unresolved redirs as possible with this SegInfo. This
-// should be called when a new SegInfo symtab is loaded. It covers the case
-// where a replacee function is loaded after its replacement function.
-void VG_(resolve_existing_redirs_with_seginfo)(SegInfo *si)
+
+/* Add an act (passed by value; is copied here) and deal with
+ conflicting bindings. */
+static void maybe_add_active ( Active act )
{
- CodeRedirect **prevp = &unresolved_redirs;
- CodeRedirect *redir, *next;
-
- TRACE_REDIR("Just loaded %s (soname=%s),",
- VG_(seginfo_filename)(si), VG_(seginfo_soname)(si));
- TRACE_REDIR(" resolving any unresolved redirs with it");
-
- // Visit each unresolved redir - if it becomes resolved, then
- // move it from the unresolved list to the resolved list.
- for (redir = unresolved_redirs; redir != NULL; redir = next) {
- next = redir->next;
-
- if (resolve_redir_with_seginfo(redir, si)) {
- *prevp = next;
- redir->next = NULL;
- add_redir_to_resolved_list(redir, False);
- } else
- prevp = &redir->next;
+ HChar* what = NULL;
+ Active* old;
+
+ /* Complain and ignore manifestly bogus 'from' addresses.
+
+ Kludge: because this can get called before the trampoline area (a
+ bunch of magic 'to' addresses) has its ownership changed from V
+ to C, we can't check the 'to' address similarly. Sigh.
+
+ amd64-linux hack: the vsysinfo pages appear to have no
+ permissions
+ ffffffffff600000-ffffffffffe00000 ---p 00000000 00:00 0
+ so skip the check for them. */
+ if (!is_plausible_guest_addr(act.from_addr)
+# if defined(VGP_amd64_linux)
+ && act.from_addr != 0xFFFFFFFFFF600000ULL
+ && act.from_addr != 0xFFFFFFFFFF600400ULL
+# endif
+ ) {
+ what = "redirection from-address is in non-executable area";
+ goto bad;
}
- TRACE_REDIR(" Finished resolving");
+ old = VG_(OSet_Lookup)( activeSet, &act.from_addr );
+ if (old) {
+ /* Dodgy. Conflicting binding. */
+ vg_assert(old->from_addr == act.from_addr);
+ if (old->to_addr != act.to_addr) {
+ /* we have to ignore it -- otherwise activeSet would contain
+ conflicting bindings. */
+ what = "new redirection conflicts with existing -- ignoring it";
+ goto bad;
+ } else {
+ /* This appears to be a duplicate of an existing binding.
+ Safe(ish) -- ignore. */
+ /* XXXXXXXXXXX COMPLAIN if new and old parents differ */
+ }
+ } else {
+ Active* a = VG_(OSet_AllocNode)(activeSet, sizeof(Active));
+ vg_assert(a);
+ *a = act;
+ VG_(OSet_Insert)(activeSet, a);
+ /* Now that a new from->to redirection is in force, we need to
+ get rid of any translations intersecting 'from' in order that
+ they get redirected to 'to'. So discard them. Just for
+ paranoia (but, I believe, unnecessarily), discard 'to' as
+ well. */
+ VG_(discard_translations)( (Addr64)act.from_addr, 1,
+ "redir_new_SegInfo(from_addr)");
+ VG_(discard_translations)( (Addr64)act.to_addr, 1,
+ "redir_new_SegInfo(to_addr)");
+ }
+ return;
+
+ bad:
+ vg_assert(what);
+ if (VG_(clo_verbosity) > 1) {
+ VG_(message)(Vg_UserMsg, "WARNING: %s", what);
+ show_active( " new: ", &act);
+ }
}
-/* Redirect a function at from_addr to a function at to_addr */
-__attribute__((unused)) // It is used, but not on all platforms...
-static void add_redirect_addr_to_addr( Addr from_addr, Addr to_addr )
-{
- CodeRedirect* redir = VG_(OSet_AllocNode)(resolved_redirs,
- sizeof(CodeRedirect));
- vg_assert(0 != from_addr && 0 != to_addr);
- redir->type = R_REDIRECT;
+/* Notify m_redir of the deletion of a SegInfo. This is relatively
+ simple -- just get rid of all actives derived from it, and free up
+ the associated list elements. */
- redir->from_lib = NULL;
- redir->from_sym = NULL;
- redir->from_addr = from_addr;
+void VG_(redir_notify_delete_SegInfo)( SegInfo* delsi )
+{
+ TopSpec* ts;
+ TopSpec* tsPrev;
+ Spec* sp;
+ Spec* sp_next;
+ OSet* tmpSet;
+ Active* act;
+ Bool delMe;
+ Addr* addrP;
+
+ vg_assert(delsi);
+
+ /* Search for it, and make tsPrev point to the previous entry, if
+ any. */
+ tsPrev = NULL;
+ ts = topSpecs;
+ while (True) {
+ if (ts == NULL) break;
+ if (ts->seginfo == delsi) break;
+ tsPrev = ts;
+ ts = ts->next;
+ }
- redir->to_addr = to_addr;
- redir->wrapper = 0;
+ vg_assert(ts); /* else we don't have the deleted SegInfo */
+ vg_assert(ts->seginfo == delsi);
+
+ /* Traverse the actives, copying the addresses of those we intend
+ to delete into tmpSet. */
+ tmpSet = VG_(OSet_Create)( 0/*keyOff*/, NULL/*fastCmp*/,
+ symtab_alloc, symtab_free);
+
+ ts->mark = True;
+
+ VG_(OSet_ResetIter)( activeSet );
+ while ( (act = VG_(OSet_Next)(activeSet)) ) {
+ delMe = act->parent_spec != NULL
+ && act->parent_sym != NULL
+ && act->parent_spec->seginfo != NULL
+ && act->parent_sym->seginfo != NULL
+ && (act->parent_spec->mark || act->parent_sym->mark);
+
+ /* While we're at it, a bit of paranoia: delete any actives
+ which don't have both feet in valid client executable
+ areas. */
+ if (!delMe) {
+ if (!is_plausible_guest_addr(act->from_addr)) delMe = True;
+ if (!is_plausible_guest_addr(act->to_addr)) delMe = True;
+ }
- TRACE_REDIR("REDIRECT addr to addr: %p to %p", from_addr, to_addr);
+ if (delMe) {
+ addrP = VG_(OSet_AllocNode)( tmpSet, sizeof(Addr) );
+ *addrP = act->from_addr;
+ VG_(OSet_Insert)( tmpSet, addrP );
+ /* While we have our hands on both the 'from' and 'to'
+ of this Active, do paranoid stuff with tt/tc. */
+ VG_(discard_translations)( (Addr64)act->from_addr, 1,
+ "redir_del_SegInfo(from_addr)");
+ VG_(discard_translations)( (Addr64)act->to_addr, 1,
+ "redir_del_SegInfo(to_addr)");
+ }
+ }
- // This redirection is already resolved, put it straight in the list.
- add_redir_to_resolved_list(redir, True);
-}
+ /* Now traverse tmpSet, deleting corresponding elements in
+ activeSet. */
+ VG_(OSet_ResetIter)( tmpSet );
+ while ( (addrP = VG_(OSet_Next)(tmpSet)) ) {
+ act = VG_(OSet_Remove)( activeSet, addrP );
+ vg_assert(act);
+ VG_(OSet_FreeNode)( activeSet, act );
+ }
-/* Redirect a lib/symbol reference to a function at addr */
-static void add_redirect_sym_to_addr(
- const Char *from_lib, const Char *from_sym, Addr to_addr
-)
-{
- CodeRedirect* redir = VG_(OSet_AllocNode)(resolved_redirs,
- sizeof(CodeRedirect));
- vg_assert(from_lib && from_sym && 0 != to_addr);
-
- redir->type = R_REDIRECT;
- redir->from_lib = VG_(arena_strdup)(VG_AR_SYMTAB, from_lib);
- redir->from_sym = VG_(arena_strdup)(VG_AR_SYMTAB, from_sym);
- redir->from_addr = 0;
- redir->to_addr = to_addr;
- redir->wrapper = 0;
-
- TRACE_REDIR("REDIR sym to addr: %s:%s to %p", from_lib, from_sym, to_addr);
-
- // Check against all existing segments to see if this redirection
- // can be resolved immediately (as will be the case when the replacement
- // function is loaded after the replacee). Then add it to the
- // appropriate list.
- if (resolve_redir_with_existing_seginfos(redir)) {
- add_redir_to_resolved_list(redir, True);
- } else {
- add_redir_to_unresolved_list(redir);
+ VG_(OSet_Destroy)( tmpSet, NULL );
+
+ /* The Actives set is now cleaned up. Free up this TopSpec and
+ everything hanging off it. */
+ for (sp = ts->specs; sp; sp = sp_next) {
+ if (sp->from_sopatt) symtab_free(sp->from_sopatt);
+ if (sp->from_fnpatt) symtab_free(sp->from_fnpatt);
+ sp_next = sp->next;
+ symtab_free(sp);
}
-}
-CodeRedirect *VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
- const FuncWrapper *wrapper)
-{
- CodeRedirect* redir = VG_(OSet_AllocNode)(resolved_redirs,
- sizeof(CodeRedirect));
- redir->type = R_WRAPPER;
- redir->from_lib = VG_(arena_strdup)(VG_AR_SYMTAB, from_lib);
- redir->from_sym = VG_(arena_strdup)(VG_AR_SYMTAB, from_sym);
- redir->from_addr = 0;
- redir->to_addr = 0;
- redir->wrapper = wrapper;
-
- TRACE_REDIR("REDIR sym to wrapper: %s:%s to (%p,%p)",
- from_lib, from_sym, wrapper->before, wrapper->after);
-
- // Check against all existing segments to see if this redirection
- // can be resolved immediately. Then add it to the appropriate list.
- if (resolve_redir_with_existing_seginfos(redir)) {
- add_redir_to_resolved_list(redir, True);
+ if (tsPrev == NULL) {
+ /* first in list */
+ topSpecs = ts->next;
} else {
- add_redir_to_unresolved_list(redir);
+ tsPrev->next = ts->next;
}
+ symtab_free(ts);
- return redir;
+ if (VG_(clo_trace_redir))
+ show_redir_state("after VG_(redir_notify_delete_SegInfo)");
}
-/* If address 'a' is being redirected, return the redirected-to
- address. */
-Addr VG_(code_redirect)(Addr a)
+
+/*------------------------------------------------------------*/
+/*--- QUERIES (really the whole point of this module) ---*/
+/*------------------------------------------------------------*/
+
+/* This is the crucial redirection function. It answers the question:
+ should this code address be redirected somewhere else? It's used
+ just before translating a basic block. */
+Addr VG_(redir_do_lookup) ( Addr orig, Bool* isWrap )
{
- CodeRedirect* r = VG_(OSet_Lookup)(resolved_redirs, &a);
+ Active* r = VG_(OSet_Lookup)(activeSet, &orig);
if (r == NULL)
- return a;
+ return orig;
vg_assert(r->to_addr != 0);
-
+ if (isWrap)
+ *isWrap = r->isWrap;
return r->to_addr;
}
-static void* symtab_alloc(SizeT n)
+
+/*------------------------------------------------------------*/
+/*--- INITIALISATION ---*/
+/*------------------------------------------------------------*/
+
+/* Add a never-delete-me Active. */
+
+__attribute__((unused)) /* only used on amd64 */
+static void add_hardwired_active ( Addr from, Addr to )
{
- return VG_(arena_malloc)(VG_AR_SYMTAB, n);
+ Active act;
+ act.from_addr = from;
+ act.to_addr = to;
+ act.parent_spec = NULL;
+ act.parent_sym = NULL;
+ act.isWrap = False;
+ maybe_add_active( act );
}
-static void symtab_free(void* p)
+
+/* Add a never-delete-me Spec. This is a bit of a kludge. On the
+ assumption that this is called only at startup, only handle the
+ case where topSpecs is completely empty, or if it isn't, it has
+ just one entry and that is the one with NULL seginfo -- that is the
+ entry that holds these initial specs. */
+
+__attribute__((unused)) /* not used on all platforms */
+static void add_hardwired_spec ( HChar* sopatt, HChar* fnpatt, Addr to_addr )
{
- return VG_(arena_free)(VG_AR_SYMTAB, p);
+ Spec* spec = symtab_alloc(sizeof(Spec));
+ vg_assert(spec);
+
+ if (topSpecs == NULL) {
+ topSpecs = symtab_alloc(sizeof(TopSpec));
+ vg_assert(topSpecs);
+ topSpecs->next = NULL;
+ topSpecs->seginfo = NULL;
+ topSpecs->specs = NULL;
+ topSpecs->mark = False;
+ }
+
+ vg_assert(topSpecs != NULL);
+ vg_assert(topSpecs->next == NULL);
+ vg_assert(topSpecs->seginfo == NULL);
+
+ spec->from_sopatt = sopatt;
+ spec->from_fnpatt = fnpatt;
+ spec->to_addr = to_addr;
+ spec->isWrap = False;
+ spec->mark = False; /* not significant */
+
+ spec->next = topSpecs->specs;
+ topSpecs->specs = spec;
}
-void VG_(setup_code_redirect_table) ( void )
+
+/* Initialise the redir system, and create the initial Spec list and
+ for amd64-linux a couple of permanent active mappings. The initial
+ Specs are not converted into Actives yet, on the (checked)
+ assumption that no SegInfos have so far been created, and so when
+ they are created, that will happen. */
+
+void VG_(redir_initialise) ( void )
{
- // Initialise resolved_redirs list.
- resolved_redirs = VG_(OSet_Create)(offsetof(CodeRedirect, from_addr),
- NULL, // Use fast comparison
- symtab_alloc,
- symtab_free);
+ // Assert that there are no SegInfos so far
+ vg_assert( VG_(next_seginfo)(NULL) == NULL );
+
+ // Initialise active mapping.
+ activeSet = VG_(OSet_Create)(offsetof(Active, from_addr),
+ NULL, // Use fast comparison
+ symtab_alloc,
+ symtab_free);
-#if defined(VGP_x86_linux)
+ // The rest of this function just adds initial Specs.
+
+# if defined(VGP_x86_linux)
/* Redirect _dl_sysinfo_int80, which is glibc's default system call
routine, to our copy so that the special sysinfo unwind hack in
- m_stacktrace.c will kick in. */
- add_redirect_sym_to_addr(
- "soname:ld-linux.so.2", "_dl_sysinfo_int80",
- (Addr)&VG_(x86_linux_REDIR_FOR__dl_sysinfo_int80)
+ m_stacktrace.c will kick in. */
+ add_hardwired_spec(
+ "ld-linux.so.2", "_dl_sysinfo_int80",
+ (Addr)&VG_(x86_linux_REDIR_FOR__dl_sysinfo_int80)
);
/* If we're using memcheck, use this intercept right from the
start, otherwise ld.so (glibc-2.3.5) makes a lot of noise. */
if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
- add_redirect_sym_to_addr(
- "soname:ld-linux.so.2", "index",
- (Addr)&VG_(x86_linux_REDIR_FOR_index)
- );
+ add_hardwired_spec(
+ "ld-linux.so.2", "index",
+ (Addr)&VG_(x86_linux_REDIR_FOR_index)
+ );
}
-#elif defined(VGP_amd64_linux)
-
+# elif defined(VGP_amd64_linux)
/* Redirect vsyscalls to local versions */
- add_redirect_addr_to_addr(
+ add_hardwired_active(
0xFFFFFFFFFF600000ULL,
(Addr)&VG_(amd64_linux_REDIR_FOR_vgettimeofday)
);
- add_redirect_addr_to_addr(
+ add_hardwired_active(
0xFFFFFFFFFF600400ULL,
(Addr)&VG_(amd64_linux_REDIR_FOR_vtime)
);
-#elif defined(VGP_ppc32_linux)
-
+# elif defined(VGP_ppc32_linux)
/* If we're using memcheck, use these intercepts right from
the start, otherwise ld.so makes a lot of noise. */
if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
-
- add_redirect_sym_to_addr(
- "soname:ld.so.1", "strlen",
- (Addr)&VG_(ppc32_linux_REDIR_FOR_strlen)
+ add_hardwired_spec(
+ "ld.so.1", "strlen",
+ (Addr)&VG_(ppc32_linux_REDIR_FOR_strlen)
);
- add_redirect_sym_to_addr(
+ add_hardwired_spec(
- "soname:ld.so.1", "strcmp",
+ "ld.so.1", "strcmp",
(Addr)&VG_(ppc32_linux_REDIR_FOR_strcmp)
);
-
}
-#elif defined(VGP_ppc64_linux)
-
+# elif defined(VGP_ppc64_linux)
/* If we're using memcheck, use these intercepts right from
the start, otherwise ld.so makes a lot of noise. */
if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
}
-#else
-# error Unknown platform
-#endif
-}
-
-/* Z-decode a symbol into library:func form, eg
-
- _vgi_libcZdsoZd6__ZdlPv --> libc.so.6:_ZdlPv
-
- Uses the Z-encoding scheme described in pub_core_redir.h.
- Returns True if demangle OK, False otherwise.
-*/
-static Bool Z_decode(const Char* symbol, Char* result, Int nbytes)
-{
-# define EMIT(ch) \
- do { \
- if (j >= nbytes) \
- result[j-1] = 0; \
- else \
- result[j++] = ch; \
- } while (0)
-
- Bool error = False;
- Int i, j = 0;
- Int len = VG_(strlen)(symbol);
- if (0) VG_(printf)("idm: %s\n", symbol);
-
- i = VG_REPLACE_FUNCTION_PREFIX_LEN;
-
- /* Chew though the Z-encoded soname part. */
- while (True) {
-
- if (i >= len)
- break;
+# else
+# error Unknown platform
+# endif
- if (symbol[i] == '_')
- /* We found the underscore following the Z-encoded soname.
- Just copy the rest literally. */
- break;
-
- if (symbol[i] != 'Z') {
- EMIT(symbol[i]);
- i++;
- continue;
- }
-
- /* We've got a Z-escape. Act accordingly. */
- i++;
- if (i >= len) {
- /* Hmm, Z right at the end. Something's wrong. */
- error = True;
- EMIT('Z');
- break;
- }
- switch (symbol[i]) {
- case 'a': EMIT('*'); break;
- case 'p': EMIT('+'); break;
- case 'c': EMIT(':'); break;
- case 'd': EMIT('.'); break;
- case 'u': EMIT('_'); break;
- case 'h': EMIT('-'); break;
- case 's': EMIT(' '); break;
- case 'Z': EMIT('Z'); break;
- default: error = True; EMIT('Z'); EMIT(symbol[i]); break;
- }
- i++;
- }
-
- if (error || i >= len || symbol[i] != '_') {
- /* Something's wrong. Give up. */
- VG_(message)(Vg_UserMsg, "intercept: error demangling: %s", symbol);
- EMIT(0);
- return False;
- }
+ if (VG_(clo_trace_redir))
+ show_redir_state("after VG_(redir_initialise)");
+}
- /* Copy the rest of the string verbatim. */
- i++;
- EMIT(':');
- while (True) {
- if (i >= len)
- break;
- EMIT(symbol[i]);
- i++;
- }
- EMIT(0);
- if (0) VG_(printf)("%s\n", result);
- return True;
+/*------------------------------------------------------------*/
+/*--- MISC HELPERS ---*/
+/*------------------------------------------------------------*/
-# undef EMIT
+static void* symtab_alloc(SizeT n)
+{
+ return VG_(arena_malloc)(VG_AR_SYMTAB, n);
}
-// Nb: this can change the string pointed to by 'symbol'.
-static void handle_replacement_function( Char* symbol, Addr addr )
+static void symtab_free(void* p)
{
- Bool ok;
- Int len = VG_(strlen)(symbol) + 1 - VG_REPLACE_FUNCTION_PREFIX_LEN;
- Char *lib = VG_(arena_malloc)(VG_AR_SYMTAB, len+8);
- Char *func;
-
- // Put "soname:" at the start of lib
- VG_(strcpy)(lib, "soname:");
-
- ok = Z_decode(symbol, lib+7, len);
- if (ok) {
- // lib is "soname:<libname>:<fnname>". Split the string at the 2nd ':'.
- func = lib + VG_(strlen)(lib)-1;
- while(*func != ':') func--;
- *func = '\0';
- func++; // Move past the '\0'
-
- // Now lib is "soname:<libname>" and func is "<fnname>".
- if (0) VG_(printf)("lib A%sZ, func A%sZ\n", lib, func);
- add_redirect_sym_to_addr(lib, func, addr);
-
- // Overwrite the given Z-encoded name with just the fnname.
- VG_(strcpy)(symbol, func);
- }
-
- VG_(arena_free)(VG_AR_SYMTAB, lib);
+ VG_(arena_free)(VG_AR_SYMTAB, p);
}
-static Addr __libc_freeres_wrapper = 0;
+static HChar* symtab_strdup(HChar* str)
+{
+ return VG_(arena_strdup)(VG_AR_SYMTAB, str);
+}
-Addr VG_(get_libc_freeres_wrapper)(void)
+/* Really this should be merged with translations_allowable_from_seg
+ in m_translate. */
+static Bool is_plausible_guest_addr(Addr a)
{
- return __libc_freeres_wrapper;
+ NSegment* seg = VG_(am_find_nsegment)(a);
+ return seg != NULL
+ && (seg->kind == SkAnonC || seg->kind == SkFileC)
+ && (seg->hasX || seg->hasR); /* crude x86-specific hack */
}
-// This is specifically for stringifying VG_(x) function names. We
-// need to do two macroexpansions to get the VG_ macro expanded before
-// stringifying.
-#define _STR(x) #x
-#define STR(x) _STR(x)
-static void handle_load_notifier( Char* symbol, Addr addr )
+/*------------------------------------------------------------*/
+/*--- NOTIFY-ON-LOAD FUNCTIONS ---*/
+/*------------------------------------------------------------*/
+
+static void handle_maybe_load_notifier( HChar* symbol, Addr addr )
{
- if (VG_(strcmp)(symbol, STR(VG_NOTIFY_ON_LOAD(freeres))) == 0)
- __libc_freeres_wrapper = addr;
-// else if (VG_(strcmp)(symbol, STR(VG_WRAPPER(pthread_startfunc_wrapper))) == 0)
-// VG_(pthread_startfunc_wrapper)((Addr)(si->offset + sym->st_value));
+ if (0 != VG_(strncmp)(symbol, VG_NOTIFY_ON_LOAD_PREFIX,
+ VG_NOTIFY_ON_LOAD_PREFIX_LEN))
+ /* Doesn't have the right prefix */
+ return;
+
+ if (VG_(strcmp)(symbol, VG_STRINGIFY(VG_NOTIFY_ON_LOAD(freeres))) == 0)
+ VG_(client___libc_freeres_wrapper) = addr;
+// else
+// if (VG_(strcmp)(symbol, STR(VG_WRAPPER(pthread_startfunc_wrapper))) == 0)
+// VG_(pthread_startfunc_wrapper)((Addr)(si->offset + sym->st_value));
else
vg_assert2(0, "unrecognised load notification function: %s", symbol);
}
-static Bool is_replacement_function(Char* s)
-{
- return (0 == VG_(strncmp)(s,
- VG_REPLACE_FUNCTION_PREFIX,
- VG_REPLACE_FUNCTION_PREFIX_LEN));
-}
-static Bool is_load_notifier(Char* s)
+/*------------------------------------------------------------*/
+/*--- SANITY/DEBUG ---*/
+/*------------------------------------------------------------*/
+
+static void show_spec ( HChar* left, Spec* spec )
{
- return (0 == VG_(strncmp)(s,
- VG_NOTIFY_ON_LOAD_PREFIX,
- VG_NOTIFY_ON_LOAD_PREFIX_LEN));
+ VG_(message)(Vg_DebugMsg,
+ "%s%18s %30s %s-> 0x%08llx",
+ left,
+ spec->from_sopatt, spec->from_fnpatt,
+ spec->isWrap ? "W" : "R",
+ (ULong)spec->to_addr );
}
-// Call this for each symbol loaded. It determines if we need to do
-// anything special with it. It can modify 'symbol' in-place.
-void VG_(maybe_redir_or_notify) ( Char* symbol, Addr addr )
+static void show_active ( HChar* left, Active* act )
{
- if (is_replacement_function(symbol))
- handle_replacement_function(symbol, addr);
- else
- if (is_load_notifier(symbol))
- handle_load_notifier(symbol, addr);
+ Bool ok;
+ HChar name1[64] = "";
+ HChar name2[64] = "";
+ name1[0] = name2[0] = 0;
+ ok = VG_(get_fnname_w_offset)(act->from_addr, name1, 64);
+ if (!ok) VG_(strcpy)(name1, "???");
+ ok = VG_(get_fnname_w_offset)(act->to_addr, name2, 64);
+ if (!ok) VG_(strcpy)(name2, "???");
+
+ VG_(message)(Vg_DebugMsg, "%s0x%08llx (%10s) %s-> 0x%08llx %s",
+ left,
+ (ULong)act->from_addr, name1,
+ act->isWrap ? "W" : "R",
+ (ULong)act->to_addr, name2 );
}
+static void show_redir_state ( HChar* who )
+{
+ TopSpec* ts;
+ Spec* sp;
+ Active* act;
+ VG_(message)(Vg_DebugMsg, "<<");
+ VG_(message)(Vg_DebugMsg, " ------ REDIR STATE %s ------", who);
+ for (ts = topSpecs; ts; ts = ts->next) {
+ VG_(message)(Vg_DebugMsg,
+ " TOPSPECS of soname %s",
+ ts->seginfo ? (HChar*)VG_(seginfo_soname)(ts->seginfo)
+ : "(hardwired)" );
+ for (sp = ts->specs; sp; sp = sp->next)
+ show_spec(" ", sp);
+ }
+ VG_(message)(Vg_DebugMsg, " ------ ACTIVE ------");
+ VG_(OSet_ResetIter)( activeSet );
+ while ( (act = VG_(OSet_Next)(activeSet)) ) {
+ show_active(" ", act);
+ }
-//:: /*------------------------------------------------------------*/
-//:: /*--- General function wrapping. ---*/
-//:: /*------------------------------------------------------------*/
-//::
-//:: /*
-//:: TODO:
-//:: - hook into the symtab machinery
-//:: - client-side wrappers?
-//:: - better interfaces for before() functions to get to arguments
-//:: - handle munmap of code (dlclose())
-//:: - handle thread exit
-//:: - handle longjmp
-//:: */
-//:: struct callkey {
-//:: ThreadId tid; /* calling thread */
-//:: Addr esp; /* address of args on stack */
-//:: Addr eip; /* return address */
-//:: };
-//::
-//:: struct call_instance {
-//:: struct callkey key;
-//::
-//:: const FuncWrapper *wrapper;
-//:: void *nonce;
-//:: };
-//::
-//:: static inline Addr addrcmp(Addr a, Addr b)
-//:: {
-//:: if (a < b)
-//:: return -1;
-//:: else if (a > b)
-//:: return 1;
-//:: else
-//:: return 0;
-//:: }
-//::
-//:: static inline Int cmp(UInt a, UInt b)
-//:: {
-//:: if (a < b)
-//:: return -1;
-//:: else if (a > b)
-//:: return 1;
-//:: else
-//:: return 0;
-//:: }
-//::
-//:: static Int keycmp(const void *pa, const void *pb)
-//:: {
-//:: const struct callkey *a = (const struct callkey *)pa;
-//:: const struct callkey *b = (const struct callkey *)pb;
-//:: Int ret;
-//::
-//:: if ((ret = cmp(a->tid, b->tid)))
-//:: return ret;
-//::
-//:: if ((ret = addrcmp(a->esp, b->esp)))
-//:: return ret;
-//::
-//:: return addrcmp(a->eip, b->eip);
-//:: }
-//::
-//:: /* List of wrapped call invocations which are currently active */
-//:: static SkipList wrapped_frames = VG_SKIPLIST_INIT(struct call_instance, key, keycmp,
-//:: NULL, VG_AR_SYMTAB);
-//::
-//:: static struct call_instance *find_call(Addr retaddr, Addr argsp, ThreadId tid)
-//:: {
-//:: struct callkey key = { tid, argsp, retaddr };
-//::
-//:: return VG_(SkipList_Find_Exact)(&wrapped_frames, &key);
-//:: }
-//::
-//:: static void wrapper_return(Addr retaddr);
-//::
-//:: /* Called from generated code via helper */
-//:: void VG_(wrap_before)(ThreadState *tst, const FuncWrapper *wrapper)
-//:: {
-//:: Addr retaddr = VG_RETADDR(tst->arch);
-//:: Addr argp = (Addr)&VG_FUNC_ARG(tst->arch, 0);
-//:: void *nonce = NULL;
-//:: Bool mf = VG_(my_fault);
-//:: VG_(my_fault) = True;
-//::
-//:: if (wrapper->before) {
-//:: va_list args = VG_VA_LIST(tst->arch);
-//:: nonce = (*wrapper->before)(args);
-//:: }
-//::
-//:: if (wrapper->after) {
-//:: /* If there's an after function, make sure it gets called */
-//:: struct call_instance *call;
-//::
-//:: call = find_call(retaddr, argp, tst->tid);
-//::
-//:: if (call != NULL) {
-//:: /* Found a stale outstanding call; clean it up and recycle
-//:: the structure */
-//:: if (call->wrapper->after)
-//:: (*call->wrapper->after)(call->nonce, RT_LONGJMP, 0);
-//:: } else {
-//:: call = VG_(SkipNode_Alloc)(&wrapped_frames);
-//::
-//:: call->key.tid = tst->tid;
-//:: call->key.esp = argp;
-//:: call->key.eip = retaddr;
-//::
-//:: VG_(SkipList_Insert)(&wrapped_frames, call);
-//::
-//:: wrapper_return(retaddr);
-//:: }
-//::
-//:: call->wrapper = wrapper;
-//:: call->nonce = nonce;
-//:: } else
-//:: vg_assert(nonce == NULL);
-//::
-//:: VG_(my_fault) = mf;
-//:: }
-//::
-//:: /* Called from generated code via helper */
-//:: void VG_(wrap_after)(ThreadState *tst)
-//:: {
-//:: Addr EIP = VG_INSTR_PTR(tst->arch); /* instruction after call */
-//:: Addr ESP = VG_STACK_PTR(tst->arch); /* pointer to args */
-//:: Word ret = VG_RETVAL(tst->arch); /* return value */
-//:: struct call_instance *call;
-//:: Bool mf = VG_(my_fault);
-//::
-//:: VG_(my_fault) = True;
-//:: call = find_call(EIP, ESP, tst->tid);
-//::
-//:: if (0)
-//:: VG_(printf)("wrap_after(%p,%p,%d) -> %p\n", EIP, ESP, tst->tid, call);
-//::
-//:: if (call != NULL) {
-//:: if (call->wrapper->after)
-//:: (*call->wrapper->after)(call->nonce, RT_RETURN, ret);
-//::
-//:: VG_(SkipList_Remove)(&wrapped_frames, &call->key);
-//:: VG_(SkipNode_Free)(&wrapped_frames, call);
-//:: }
-//:: VG_(my_fault) = mf;
-//:: }
-//::
-//::
-//:: struct wrapped_function {
-//:: Addr eip; /* eip of function entrypoint */
-//:: const FuncWrapper *wrapper;
-//:: };
-//::
-//:: struct wrapper_return {
-//:: Addr eip; /* return address */
-//:: };
-//::
-//:: /* A mapping from eip of wrapped function entrypoints to actual wrappers */
-//:: static SkipList wrapped_functions = VG_SKIPLIST_INIT(struct wrapped_function, eip, VG_(cmp_Addr),
-//:: NULL, VG_AR_SYMTAB);
-//::
-//:: /* A set of EIPs which are return addresses for wrapped functions */
-//:: static SkipList wrapper_returns = VG_SKIPLIST_INIT(struct wrapper_return, eip, VG_(cmp_Addr),
-//:: NULL, VG_AR_SYMTAB);
-//::
-//:: /* Wrap function starting at eip */
-//:: void VG_(wrap_function)(Addr eip, const FuncWrapper *wrapper)
-//:: {
-//:: struct wrapped_function *func;
-//::
-//:: if (0)
-//:: VG_(printf)("wrapping %p with (%p,%p)\n", eip, wrapper->before, wrapper->after);
-//::
-//:: func = VG_(SkipList_Find_Exact)(&wrapped_functions, &eip);
-//::
-//:: if (func == NULL) {
-//:: func = VG_(SkipNode_Alloc)(&wrapped_functions);
-//:: VG_(invalidate_translations)(eip, 1, True);
-//::
-//:: func->eip = eip;
-//:: VG_(SkipList_Insert)(&wrapped_functions, func);
-//:: }
-//::
-//:: func->wrapper = wrapper;
-//:: }
-//::
-//:: const FuncWrapper *VG_(is_wrapped)(Addr eip)
-//:: {
-//:: struct wrapped_function *func = VG_(SkipList_Find_Exact)(&wrapped_functions, &eip);
-//::
-//:: if (func)
-//:: return func->wrapper;
-//:: return NULL;
-//:: }
-//::
-//:: Bool VG_(is_wrapper_return)(Addr eip)
-//:: {
-//:: struct wrapper_return *ret = VG_(SkipList_Find_Exact)(&wrapper_returns, &eip);
-//::
-//:: return ret != NULL;
-//:: }
-//::
-//:: /* Mark eip as being the return address of a wrapper, so that the
-//:: codegen will generate the appropriate call. */
-//:: void wrapper_return(Addr eip)
-//:: {
-//:: struct wrapper_return *ret;
-//::
-//:: if (VG_(is_wrapper_return)(eip))
-//:: return;
-//::
-//:: VG_(invalidate_translations)(eip, 1, True);
-//::
-//:: ret = VG_(SkipNode_Alloc)(&wrapper_returns);
-//:: ret->eip = eip;
-//::
-//:: VG_(SkipList_Insert)(&wrapper_returns, ret);
-//:: }
+ VG_(message)(Vg_DebugMsg, ">>");
+}
/*--------------------------------------------------------------------*/
/*--- end ---*/
// VALGRIND_NON_SIMD_CALL[12]
#include "pub_core_debuginfo.h" // needed for pub_core_redir.h :(
#include "pub_core_mallocfree.h" // for VG_MIN_MALLOC_SZB, VG_AR_CLIENT
-#include "pub_core_redir.h" // for VG_REPLACE_FUNCTION
+#include "pub_core_redir.h" // for VG_REDIRECT_FUNCTION_*
#include "pub_core_replacemalloc.h"
/* Some handy Z-encoded names */
__builtin_delete, calloc, realloc, memalign, and friends.
None of these functions are called directly - they are not meant to
- be found by the dynamic linker. But ALL client calls to malloc() and
- friends wind up here eventually. They get called because vg_replace_malloc
- installs a bunch of code redirects which causes Valgrind to use these
- functions rather than the ones they're replacing.
+ be found by the dynamic linker. But ALL client calls to malloc()
+ and friends wind up here eventually. They get called because
+ vg_replace_malloc installs a bunch of code redirects which causes
+ Valgrind to use these functions rather than the ones they're
+ replacing.
*/
/* Generate a replacement for 'fnname' in object 'soname', which calls
*/
#define ALLOC_or_NULL(soname, fnname, vg_replacement) \
\
- void* VG_REPLACE_FUNCTION(soname,fnname) (SizeT n); \
- void* VG_REPLACE_FUNCTION(soname,fnname) (SizeT n) \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) (SizeT n); \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) (SizeT n) \
{ \
void* v; \
\
*/
#define ALLOC_or_BOMB(soname, fnname, vg_replacement) \
\
- void* VG_REPLACE_FUNCTION(soname,fnname) (SizeT n); \
- void* VG_REPLACE_FUNCTION(soname,fnname) (SizeT n) \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) (SizeT n); \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) (SizeT n) \
{ \
void* v; \
\
*/
#define FREE(soname, fnname, vg_replacement) \
\
- void VG_REPLACE_FUNCTION(soname,fnname) (void *p); \
- void VG_REPLACE_FUNCTION(soname,fnname) (void *p) \
+ void VG_REPLACE_FUNCTION_ZU(soname,fnname) (void *p); \
+ void VG_REPLACE_FUNCTION_ZU(soname,fnname) (void *p) \
{ \
MALLOC_TRACE(#vg_replacement "(%p)", p ); \
if (p == NULL) \
#define CALLOC(soname, fnname) \
\
- void* VG_REPLACE_FUNCTION(soname,fnname) ( SizeT nmemb, SizeT size ); \
- void* VG_REPLACE_FUNCTION(soname,fnname) ( SizeT nmemb, SizeT size ) \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( SizeT nmemb, SizeT size ); \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( SizeT nmemb, SizeT size ) \
{ \
void* v; \
\
#define REALLOC(soname, fnname) \
\
- void* VG_REPLACE_FUNCTION(soname,fnname) ( void* ptrV, SizeT new_size );\
- void* VG_REPLACE_FUNCTION(soname,fnname) ( void* ptrV, SizeT new_size ) \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( void* ptrV, SizeT new_size );\
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( void* ptrV, SizeT new_size ) \
{ \
void* v; \
\
if (ptrV == NULL) \
/* We need to call a malloc-like function; so let's use \
one which we know exists. */ \
- return VG_REPLACE_FUNCTION(libcZdsoZa,malloc) (new_size); \
+ return VG_REPLACE_FUNCTION_ZU(libcZdsoZa,malloc) (new_size); \
if (new_size <= 0) { \
- VG_REPLACE_FUNCTION(libcZdsoZa,free)(ptrV); \
+ VG_REPLACE_FUNCTION_ZU(libcZdsoZa,free)(ptrV); \
MALLOC_TRACE(" = 0"); \
return NULL; \
} \
#define MEMALIGN(soname, fnname) \
\
- void* VG_REPLACE_FUNCTION(soname,fnname) ( SizeT alignment, SizeT n ); \
- void* VG_REPLACE_FUNCTION(soname,fnname) ( SizeT alignment, SizeT n ) \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( SizeT alignment, SizeT n ); \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( SizeT alignment, SizeT n ) \
{ \
void* v; \
\
#define VALLOC(soname, fnname) \
\
- void* VG_REPLACE_FUNCTION(soname,fnname) ( SizeT size ); \
- void* VG_REPLACE_FUNCTION(soname,fnname) ( SizeT size ) \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( SizeT size ); \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( SizeT size ) \
{ \
- return VG_REPLACE_FUNCTION(libcZdsoZa,memalign)(VKI_PAGE_SIZE, size); \
+ return VG_REPLACE_FUNCTION_ZU(libcZdsoZa,memalign)(VKI_PAGE_SIZE, size); \
}
VALLOC(m_libc_dot_so_star, valloc);
#define MALLOPT(soname, fnname) \
\
- int VG_REPLACE_FUNCTION(soname, fnname) ( int cmd, int value ); \
- int VG_REPLACE_FUNCTION(soname, fnname) ( int cmd, int value ) \
+ int VG_REPLACE_FUNCTION_ZU(soname, fnname) ( int cmd, int value ); \
+ int VG_REPLACE_FUNCTION_ZU(soname, fnname) ( int cmd, int value ) \
{ \
/* In glibc-2.2.4, 1 denotes a successful return value for \
mallopt */ \
#define POSIX_MEMALIGN(soname, fnname) \
\
- int VG_REPLACE_FUNCTION(soname, fnname) ( void **memptr, SizeT alignment, SizeT size ); \
- int VG_REPLACE_FUNCTION(soname, fnname) ( void **memptr, SizeT alignment, SizeT size ) \
+ int VG_REPLACE_FUNCTION_ZU(soname, fnname) ( void **memptr, \
+ SizeT alignment, SizeT size ); \
+ int VG_REPLACE_FUNCTION_ZU(soname, fnname) ( void **memptr, \
+ SizeT alignment, SizeT size ) \
{ \
void *mem; \
\
|| (alignment & (alignment - 1)) != 0) \
return VKI_EINVAL; \
\
- mem = VG_REPLACE_FUNCTION(libcZdsoZa,memalign)(alignment, size); \
+ mem = VG_REPLACE_FUNCTION_ZU(libcZdsoZa,memalign)(alignment, size); \
\
if (mem != NULL) { \
*memptr = mem; \
#define MALLOC_USABLE_SIZE(soname, fnname) \
\
- int VG_REPLACE_FUNCTION(soname, fnname) ( void* p ); \
- int VG_REPLACE_FUNCTION(soname, fnname) ( void* p ) \
+ int VG_REPLACE_FUNCTION_ZU(soname, fnname) ( void* p ); \
+ int VG_REPLACE_FUNCTION_ZU(soname, fnname) ( void* p ) \
{ \
SizeT pszB; \
\
#define PANIC(soname, fnname) \
\
- void VG_REPLACE_FUNCTION(soname, fnname) ( void ); \
- void VG_REPLACE_FUNCTION(soname, fnname) ( void ) \
+ void VG_REPLACE_FUNCTION_ZU(soname, fnname) ( void ); \
+ void VG_REPLACE_FUNCTION_ZU(soname, fnname) ( void ) \
{ \
panic(#fnname); \
}
// doesn't know that the call to mallinfo fills in mi.
#define MALLINFO(soname, fnname) \
\
- struct vg_mallinfo VG_REPLACE_FUNCTION(soname, fnname) ( void ); \
- struct vg_mallinfo VG_REPLACE_FUNCTION(soname, fnname) ( void ) \
+ struct vg_mallinfo VG_REPLACE_FUNCTION_ZU(soname, fnname) ( void ); \
+ struct vg_mallinfo VG_REPLACE_FUNCTION_ZU(soname, fnname) ( void ) \
{ \
static struct vg_mallinfo mi; \
MALLOC_TRACE("mallinfo()"); \
init_done = 1;
- VALGRIND_MAGIC_SEQUENCE(res, -1, VG_USERREQ__GET_MALLOCFUNCS, &info,
- 0, 0, 0);
+ VALGRIND_DO_CLIENT_REQUEST(res, -1, VG_USERREQ__GET_MALLOCFUNCS, &info,
+ 0, 0, 0);
}
/*--------------------------------------------------------------------*/
give finer interleaving but much increased scheduling overheads. */
#define SCHEDULING_QUANTUM 50000
-/* If true, a fault is Valgrind-internal (ie, a bug) */
-Bool VG_(my_fault) = True;
+/* If False, a fault is Valgrind-internal (ie, a bug) */
+Bool VG_(in_generated_code) = False;
/* Counts downwards in VG_(run_innerloop). */
UInt VG_(dispatch_ctr);
}
-/* Set the standard set of blocked signals, used wheneever we're not
+/* Set the standard set of blocked signals, used whenever we're not
running a client syscall. */
static void block_signals(ThreadId tid)
{
VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, NULL);
}
-/* Use gcc's built-in setjmp/longjmp. longjmp must not restore signal
- mask state, but does need to pass "val" through. */
-#define SCHEDSETJMP(tid, jumped, stmt) \
- do { \
- ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid); \
- \
- (jumped) = __builtin_setjmp(_qq_tst->sched_jmpbuf); \
- if ((jumped) == 0) { \
- vg_assert(!_qq_tst->sched_jmpbuf_valid); \
- _qq_tst->sched_jmpbuf_valid = True; \
- stmt; \
- } else if (VG_(clo_trace_sched)) \
- VG_(printf)("SCHEDSETJMP(line %d) tid %d, jumped=%d\n", \
- __LINE__, tid, jumped); \
- vg_assert(_qq_tst->sched_jmpbuf_valid); \
- _qq_tst->sched_jmpbuf_valid = False; \
- } while(0)
-
-/* Run the thread tid for a while, and return a VG_TRC_* value to the
- scheduler indicating what happened. */
-static
-UInt run_thread_for_a_while ( ThreadId tid )
-{
- volatile Bool jumped;
- volatile ThreadState *tst = VG_(get_ThreadState)(tid);
-
- volatile UInt trc = 0;
- volatile Int dispatch_ctr_SAVED = VG_(dispatch_ctr);
- volatile Int done_this_time;
-
- /* For paranoia purposes only */
- volatile Addr a_vex = (Addr) & VG_(threads)[tid].arch.vex;
- volatile Addr a_vexsh = (Addr) & VG_(threads)[tid].arch.vex_shadow;
- volatile Addr a_spill = (Addr) & VG_(threads)[tid].arch.vex_spill;
- volatile UInt sz_vex = (UInt) sizeof VG_(threads)[tid].arch.vex;
- volatile UInt sz_vexsh = (UInt) sizeof VG_(threads)[tid].arch.vex_shadow;
- volatile UInt sz_spill = (UInt) sizeof VG_(threads)[tid].arch.vex_spill;
-
- /* Paranoia */
- vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(VG_(is_running_thread)(tid));
- vg_assert(!VG_(is_exiting)(tid));
-
- /* Even more paranoia. Check that what we have matches
- Vex's guest state layout requirements. */
- if (0)
- VG_(printf)("%p %d %p %d %p %d\n",
- (void*)a_vex, sz_vex, (void*)a_vexsh, sz_vexsh,
- (void*)a_spill, sz_spill );
-
- vg_assert(VG_IS_8_ALIGNED(sz_vex));
- vg_assert(VG_IS_8_ALIGNED(sz_vexsh));
- vg_assert(VG_IS_16_ALIGNED(sz_spill));
-
- vg_assert(VG_IS_4_ALIGNED(a_vex));
- vg_assert(VG_IS_4_ALIGNED(a_vexsh));
- vg_assert(VG_IS_4_ALIGNED(a_spill));
-
- vg_assert(sz_vex == sz_vexsh);
- vg_assert(a_vex + sz_vex == a_vexsh);
-
- vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
- vg_assert(a_vex + 2 * sz_vex == a_spill);
-
-# if defined(VGA_ppc32) || defined(VGA_ppc64)
- /* This is necessary due to the hacky way vex models reservations
- on ppc. It's really quite incorrect for each thread to have its
- own reservation flag/address, since it's really something that
- all threads share (that's the whole point). But having shared
- guest state is something we can't model with Vex. However, as
- per PaulM's 2.4.0ppc, the reservation is modelled using a
- reservation flag which is cleared at each context switch. So it
- is indeed possible to get away with a per thread-reservation if
- the thread's reservation is cleared before running it.
-
- This should be abstractified and lifted out.
- */
- /* Clear any existing reservation that this thread might have made
- last time it was running. */
- VG_(threads)[tid].arch.vex.guest_RESVN = 0;
-
- /* ppc guest_state vector regs must be 16byte aligned for loads/stores */
- vg_assert(VG_IS_16_ALIGNED(VG_(threads)[tid].arch.vex.guest_VR0));
- vg_assert(VG_IS_16_ALIGNED(VG_(threads)[tid].arch.vex_shadow.guest_VR0));
-# endif
-
- /* there should be no undealt-with signals */
- //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);
-
- //VG_(printf)("running EIP = %p ESP=%p\n",
- //VG_(threads)[tid].arch.m_eip, VG_(threads)[tid].arch.m_esp);
-
- vg_assert(VG_(my_fault));
- VG_(my_fault) = False;
-
- SCHEDSETJMP(
- tid,
- jumped,
- trc = (UInt)VG_(run_innerloop)( (void*)&tst->arch.vex,
- VG_(clo_profile_flags) > 0 ? 1 : 0 )
- );
-
- //nextEIP = tst->arch.m_eip;
- //if (nextEIP >= VG_(client_end))
- // VG_(printf)("trc=%d jump to %p from %p\n",
- // trc, nextEIP, EIP);
-
- VG_(my_fault) = True;
-
- if (jumped) {
- /* We get here if the client took a fault, which caused our
- signal handler to longjmp. */
- vg_assert(trc == 0);
- trc = VG_TRC_FAULT_SIGNAL;
- block_signals(tid);
- }
-
- done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 0;
-
- vg_assert(done_this_time >= 0);
- bbs_done += (ULong)done_this_time;
-
- return trc;
-}
-
-
static void os_state_clear(ThreadState *tst)
{
tst->os_state.lwpid = 0;
}
/*
- Called in the child after fork. If the parent has multiple
- threads, then we've inhereted a VG_(threads) array describing them,
- but only the thread which called fork() is actually alive in the
- child. This functions needs to clean up all those other thread
- structures.
+ Called in the child after fork. If the parent has multiple
+ threads, then we've inherited a VG_(threads) array describing them,
+ but only the thread which called fork() is actually alive in the
+ child. This function needs to clean up all those other thread
+ structures.
Whichever tid in the parent which called fork() becomes the
master_tid in the child. That's because the only living slot in
}
+/* ---------------------------------------------------------------------
+ Helpers for running translations.
+ ------------------------------------------------------------------ */
+
+/* Use gcc's built-in setjmp/longjmp. longjmp must not restore signal
+ mask state, but does need to pass "val" through. */
+#define SCHEDSETJMP(tid, jumped, stmt) \
+ do { \
+ ThreadState * volatile _qq_tst = VG_(get_ThreadState)(tid); \
+ \
+ (jumped) = __builtin_setjmp(_qq_tst->sched_jmpbuf); \
+ if ((jumped) == 0) { \
+ vg_assert(!_qq_tst->sched_jmpbuf_valid); \
+ _qq_tst->sched_jmpbuf_valid = True; \
+ stmt; \
+ } else if (VG_(clo_trace_sched)) \
+ VG_(printf)("SCHEDSETJMP(line %d) tid %d, jumped=%d\n", \
+ __LINE__, tid, jumped); \
+ vg_assert(_qq_tst->sched_jmpbuf_valid); \
+ _qq_tst->sched_jmpbuf_valid = False; \
+ } while(0)
+
+
+/* Do various guest state alignment checks prior to running a thread.
+ Specifically, check that what we have matches Vex's guest state
+ layout requirements. */
+static inline void do_pre_run_checks ( volatile ThreadState* tst )
+{
+ Addr a_vex = (Addr) & tst->arch.vex;
+ Addr a_vexsh = (Addr) & tst->arch.vex_shadow;
+ Addr a_spill = (Addr) & tst->arch.vex_spill;
+ UInt sz_vex = (UInt) sizeof tst->arch.vex;
+ UInt sz_vexsh = (UInt) sizeof tst->arch.vex_shadow;
+ UInt sz_spill = (UInt) sizeof tst->arch.vex_spill;
+
+ if (0)
+ VG_(printf)("%p %d %p %d %p %d\n",
+ (void*)a_vex, sz_vex, (void*)a_vexsh, sz_vexsh,
+ (void*)a_spill, sz_spill );
+
+ vg_assert(VG_IS_8_ALIGNED(sz_vex));
+ vg_assert(VG_IS_8_ALIGNED(sz_vexsh));
+ vg_assert(VG_IS_16_ALIGNED(sz_spill));
+
+ vg_assert(VG_IS_4_ALIGNED(a_vex));
+ vg_assert(VG_IS_4_ALIGNED(a_vexsh));
+ vg_assert(VG_IS_4_ALIGNED(a_spill));
+
+ vg_assert(sz_vex == sz_vexsh);
+ vg_assert(a_vex + sz_vex == a_vexsh);
+
+ vg_assert(sz_spill == LibVEX_N_SPILL_BYTES);
+ vg_assert(a_vex + 2 * sz_vex == a_spill);
+
+# if defined(VGA_ppc32) || defined(VGA_ppc64)
+ /* ppc guest_state vector regs must be 16 byte aligned for
+ loads/stores */
+ vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VR0));
+ vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow.guest_VR0));
+# endif
+}
+
+
+/* Run the thread tid for a while, and return a VG_TRC_* value
+ indicating why VG_(run_innerloop) stopped. */
+static UInt run_thread_for_a_while ( ThreadId tid )
+{
+ volatile Int jumped;
+ volatile ThreadState* tst;
+ volatile UInt trc;
+ volatile Int dispatch_ctr_SAVED;
+ volatile Int done_this_time;
+
+ /* Paranoia */
+ vg_assert(VG_(is_valid_tid)(tid));
+ vg_assert(VG_(is_running_thread)(tid));
+ vg_assert(!VG_(is_exiting)(tid));
+
+ tst = VG_(get_ThreadState)(tid);
+ do_pre_run_checks(tst);
+ /* end Paranoia */
+
+ trc = 0;
+ dispatch_ctr_SAVED = VG_(dispatch_ctr);
+
+# if defined(VGA_ppc32) || defined(VGA_ppc64)
+ /* This is necessary due to the hacky way vex models reservations
+ on ppc. It's really quite incorrect for each thread to have its
+ own reservation flag/address, since it's really something that
+ all threads share (that's the whole point). But having shared
+ guest state is something we can't model with Vex. However, as
+ per PaulM's 2.4.0ppc, the reservation is modelled using a
+ reservation flag which is cleared at each context switch. So it
+ is indeed possible to get away with a per thread-reservation if
+ the thread's reservation is cleared before running it.
+ */
+ /* Clear any existing reservation that this thread might have made
+ last time it was running. */
+ VG_(threads)[tid].arch.vex.guest_RESVN = 0;
+# endif
+
+ /* there should be no undealt-with signals */
+ //vg_assert(VG_(threads)[tid].siginfo.si_signo == 0);
+
+ vg_assert(VG_(in_generated_code) == False);
+ VG_(in_generated_code) = True;
+
+ SCHEDSETJMP(
+ tid,
+ jumped,
+ trc = (UInt)VG_(run_innerloop)( (void*)&tst->arch.vex,
+ VG_(clo_profile_flags) > 0 ? 1 : 0 )
+ );
+
+ VG_(in_generated_code) = False;
+
+ if (jumped) {
+ /* We get here if the client took a fault that caused our signal
+ handler to longjmp. */
+ vg_assert(trc == 0);
+ trc = VG_TRC_FAULT_SIGNAL;
+ block_signals(tid);
+ }
+
+ done_this_time = (Int)dispatch_ctr_SAVED - (Int)VG_(dispatch_ctr) - 0;
+
+ vg_assert(done_this_time >= 0);
+ bbs_done += (ULong)done_this_time;
+
+ return trc;
+}
+
+
+/* Run a no-redir translation just once, and return the resulting
+ VG_TRC_* value. */
+static UInt run_noredir_translation ( Addr hcode, ThreadId tid )
+{
+ volatile Int jumped;
+ volatile ThreadState* tst;
+ volatile UWord argblock[4];
+
+ /* Paranoia */
+ vg_assert(VG_(is_valid_tid)(tid));
+ vg_assert(VG_(is_running_thread)(tid));
+ vg_assert(!VG_(is_exiting)(tid));
+
+ tst = VG_(get_ThreadState)(tid);
+ do_pre_run_checks(tst);
+ /* end Paranoia */
+
+# if defined(VGA_ppc32) || defined(VGA_ppc64)
+ /* I don't think we need to clear this thread's guest_RESVN here,
+ because we can only get here if run_thread_for_a_while() has
+ been used immediately before, on this same thread. */
+# endif
+
+ argblock[0] = (UWord)hcode;
+ argblock[1] = (UWord)&VG_(threads)[tid].arch.vex;
+ argblock[2] = 0;
+ argblock[3] = 0;
+
+ vg_assert(VG_(in_generated_code) == False);
+ VG_(in_generated_code) = True;
+
+ SCHEDSETJMP(
+ tid,
+ jumped,
+ VG_(run_a_noredir_translation)( &argblock[0] )
+ );
+
+ VG_(in_generated_code) = False;
+
+ if (jumped) {
+ /* We get here if the client took a fault that caused our signal
+ handler to longjmp. */
+ vg_assert(argblock[3] == argblock[1]); /* iow, trc was not set */
+ block_signals(tid);
+ return VG_TRC_FAULT_SIGNAL;
+ } else {
+ /* store away the guest program counter */
+ VG_(set_IP)( tid, argblock[2] );
+ if (argblock[3] == argblock[1])
+ /* the guest state pointer afterwards was unchanged */
+ return VG_TRC_BORING;
+ else
+ return (UInt)argblock[3];
+ }
+}
+
+/* ---------------------------------------------------------------------
+ Helper stuff for managing no-redirection translations.
+ ------------------------------------------------------------------ */
+
+/* Run a translation. argblock points to 4 UWords, 2 to carry args
+ and 2 to carry results:
+ 0: input: ptr to translation
+ 1: input: ptr to guest state
+ 2: output: next guest PC
+ 3: output: guest state pointer afterwards (== thread return code)
+*/
+extern UWord run_a_translation ( UWord* argblock );
+#if defined(VGP_x86_linux)
+#elif defined(VGP_amd64_linux)
+asm("\n"
+".text\n"
+"run_a_translation:\n"
+" pushq %rbx\n"
+" pushq %rbp\n"
+" pushq %r12\n"
+" pushq %r13\n"
+" pushq %r14\n"
+" pushq %r15\n"
+
+" pushq %rdi\n" /* we will need it after running the translation */
+" movq 8(%rdi), %rbp\n"
+" call *0(%rdi)\n"
+
+" popq %rdi\n"
+" movq %rax, 16(%rdi)\n"
+" movq %rbp, 24(%rdi)\n"
+
+" popq %r15\n"
+" popq %r14\n"
+" popq %r13\n"
+" popq %r12\n"
+" popq %rbp\n"
+" popq %rbx\n"
+" ret\n"
+".previous\n"
+);
+#elif defined(VGP_ppc32_linux)
+asm("\n"
+".text\n"
+"run_a_translation:\n"
+" stwu 1,-256(1)\n"
+" stw 14,128(1)\n"
+" stw 15,132(1)\n"
+" stw 16,136(1)\n"
+" stw 17,140(1)\n"
+" stw 18,144(1)\n"
+" stw 19,148(1)\n"
+" stw 20,152(1)\n"
+" stw 21,156(1)\n"
+" stw 22,160(1)\n"
+" stw 23,164(1)\n"
+" stw 24,168(1)\n"
+" stw 25,172(1)\n"
+" stw 26,176(1)\n"
+" stw 27,180(1)\n"
+" stw 28,184(1)\n"
+" stw 29,188(1)\n"
+" stw 30,192(1)\n"
+" stw 31,196(1)\n"
+" mflr 31\n"
+" stw 31,200(1)\n"
+
+" stw 3,204(1)\n"
+" lwz 31,4(3)\n"
+" lwz 30,0(3)\n"
+" mtlr 30\n"
+" blrl\n"
+
+" lwz 4,204(1)\n"
+" stw 3, 8(4)\n"
+" stw 31,12(4)\n"
+
+" lwz 14,128(1)\n"
+" lwz 15,132(1)\n"
+" lwz 16,136(1)\n"
+" lwz 17,140(1)\n"
+" lwz 18,144(1)\n"
+" lwz 19,148(1)\n"
+" lwz 20,152(1)\n"
+" lwz 21,156(1)\n"
+" lwz 22,160(1)\n"
+" lwz 23,164(1)\n"
+" lwz 24,168(1)\n"
+" lwz 25,172(1)\n"
+" lwz 26,176(1)\n"
+" lwz 27,180(1)\n"
+" lwz 28,184(1)\n"
+" lwz 29,188(1)\n"
+" lwz 30,192(1)\n"
+" lwz 31,200(1)\n"
+" mtlr 31\n"
+" lwz 31,196(1)\n"
+" addi 1,1,256\n"
+" blr\n"
+
+".previous\n"
+);
+#else
+# error "Not implemented"
+#endif
+
+
+/* tid just requested a jump to the noredir version of its current
+ program counter. So make up that translation if needed, run it,
+ and return the resulting thread return code. */
+static UInt/*trc*/ handle_noredir_jump ( ThreadId tid )
+{
+ AddrH hcode = 0;
+ Addr ip = VG_(get_IP)(tid);
+
+ Bool found = VG_(search_unredir_transtab)( &hcode, ip );
+ if (!found) {
+ /* Not found; we need to request a translation. */
+ if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done,
+ False/*NO REDIRECTION*/ )) {
+
+ found = VG_(search_unredir_transtab)( &hcode, ip );
+ vg_assert2(found, "unredir translation missing after creation?!");
+
+ } else {
+ // If VG_(translate)() fails, it's because it had to throw a
+ // signal because the client jumped to a bad address. That
+ // means that either a signal has been set up for delivery,
+ // or the thread has been marked for termination. Either
+ // way, we just need to go back into the scheduler loop.
+ return VG_TRC_BORING;
+ }
+
+ }
+
+ vg_assert(found);
+ vg_assert(hcode != 0);
+
+ /* Otherwise run it and return the resulting VG_TRC_* value. */
+ return run_noredir_translation( hcode, tid );
+}
+
+
/* ---------------------------------------------------------------------
The scheduler proper.
------------------------------------------------------------------ */
found = VG_(search_transtab)( NULL, ip, True/*upd_fast_cache*/ );
if (!found) {
/* Not found; we need to request a translation. */
- if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/, bbs_done )) {
+ if (VG_(translate)( tid, ip, /*debug*/False, 0/*not verbose*/,
+ bbs_done, True/*allow redirection*/ )) {
found = VG_(search_transtab)( NULL, ip, True );
vg_assert2(found, "VG_TRC_INNER_FASTMISS: missing tt_fast entry");
print_sched_event(tid, buf);
}
- switch(trc) {
+ if (trc == VEX_TRC_JMP_NOREDIR) {
+ /* If we got a request to run a no-redir version of
+ something, do so now -- handle_noredir_jump just (creates
+ and) runs that one translation. The flip side is that the
+ noredir translation can't itself return another noredir
+ request -- that would be nonsensical. It can, however,
+ return VG_TRC_BORING, which just means keep going as
+ normal. */
+ trc = handle_noredir_jump(tid);
+ vg_assert(trc != VEX_TRC_JMP_NOREDIR);
+ }
+
+ switch (trc) {
+ case VG_TRC_BORING:
+ /* no special event, just keep going. */
+ break;
+
case VG_TRC_INNER_FASTMISS:
vg_assert(VG_(dispatch_ctr) > 1);
handle_tt_miss(tid);
zztid, O_CLREQ_RET, sizeof(UWord), f); \
} while (0)
+
/* ---------------------------------------------------------------------
Handle client requests.
------------------------------------------------------------------ */
VG_(message)(Vg_DebugMsg,
" -> extended stack base to %p",
VG_PGROUNDDN(fault));
- return; // extension succeeded, restart instruction
+ return; // extension succeeded, restart host (hence guest)
+ // instruction
} else
VG_(message)(Vg_UserMsg,
"Stack overflow in thread %d: can't grow stack to %p",
VG_(set_default_handler)(sigNo);
}
- if (!VG_(my_fault)) {
+ if (VG_(in_generated_code)) {
/* Can't continue; must longjmp back to the scheduler and thus
enter the sighandler immediately. */
deliver_signal(tid, info);
#include "pub_core_basics.h"
#include "pub_core_aspacemgr.h"
-#include "pub_core_machine.h" // For VG_(machine_get_VexArchInfo)
- // and VG_(get_SP)
+#include "pub_core_machine.h" // VG_(fnptr_to_fnentry)
+ // VG_(get_SP)
+ // VG_(machine_get_VexArchInfo)
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_options.h"
-#include "pub_core_debuginfo.h" // Needed for pub_core_redir :(
-#include "pub_core_redir.h" // For VG_(code_redirect)()
+#include "pub_core_debuginfo.h" // VG_(get_fnname_w_offset)
+#include "pub_core_redir.h" // VG_(redir_do_lookup)
+
+#include "pub_core_signals.h" // VG_(synth_fault_{perms,mapping})
+#include "pub_core_stacks.h" // VG_(unknown_SP_update)()
+#include "pub_core_tooliface.h" // VG_(tdict)
-#include "pub_core_signals.h" // For VG_(synth_fault_{perms,mapping})()
-#include "pub_core_stacks.h" // For VG_(unknown_SP_update)()
-#include "pub_core_tooliface.h" // For VG_(tdict)
#include "pub_core_translate.h"
#include "pub_core_transtab.h"
-#include "pub_core_dispatch.h" // VG_(run_innerloop__dispatch_{un}profiled)
+#include "pub_core_dispatch.h" // VG_(run_innerloop__dispatch_{un}profiled)
+ // VG_(run_a_noredir_translation__return_point)
/*------------------------------------------------------------*/
}
/* Destination is redirected? */
- if (addr != VG_(code_redirect)(addr))
+ if (addr != VG_(redir_do_lookup)(addr, NULL))
goto dontchase;
/* well, ok then. go on and chase. */
}
+/* Note: see comments at top of m_redir.c for the Big Picture on how
+ redirections are managed. */
+
Bool VG_(translate) ( ThreadId tid,
Addr64 orig_addr,
Bool debugging_translation,
Int debugging_verbosity,
- ULong bbs_done )
+ ULong bbs_done,
+ Bool allow_redirection )
{
Addr64 redir, orig_addr_noredir = orig_addr;
Int tmpbuf_used, verbosity, i;
Bool notrace_until_done, do_self_check;
+ Bool did_redirect, isWrap;
UInt notrace_until_limit = 0;
NSegment* seg;
VexArch vex_arch;
/* Look in the code redirect table to see if we should
translate an alternative address for orig_addr. */
- redir = VG_(code_redirect)(orig_addr);
+ isWrap = False;
+ if (allow_redirection) {
+ redir = VG_(redir_do_lookup)(orig_addr, &isWrap);
+ did_redirect = redir != orig_addr;
+ } else {
+ redir = orig_addr;
+ did_redirect = False;
+ }
+
+ if (did_redirect == False) vg_assert(isWrap == False);
- if (redir != orig_addr && VG_(clo_verbosity) >= 2) {
+ if (redir != orig_addr
+ && (VG_(clo_verbosity) >= 2 || VG_(clo_trace_redir))) {
Bool ok;
Char name1[64] = "";
Char name2[64] = "";
/* Set up closure arg for "chase_into_ok" */
chase_into_ok__CLOSURE_tid = tid;
+ /* Set up args for LibVEX_Translate. */
vta.arch_guest = vex_arch;
vta.archinfo_guest = vex_archinfo;
vta.arch_host = vex_arch;
? vg_SP_update_pass
: NULL;
vta.do_self_check = do_self_check;
+ /* If this translation started at a redirected address, then we
+ need to ask the JIT to generate code to put the non-redirected
+ guest address into guest_NRADDR. */
+ vta.do_set_NRADDR = isWrap;
vta.traceflags = verbosity;
/* Set up the dispatch-return info. For archs without a link
register, vex generates a jump back to the specified dispatch
address. Else, it just generates a branch-to-LR. */
# if defined(VGA_x86) || defined(VGA_amd64)
- vta.dispatch = VG_(clo_profile_flags) > 0
- ? (void*) &VG_(run_innerloop__dispatch_profiled)
- : (void*) &VG_(run_innerloop__dispatch_unprofiled);
+ vta.dispatch
+ = (!allow_redirection)
+ ? /* It's a no-redir translation. Will be run with the nonstandard
+ dispatcher VG_(run_a_noredir_translation)
+ and so needs a nonstandard return point. */
+ (void*) &VG_(run_a_noredir_translation__return_point)
+
+ : /* normal translation. Uses VG_(run_innerloop). Return
+ point depends on whether we're profiling bbs or not. */
+ VG_(clo_profile_flags) > 0
+ ? (void*) &VG_(run_innerloop__dispatch_profiled)
+ : (void*) &VG_(run_innerloop__dispatch_unprofiled);
# elif defined(VGA_ppc32) || defined(VGA_ppc64)
vta.dispatch = NULL;
# else
// If debugging, don't do anything with the translated block; we
// only did this for the debugging output produced along the way.
if (!debugging_translation) {
- // Note that we use orig_addr_noredir, not orig_addr, which
- // might have been changed by the redirection
- VG_(add_to_transtab)( &vge,
- orig_addr_noredir,
- (Addr)(&tmpbuf[0]),
- tmpbuf_used,
- do_self_check );
+
+ if (allow_redirection) {
+ // Put it into the normal TT/TC structures. This is the
+ // normal case.
+
+ // Note that we use orig_addr_noredir, not orig_addr, which
+ // might have been changed by the redirection
+ VG_(add_to_transtab)( &vge,
+ orig_addr_noredir,
+ (Addr)(&tmpbuf[0]),
+ tmpbuf_used,
+ do_self_check );
+ } else {
+ VG_(add_to_unredir_transtab)( &vge,
+ orig_addr_noredir,
+ (Addr)(&tmpbuf[0]),
+ tmpbuf_used,
+ do_self_check );
+ }
}
return True;
/*--------------------------------------------------------------------*/
/*--- end ---*/
/*--------------------------------------------------------------------*/
-
/* Sanity check absolutely everything. True == check passed. */
+/* forward */
+static Bool sanity_check_redir_tt_tc ( void );
+
static Bool sanity_check_all_sectors ( void )
{
Int sno;
if (!sane)
return False;
}
+ if (!sanity_check_redir_tt_tc() )
+ return False;
return True;
}
/*--- Delete translations. ---*/
/*-------------------------------------------------------------*/
+/* forward */
+static void unredir_discard_translations( Addr64, ULong );
+
/* Stuff for deleting translations which intersect with a given
address range. Unfortunately, to make this run at a reasonable
speed, it is complex. */
if (anyDeleted)
invalidateFastCache();
+ /* don't forget the no-redir cache */
+ unredir_discard_translations( guest_start, range );
+
/* Post-deletion sanity check */
if (VG_(clo_sanity_level >= 4)) {
Int i;
}
+/*------------------------------------------------------------*/
+/*--- AUXILIARY: the unredirected TT/TC ---*/
+/*------------------------------------------------------------*/
+
+/* A very simple translation cache which holds a small number of
+ unredirected translations. This is completely independent of the
+ main tt/tc structures. When unredir_tc or unredir_tt becomes full,
+ both structures are simply dumped and we start over.
+
+ Since these translations are unredirected, the search key is (by
+ definition) the first address entry in the .vge field. */
+
+/* Sized to hold 500 translations of average size 1000 bytes. */
+
+#define UNREDIR_SZB 1000
+
+#define N_UNREDIR_TT 500
+#define N_UNREDIR_TCQ (N_UNREDIR_TT * UNREDIR_SZB / sizeof(ULong))
+
+typedef
+ struct {
+ VexGuestExtents vge;
+ Addr hcode;
+ Bool inUse;
+ }
+ UTCEntry;
+
+/* We just allocate forwards in _tc, never deleting. */
+static ULong unredir_tc[N_UNREDIR_TCQ] __attribute__((aligned(8)));
+static Int unredir_tc_used;
+
+/* Slots in _tt can come into use and out again (.inUse).
+ Nevertheless _tt_highwater is maintained so that invalidations
+ don't have to scan all the slots when only a few are in use.
+ _tt_highwater holds the index of the highest ever allocated
+ slot. */
+static UTCEntry unredir_tt[N_UNREDIR_TT];
+static Int unredir_tt_highwater;
+
+
+static void init_unredir_tt_tc ( void )
+{
+ Int i;
+ unredir_tc_used = 0;
+ for (i = 0; i < N_UNREDIR_TT; i++)
+ unredir_tt[i].inUse = False;
+ unredir_tt_highwater = -1;
+}
+
+/* Do a sanity check; return False on failure. */
+static Bool sanity_check_redir_tt_tc ( void )
+{
+ Int i;
+ if (unredir_tt_highwater < -1) return False;
+ if (unredir_tt_highwater >= N_UNREDIR_TT) return False;
+
+ for (i = unredir_tt_highwater+1; i < N_UNREDIR_TT; i++)
+ if (unredir_tt[i].inUse)
+ return False;
+
+ if (unredir_tc_used < 0) return False;
+ if (unredir_tc_used > N_UNREDIR_TCQ) return False;
+
+ return True;
+}
+
+
+/* Add an UNREDIRECTED translation of vge to TT/TC. The translation
+ is temporarily in code[0 .. code_len-1].
+*/
+void VG_(add_to_unredir_transtab)( VexGuestExtents* vge,
+ Addr64 entry,
+ AddrH code,
+ UInt code_len,
+ Bool is_self_checking )
+{
+ Int i, j, code_szQ;
+ HChar *srcP, *dstP;
+
+ vg_assert(sanity_check_redir_tt_tc());
+
+ /* This is the whole point: it's not redirected! */
+ vg_assert(entry == vge->base[0]);
+
+ /* How many unredir_tc (8-byte) slots are needed */
+ code_szQ = (code_len + 7) / 8;
+
+ /* Look for an empty unredir_tt slot */
+ for (i = 0; i < N_UNREDIR_TT; i++)
+ if (!unredir_tt[i].inUse)
+ break;
+
+ if (i >= N_UNREDIR_TT || code_szQ > (N_UNREDIR_TCQ - unredir_tc_used)) {
+ /* It's full; dump everything we currently have */
+ init_unredir_tt_tc();
+ i = 0;
+ }
+
+ vg_assert(unredir_tc_used >= 0);
+ vg_assert(unredir_tc_used <= N_UNREDIR_TCQ);
+ vg_assert(code_szQ > 0);
+ vg_assert(code_szQ + unredir_tc_used <= N_UNREDIR_TCQ);
+ vg_assert(i >= 0 && i < N_UNREDIR_TT);
+ vg_assert(unredir_tt[i].inUse == False);
+
+ if (i > unredir_tt_highwater)
+ unredir_tt_highwater = i;
+
+ dstP = (HChar*)&unredir_tc[unredir_tc_used];
+ srcP = (HChar*)code;
+ for (j = 0; j < code_len; j++)
+ dstP[j] = srcP[j];
+
+ unredir_tt[i].inUse = True;
+ unredir_tt[i].vge = *vge;
+ unredir_tt[i].hcode = (Addr)dstP;
+
+ unredir_tc_used += code_szQ;
+ vg_assert(unredir_tc_used >= 0);
+ vg_assert(unredir_tc_used <= N_UNREDIR_TCQ);
+
+ vg_assert(&dstP[code_len] <= (HChar*)&unredir_tc[unredir_tc_used]);
+}
+
+Bool VG_(search_unredir_transtab) ( /*OUT*/AddrH* result,
+ Addr64 guest_addr )
+{
+ Int i;
+ for (i = 0; i < N_UNREDIR_TT; i++) {
+ if (!unredir_tt[i].inUse)
+ continue;
+ if (unredir_tt[i].vge.base[0] == guest_addr) {
+ *result = (AddrH)unredir_tt[i].hcode;
+ return True;
+ }
+ }
+ return False;
+}
+
+static void unredir_discard_translations( Addr64 guest_start, ULong range )
+{
+ Int i;
+
+ vg_assert(sanity_check_redir_tt_tc());
+
+ for (i = 0; i <= unredir_tt_highwater; i++) {
+ if (unredir_tt[i].inUse
+ && overlaps( guest_start, range, &unredir_tt[i].vge))
+ unredir_tt[i].inUse = False;
+ }
+}
+
+
/*------------------------------------------------------------*/
/*--- Initialisation. ---*/
/*------------------------------------------------------------*/
/* and the fast caches. */
invalidateFastCache();
+ /* and the unredir tt/tc */
+ init_unredir_tt_tc();
+
if (VG_(clo_verbosity) > 2) {
VG_(message)(Vg_DebugMsg,
"TT/TC: cache: %d sectors of %d bytes each = %d total",
extern Int VG_(fd_soft_limit);
extern Int VG_(fd_hard_limit);
+/* Useful addresses extracted from the client */
+/* Where is the __libc_freeres_wrapper routine we made? */
+extern Addr VG_(client___libc_freeres_wrapper);
#endif // __PUB_CORE_CLIENTSTATE_H
unsigned long _qzz_res = 0;
va_list vargs;
va_start(vargs, format);
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, VG_USERREQ__INTERNAL_PRINTF,
- (unsigned long)format, (unsigned long)vargs, 0, 0);
+ VALGRIND_DO_CLIENT_REQUEST(
+ _qzz_res, 0, VG_USERREQ__INTERNAL_PRINTF,
+ (unsigned long)format, (unsigned long)vargs, 0, 0
+ );
va_end(vargs);
return _qzz_res;
}
extern Bool VG_(get_fnname_nodemangle)( Addr a, Char* fnname, Int n_fnname );
-extern Addr VG_(reverse_search_one_symtab) ( const SegInfo* si, const Char* name );
-
extern Bool VG_(use_CFI_info) ( /*MOD*/Addr* ipP,
/*MOD*/Addr* spP,
/*MOD*/Addr* fpP,
#define __PUB_CORE_DEMANGLE_H
//--------------------------------------------------------------------
-// PURPOSE: This module exports a single function for demangling C++
-// names.
+// PURPOSE: This module exports functions for demangling C++ and
+// Z-encoded names.
//--------------------------------------------------------------------
-extern void VG_(demangle) ( Char* orig, Char* result, Int result_size );
+/* This is the main, standard demangler entry point. */
+
+extern
+void VG_(demangle) ( Char* orig, Char* result, Int result_size );
+
+/* Demangle a Z-encoded name as described in pub_tool_redir.h.
+ Z-encoded names are used by Valgrind for doing function
+ interception/wrapping.
+
+ Demangle 'sym' into its soname and fnname parts, putting them in
+ the specified buffers. Returns a Bool indicating whether the
+ demangling failed or not. A failure can occur because the prefix
+ isn't recognised, the internal Z-escaping is wrong, or because one
+ or the other (or both) of the output buffers becomes full. Passing
+ 'so' as NULL is acceptable if the caller is only interested in the
+ function name part. */
+
+extern
+Bool VG_(maybe_Z_demangle) ( const HChar* sym,
+ /*OUT*/HChar* so, Int soLen,
+ /*OUT*/HChar* fn, Int fnLen,
+ /*OUT*/Bool* isWrap );
#endif // __PUB_CORE_DEMANGLE_H
This code simply handles the common case fast -- when the translation
address is found in the translation cache. For anything else, the
scheduler does the work.
+
+ NOTE, VG_(run_innerloop) MUST NOT BE USED for noredir translations.
+ Instead use VG_(run_a_noredir_translation).
*/
extern
UWord VG_(run_innerloop) ( void* guest_state, UWord do_profiling );
-
#if defined(VGA_x86) || defined(VGA_amd64)
/* We need to locate a couple of labels inside VG_(run_innerloop), so
that Vex can add branches to them from generated code. Hence the
- following somewhat bogus decls. At least on x86 and amd64. */
+ following somewhat bogus decls. At least on x86 and amd64. ppc32
+ and ppc64 use straightforward bl-blr to get from dispatcher to
+ translation and back and so do not need these labels. */
extern void VG_(run_innerloop__dispatch_unprofiled);
extern void VG_(run_innerloop__dispatch_profiled);
#endif
+
+/* Run a no-redir translation. argblock points to 4 UWords, 2 to carry args
+ and 2 to carry results:
+ 0: input: ptr to translation
+ 1: input: ptr to guest state
+ 2: output: next guest PC
+ 3: output: guest state pointer afterwards (== thread return code)
+ MUST NOT BE USED for non-noredir (normal) translations.
+*/
+extern void VG_(run_a_noredir_translation) ( volatile UWord* argblock );
+#if defined(VGA_x86) || defined(VGA_amd64)
+/* We need to locate a label inside VG_(run_a_noredir_translation), so
+ that Vex can add branches to it from generated code. Hence the
+ following somewhat bogus decl. */
+extern void VG_(run_a_noredir_translation__return_point);
+#endif
+
+
#endif // __PUB_CORE_DISPATCH_H
/*--------------------------------------------------------------------*/
/* And some more of our own. These must not have the same values as
those from libvex_trc_values.h. (viz, 60 or below is safe). */
+#define VG_TRC_BORING 29 /* no event; just keep going */
#define VG_TRC_INNER_FASTMISS 37 /* TRC only; means fast-cache miss. */
#define VG_TRC_INNER_COUNTERZERO 41 /* TRC only; means bb ctr == 0 */
#define VG_TRC_FAULT_SIGNAL 43 /* TRC only; got sigsegv/sigbus */
#include "pub_tool_redir.h"
-//--------------------------------------------------------------------
-// General
-//--------------------------------------------------------------------
-
-// This module needs be told about all the symbols that get loaded, so
-// it can check if it needs to do anything special. This is the function
-// that does that checking. It modifies 'symbol' in-place by Z-decoding
-// it if necessary.
-void VG_(maybe_redir_or_notify) ( Char* symbol, Addr addr );
//--------------------------------------------------------------------
-// Code replacement
+// Notifications - by which we are told of state changes
//--------------------------------------------------------------------
-// See include/pub_tool_redir.h for details on how to do code replacement.
+/* Notify the module of a new SegInfo (called from m_debuginfo). */
+extern void VG_(redir_notify_new_SegInfo)( SegInfo* );
-typedef struct _CodeRedirect CodeRedirect;
+/* Notify the module of the disappearance of a SegInfo (also called
+ from m_debuginfo). */
+extern void VG_(redir_notify_delete_SegInfo)( SegInfo* );
-// This is the crucial redirection function. It answers the question:
-// should this code address be redirected somewhere else? It's used just
-// before translating a basic block.
-extern Addr VG_(code_redirect) ( Addr orig );
+/* Initialise the module, and load initial "hardwired" redirects. */
+extern void VG_(redir_initialise)( void );
-/* Set up some default redirects. */
-extern void VG_(setup_code_redirect_table) ( void );
-extern void VG_(resolve_existing_redirs_with_seginfo)(SegInfo *si);
+//--------------------------------------------------------------------
+// Queries
+//--------------------------------------------------------------------
+
+/* This is the crucial redirection function. It answers the question:
+ should this code address be redirected somewhere else? It's used
+ just before translating a basic block. If a redir is found,
+ *isWrap allows the caller to distinguish wrap- from replace- style
+ redirections. */
+extern Addr VG_(redir_do_lookup) ( Addr orig, Bool* isWrap );
//--------------------------------------------------------------------
Functions named with this macro should be in client space, ie. in
vgpreload_<tool>.h or vgpreload_core.h. */
-#define VG_NOTIFY_ON_LOAD(name) _vgw_##name
-#define VG_NOTIFY_ON_LOAD_PREFIX "_vgw_"
-#define VG_NOTIFY_ON_LOAD_PREFIX_LEN 5
+#define VG_NOTIFY_ON_LOAD(name) _vgnU_##name
+#define VG_NOTIFY_ON_LOAD_PREFIX "_vgnU_"
+#define VG_NOTIFY_ON_LOAD_PREFIX_LEN 6
-// Called by m_main to get our __libc_freeres wrapper.
-extern Addr VG_(get_libc_freeres_wrapper)(void);
//--------------------------------------------------------------------
// Function wrapping
// This is currently not working(?) --njn
/* Wrapping machinery */
-enum return_type {
- RT_RETURN,
- RT_LONGJMP,
- RT_EXIT,
-};
-
-typedef struct _FuncWrapper FuncWrapper;
-struct _FuncWrapper {
- void *(*before)(va_list args);
- void (*after) (void *nonce, enum return_type, Word retval);
-};
-
-extern void VG_(wrap_function)(Addr eip, const FuncWrapper *wrapper);
-extern const FuncWrapper *VG_(is_wrapped)(Addr eip);
-extern Bool VG_(is_wrapper_return)(Addr eip);
+//enum return_type {
+ // RT_RETURN,
+ // RT_LONGJMP,
+ // RT_EXIT,
+ //};
+//
+//typedef struct _FuncWrapper FuncWrapper;
+//struct _FuncWrapper {
+ // void *(*before)(va_list args);
+ // void (*after) (void *nonce, enum return_type, Word retval);
+ //};
+//
+//extern void VG_(wrap_function)(Addr eip, const FuncWrapper *wrapper);
+//extern const FuncWrapper *VG_(is_wrapped)(Addr eip);
+//extern Bool VG_(is_wrapper_return)(Addr eip);
/* Primary interface for adding wrappers for client-side functions. */
-extern CodeRedirect *VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
- const FuncWrapper *wrapper);
-
-extern Bool VG_(is_resolved)(const CodeRedirect *redir);
+//extern CodeRedirect *VG_(add_wrapper)(const Char *from_lib, const Char *from_sym,
+// const FuncWrapper *wrapper);
+//
+//extern Bool VG_(is_resolved)(const CodeRedirect *redir);
#endif // __PUB_CORE_REDIR_H
/* Stats ... */
extern void VG_(print_scheduler_stats) ( void );
-/* If true, a fault is Valgrind-internal (ie, a bug) */
-extern Bool VG_(my_fault);
+/* If False, a fault is Valgrind-internal (ie, a bug) */
+extern Bool VG_(in_generated_code);
/* Sanity checks which may be done at any time. The scheduler decides when. */
extern void VG_(sanity_check_general) ( Bool force_expensive );
Addr64 orig_addr,
Bool debugging_translation,
Int debugging_verbosity,
- ULong bbs_done );
+ ULong bbs_done,
+ Bool allow_redirection );
extern void VG_(print_translation_stats) ( void );
extern UInt VG_(get_bbs_translated) ( void );
+/* Add to / search the auxiliary, small, unredirected translation
+ table. */
+
+extern
+void VG_(add_to_unredir_transtab)( VexGuestExtents* vge,
+ Addr64 entry,
+ AddrH code,
+ UInt code_len,
+ Bool is_self_checking );
+extern
+Bool VG_(search_unredir_transtab) ( /*OUT*/AddrH* result,
+ Addr64 guest_addr );
+
// BB profiling stuff
typedef struct _BBProfEntry {
extern void __libc_freeres(void);
__libc_freeres();
#endif
- VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
- VG_USERREQ__LIBC_FREERES_DONE, 0, 0, 0, 0);
+ VALGRIND_DO_CLIENT_REQUEST(res, 0 /* default */,
+ VG_USERREQ__LIBC_FREERES_DONE, 0, 0, 0, 0);
/*NOTREACHED*/
*(int *)0 = 'x';
}
/*--- end ---*/
/*--------------------------------------------------------------------*/
+#if 0
+
+#define PTH_FUNC(ret_ty, f, args...) \
+ ret_ty VG_WRAP_FUNCTION_ZZ(libpthreadZdsoZd0,f)(args); \
+ ret_ty VG_WRAP_FUNCTION_ZZ(libpthreadZdsoZd0,f)(args)
+
+#include <stdio.h>
+#include <pthread.h>
+
+// pthread_create
+/* Demonstration wrapper (inside '#if 0', so not compiled): traces
+   pthread_create calls to stderr and forwards to the real function.
+   Uses VALGRIND_GET_ORIG_FN for consistency with the other wrappers
+   below; GET_ORIG_FN is defined in valgrind.h as an exact alias of
+   VALGRIND_GET_NRADDR, so behaviour is unchanged. */
+PTH_FUNC(int, pthreadZucreateZAZa, // pthread_create@*
+ pthread_t *thread, const pthread_attr_t *attr,
+ void *(*start) (void *), void *arg)
+{
+ int ret;
+ void* fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ fprintf(stderr, "<< pthread_create wrapper"); fflush(stderr);
+
+ CALL_FN_W_WWWW(ret, fn, thread,attr,start,arg);
+
+ fprintf(stderr, " -> %d >>\n", ret);
+ return ret;
+}
+
+// pthread_mutex_lock
+/* Demonstration wrapper (inside '#if 0', so not compiled): traces
+   each pthread_mutex_lock call to stderr, then forwards to the real
+   (unredirected) function via CALL_FN_W_W. */
+PTH_FUNC(int, pthreadZumutexZulock, // pthread_mutex_lock
+ pthread_mutex_t *mutex)
+{
+ int ret;
+ void* fn;
+ /* fn := address of the real pthread_mutex_lock (no-redirect). */
+ VALGRIND_GET_ORIG_FN(fn);
+ fprintf(stderr, "<< pthread_mxlock %p", mutex); fflush(stderr);
+
+ CALL_FN_W_W(ret, fn, mutex);
+
+ fprintf(stderr, " -> %d >>\n", ret);
+ return ret;
+}
+
+// pthread_mutex_unlock
+/* Demonstration wrapper (inside '#if 0', so not compiled): traces
+   each pthread_mutex_unlock call to stderr, then forwards to the
+   real (unredirected) function via CALL_FN_W_W. */
+PTH_FUNC(int, pthreadZumutexZuunlock, // pthread_mutex_unlock
+ pthread_mutex_t *mutex)
+{
+ int ret;
+ void* fn;
+ /* fn := address of the real pthread_mutex_unlock (no-redirect). */
+ VALGRIND_GET_ORIG_FN(fn);
+
+ fprintf(stderr, "<< pthread_mxunlk %p", mutex); fflush(stderr);
+
+ CALL_FN_W_W(ret, fn, mutex);
+
+ fprintf(stderr, " -> %d >>\n", ret);
+ return ret;
+}
+
+#endif
is present or not. */
extern SegInfo* VG_(find_seginfo) ( Addr a );
-extern const SegInfo* VG_(next_seginfo) ( const SegInfo *si );
+/* Fish bits out of SegInfos. */
extern Addr VG_(seginfo_start) ( const SegInfo *si );
extern SizeT VG_(seginfo_size) ( const SegInfo *si );
extern const UChar* VG_(seginfo_soname) ( const SegInfo *si );
extern const UChar* VG_(seginfo_filename) ( const SegInfo *si );
extern ULong VG_(seginfo_sym_offset)( const SegInfo *si );
+/* Function for traversing the seginfo list. When called with NULL it
+ returns the first element; otherwise it returns the given element's
+ successor. */
+extern const SegInfo* VG_(next_seginfo) ( const SegInfo *si );
+
+/* Functions for traversing all the symbols in a SegInfo. _howmany
+ tells how many there are. _getidx retrieves the n'th, for n in 0
+ .. _howmany-1. You may not modify the function name thereby
+ acquired; if you want to do so, first strdup it. */
+extern Int VG_(seginfo_syms_howmany) ( const SegInfo *si );
+extern void VG_(seginfo_syms_getidx) ( const SegInfo *si,
+ Int idx,
+ /*OUT*/Addr* addr,
+ /*OUT*/UInt* size,
+ /*OUT*/HChar** name );
+
typedef
enum {
Vg_SectUnknown,
#if defined(VGA_x86)
# define VG_MIN_INSTR_SZB 1 // min length of native instruction
# define VG_MAX_INSTR_SZB 16 // max length of native instruction
-# define VG_CLREQ_SZB 18 // length of a client request, may
+# define VG_CLREQ_SZB 14 // length of a client request, may
// be larger than VG_MAX_INSTR_SZB
# define VG_STACK_REDZONE_SZB 0 // number of addressable bytes below %RSP
#elif defined(VGA_amd64)
# define VG_MIN_INSTR_SZB 1
# define VG_MAX_INSTR_SZB 16
-# define VG_CLREQ_SZB 18
+# define VG_CLREQ_SZB 19
# define VG_STACK_REDZONE_SZB 128
#elif defined(VGA_ppc32)
# define VG_MIN_INSTR_SZB 4
#ifndef __PUB_TOOL_REDIR_H
#define __PUB_TOOL_REDIR_H
-/* The following macros facilitate function replacement, which is one form
- of code replacement.
+/* The following macros facilitate function replacement and wrapping.
- The general idea is: you can write a function like this:
+ Function wrapping and function replacement are similar but not
+ identical.
- ret_type VG_REPLACE_FUNCTION(zEncodedSoname, fnname) ( ... args ... )
+ A replacement for some function F simply diverts all calls to F
+ to the stated replacement. There is no way to get back to F itself
+ from the replacement.
+
+ A wrapper for a function F causes all calls to F to instead go to
+ the wrapper. However, from inside the wrapper, it is possible
+ (with some difficulty) to get to F itself.
+
+ You may notice that replacement is a special case of wrapping, in
+ which the call to the original is omitted. For implementation
+ reasons, though, it is important to use the following macros
+ correctly: in particular, if you want to write a replacement, make
+ sure you use the VG_REPLACE_FN_ macros and not the VG_WRAP_FN_
+ macros.
+
+ Replacement
+ ~~~~~~~~~~~
+ To write a replacement function, do this:
+
+ ret_type
+ VG_REPLACE_FUNCTION_ZU(zEncodedSoname,fnname) ( .. args .. )
{
... body ...
}
zEncodedSoname should be a Z-encoded soname (see below for Z-encoding
details) and fnname should be an unencoded fn name. The resulting name is
- _vgi_zEncodedSoname_fnname
+ _vgrZU_zEncodedSoname_fnname
- The "_vgi_" is a prefix that gets discarded upon decoding.
+ The "_vgrZU_" is a prefix that gets discarded upon decoding.
+
+ It is also possible to write
+
+ ret_type
+ VG_REPLACE_FUNCTION_ZZ(zEncodedSoname,zEncodedFnname) ( .. args .. )
+ {
+ ... body ...
+ }
- When it sees this name, the core's symbol-table reading machinery
- and redirection machinery will conspire to cause calls to the function
- 'fnname' in object with soname 'zEncodedSoname' to actually be routed to
- the function written here. We use this below to define dozens of
- replacements of malloc, free, etc.
+ which means precisely the same, but the function name is also
+ Z-encoded. This can sometimes be necessary. In this case the
+ resulting function name is
+
+ _vgrZZ_zEncodedSoname_zEncodedFnname
+
+ When it sees either such name, the core's symbol-table reading
+ machinery and redirection machinery first Z-decode the soname and
+ if necessary the fnname. They are encoded so that they may include
+ arbitrary characters, and in particular they may contain '*', which
+ acts as a wildcard.
+
+ They then will conspire to cause calls to any function matching
+ 'fnname' in any object whose soname matches 'soname' to actually be
+ routed to this function. This is used in Valgrind to define dozens
+ of replacements of malloc, free, etc.
The soname must be a Z-encoded bit of text because sonames can
- contain dots etc which are not valid symbol names. But don't Z-encode
- the function name, since it will already be a valid symbol name, and the
- Z-encoding might screw up the C++ demangling.
+ contain dots etc which are not valid symbol names. The function
+ name may or may not be Z-encoded: to include wildcards it has to be,
+ but Z-encoding C++ function names which are themselves already mangled
+ using Zs in some way is tedious and error prone, so the _ZU variant
+ allows them not to be Z-encoded.
- Note that the soname can contain '*' as a wildcard meaning "match
- anything".
+ Note that the soname "NONE" is specially interpreted to match any
+ shared object which doesn't have a soname.
Note also that the replacement function should probably (must be?) in
client space, so it runs on the simulated CPU. So it must be in
either vgpreload_<tool>.so or vgpreload_core.so. It also only works
with functions in shared objects, I think.
- It is important that the Z-encoded soname contains no unencoded
- underscores, since the intercept-handlers in vg_symtab2.c detect
- the end of the soname by looking for the first trailing underscore.
-
- Z-encoding details: the scheme is like GHC's. It is just about
- readable enough to make a preprocessor unnecessary. First the "_vgi_"
- prefix is added, and then the following characters are transformed.
-
- * --> Za ('a' for "asterisk")
- + --> Zp
- : --> Zc
- . --> Zd
- _ --> Zu
- - --> Zh ('h' for "hyphen")
- (space) --> Zs
- Z --> ZZ
+ It is important that the Z-encoded names contain no unencoded
+ underscores, since the intercept-handlers in m_redir.c detect the
+ end of the soname by looking for the first trailing underscore.
+
+ Wrapping
+ ~~~~~~~~
+ This is identical to replacement, except that you should use the
+ macro names
+
+ VG_WRAP_FUNCTION_ZU
+ VG_WRAP_FUNCTION_ZZ
+
+ instead.
+
+ Z-encoding
+ ~~~~~~~~~~
+ Z-encoding details: the scheme is like GHC's. It is just about
+ readable enough to make a preprocessor unnecessary. First the
+ "_vgrZU_" or "_vgrZZ_" prefix is added, and then the following
+ characters are transformed.
+
+ * --> Za (asterisk)
+ + --> Zp (plus)
+ : --> Zc (colon)
+ . --> Zd (dot)
+ _ --> Zu (underscore)
+ - --> Zh (hyphen)
+ (space) --> Zs (space)
+ @ --> ZA (at)
+ Z --> ZZ (Z)
Everything else is left unchanged.
*/
-#define VG_REPLACE_FUNCTION(soname, fnname) _vgi_##soname##_##fnname
-#define VG_REPLACE_FUNCTION_PREFIX "_vgi_"
-#define VG_REPLACE_FUNCTION_PREFIX_LEN 5
+/* If you change these, the code in VG_(maybe_Z_demangle) needs to be
+ changed accordingly. NOTE: duplicates
+ I_{WRAP,REPLACE}_SONAME_FNNAME_Z{U,Z} in valgrind.h. */
+
+#define VG_REPLACE_FUNCTION_ZU(soname,fnname) _vgrZU_##soname##_##fnname
+#define VG_REPLACE_FUNCTION_ZZ(soname,fnname) _vgrZZ_##soname##_##fnname
+
+#define VG_WRAP_FUNCTION_ZU(soname,fnname) _vgwZU_##soname##_##fnname
+#define VG_WRAP_FUNCTION_ZZ(soname,fnname) _vgwZZ_##soname##_##fnname
#endif // __PUB_TOOL_REDIR_H
The resulting executables will still run without Valgrind, just a
little bit more slowly than they otherwise would, but otherwise
unchanged. When not running on valgrind, each client request
- consumes very few (eg. < 10) instructions, so the resulting performance
+ consumes very few (eg. 7) instructions, so the resulting performance
loss is negligible unless you plan to execute client requests
millions of times per second. Nevertheless, if that is still a
problem, you can compile with the NVALGRIND symbol defined (gcc
we can't use C++ style "//" comments nor the "asm" keyword (instead
use "__asm__"). */
+/* Derive some tags indicating what the target architecture is. Note
+ that in this file we're using the compiler's CPP symbols for
+ identifying architectures, which are different to the ones we use
+ within the rest of Valgrind. Note, __powerpc__ is active for both
+ 32 and 64-bit PPC, whereas __powerpc64__ is only active for the
+ latter. */
+#undef ARCH_x86
+#undef ARCH_amd64
+#undef ARCH_ppc32
+#undef ARCH_ppc64
+
+#if defined(__i386__)
+# define ARCH_x86 1
+#elif defined(__x86_64__)
+# define ARCH_amd64 1
+#elif defined(__powerpc__) && !defined(__powerpc64__)
+# define ARCH_ppc32 1
+#elif defined(__powerpc__) && defined(__powerpc64__)
+# define ARCH_ppc64 1
+#endif
+
/* If we're not compiling for our target architecture, don't generate
- any inline asms. Note that in this file we're using the compiler's
- CPP symbols for identifying architectures, which are different to
- the ones we use within the rest of Valgrind. Note, __powerpc__ is
- active for both 32 and 64-bit PPC, whereas __powerpc64__ is only
- active for the latter. */
-#if !defined(__i386__) && !defined(__x86_64__) && !defined(__powerpc__)
-# ifndef NVALGRIND
-# define NVALGRIND 1
-# endif /* NVALGRIND */
+ any inline asms. */
+#if !defined(ARCH_x86) && !defined(ARCH_amd64) \
+ && !defined(ARCH_ppc32) && !defined(ARCH_ppc64)
+# if !defined(NVALGRIND)
+# define NVALGRIND 1
+# endif
#endif
+
/* ------------------------------------------------------------------ */
-/* The architecture-specific part */
+/* ARCHITECTURE SPECIFICS for SPECIAL INSTRUCTIONS. There is nothing */
+/* in here of use to end-users -- skip to the next section. */
/* ------------------------------------------------------------------ */
-#ifdef NVALGRIND
+#if defined(NVALGRIND)
/* Define NVALGRIND to completely remove the Valgrind magic sequence
- from the compiled code (analogous to NDEBUG's effects on assert()) */
-#define VALGRIND_MAGIC_SEQUENCE( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4) \
- { \
- (_zzq_rlval) = (_zzq_default); \
+ from the compiled code (analogous to NDEBUG's effects on
+ assert()) */
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4) \
+ { \
+ (_zzq_rlval) = (_zzq_default); \
}
-#else /* NVALGRIND */
+#else /* ! NVALGRIND */
+
+/* The following defines the magic code sequences which the JITter
+ spots and handles magically. Don't look too closely at them as
+ they will rot your brain.
-/* The following defines the magic code sequences which the JITter spots and
- handles magically. Don't look too closely at them; they will rot
- your brain. We must ensure that the default value gets put in the return
- slot, so that everything works when this is executed not under Valgrind.
- Args are passed in a memory block, and so there's no intrinsic limit to
- the number that could be passed, but it's currently four.
+ The assembly code sequences for all architectures is in this one
+ file. This is because this file must be stand-alone, and we don't
+ want to have multiple files.
+
+ For VALGRIND_DO_CLIENT_REQUEST, we must ensure that the default
+ value gets put in the return slot, so that everything works when
+ this is executed not under Valgrind. Args are passed in a memory
+ block, and so there's no intrinsic limit to the number that could
+ be passed, but it's currently four.
The macro args are:
_zzq_rlval result lvalue
_zzq_request request code
_zzq_arg1..4 request params
- Nb: we put the assembly code sequences for all architectures in this one
- file. This is because this file must be stand-alone, and we don't want
- to have multiple files.
+ The other two macros are used to support function wrapping, and are
+ a lot simpler. VALGRIND_GET_NRADDR returns the value of the
+ guest's NRADDR pseudo-register. VALGRIND_CALL_NOREDIR_* behaves
+ the same as the following on the guest, but guarantees that the
+ branch instruction will not be redirected: x86: call *%eax, amd64:
+ call *%rax, ppc32/ppc64: bctrl. VALGRIND_CALL_NOREDIR is just
+ text, not a complete inline asm, since it needs to be combined with
+ more magic inline asm stuff to be useful.
*/
-#ifdef __x86_64__
-#define VALGRIND_MAGIC_SEQUENCE( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4) \
- \
- { volatile unsigned long long _zzq_args[5]; \
- _zzq_args[0] = (volatile unsigned long long)(_zzq_request); \
- _zzq_args[1] = (volatile unsigned long long)(_zzq_arg1); \
- _zzq_args[2] = (volatile unsigned long long)(_zzq_arg2); \
- _zzq_args[3] = (volatile unsigned long long)(_zzq_arg3); \
- _zzq_args[4] = (volatile unsigned long long)(_zzq_arg4); \
- __asm__ volatile("roll $29, %%eax ; roll $3, %%eax\n\t" \
- "rorl $27, %%eax ; rorl $5, %%eax\n\t" \
- "roll $13, %%eax ; roll $19, %%eax" \
- : "=d" (_zzq_rlval) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "memory" \
- ); \
+/* ---------------------------- x86 ---------------------------- */
+
+#if defined(ARCH_x86)
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "roll $3, %%edi ; roll $13, %%edi\n\t" \
+ "roll $29, %%edi ; roll $19, %%edi\n\t" \
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4) \
+ { volatile unsigned int _zzq_args[5]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EDX = client_request ( %EAX ) */ \
+ "xchgl %%ebx,%%ebx" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = _zzq_result; \
}
-#endif /* __x86_64__ */
-
-#ifdef __i386__
-#define VALGRIND_MAGIC_SEQUENCE( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4) \
- \
- { unsigned int _zzq_args[5]; \
- _zzq_args[0] = (unsigned int)(_zzq_request); \
- _zzq_args[1] = (unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (unsigned int)(_zzq_arg4); \
- __asm__ volatile("roll $29, %%eax ; roll $3, %%eax\n\t" \
- "rorl $27, %%eax ; rorl $5, %%eax\n\t" \
- "roll $13, %%eax ; roll $19, %%eax" \
- : "=d" (_zzq_rlval) \
- : "a" (&_zzq_args[0]), "0" (_zzq_default) \
- : "cc", "memory" \
- ); \
+
+#define VALGRIND_GET_NRADDR(_zzq_rlval) \
+ { volatile unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %EAX = guest_NRADDR */ \
+ "xchgl %%ecx,%%ecx" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = (void*)__addr; \
}
-#endif /* __i386__ */
-
-#if defined(__powerpc__) && !defined(__powerpc64__)
-#define VALGRIND_MAGIC_SEQUENCE( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4) \
- \
- { volatile unsigned int _zzq_args[5]; \
- register unsigned int _zzq_tmp __asm__("r3"); \
- register volatile unsigned int *_zzq_ptr __asm__("r4"); \
- _zzq_args[0] = (volatile unsigned int)(_zzq_request); \
- _zzq_args[1] = (volatile unsigned int)(_zzq_arg1); \
- _zzq_args[2] = (volatile unsigned int)(_zzq_arg2); \
- _zzq_args[3] = (volatile unsigned int)(_zzq_arg3); \
- _zzq_args[4] = (volatile unsigned int)(_zzq_arg4); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("tw 0,3,27\n\t" \
- "rlwinm 0,0,29,0,0\n\t" \
- "rlwinm 0,0,3,0,0\n\t" \
- "rlwinm 0,0,13,0,0\n\t" \
- "rlwinm 0,0,19,0,0\n\t" \
- "nop\n\t" \
- : "=r" (_zzq_tmp) \
- : "0" (_zzq_default), "r" (_zzq_ptr) \
- : "memory"); \
- _zzq_rlval = (__typeof__(_zzq_rlval)) _zzq_tmp; \
+
+#define VALGRIND_CALL_NOREDIR_EAX \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%EAX */ \
+ "xchgl %%edx,%%edx\n\t"
+#endif /* ARCH_x86 */
+
+/* --------------------------- amd64 --------------------------- */
+
+#if defined(ARCH_amd64)
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
+ "rolq $61, %%rdi ; rolq $51, %%rdi\n\t" \
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4) \
+ { volatile unsigned long long int _zzq_args[5]; \
+ volatile unsigned long long int _zzq_result; \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %RDX = client_request ( %RAX ) */ \
+ "xchgq %%rbx,%%rbx" \
+ : "=d" (_zzq_result) \
+ : "a" (&_zzq_args[0]), "0" (_zzq_default) \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = _zzq_result; \
}
-#endif /* __powerpc__ 32-bit only */
-
-#if defined(__powerpc__) && defined(__powerpc64__)
-#define VALGRIND_MAGIC_SEQUENCE( \
- _zzq_rlval, _zzq_default, _zzq_request, \
- _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4) \
- \
- { volatile unsigned long long int _zzq_args[5]; \
- register unsigned long long int _zzq_tmp __asm__("r3"); \
- register volatile unsigned long long int *_zzq_ptr __asm__("r4"); \
- _zzq_args[0] = (volatile unsigned long long int)(_zzq_request); \
- _zzq_args[1] = (volatile unsigned long long int)(_zzq_arg1); \
- _zzq_args[2] = (volatile unsigned long long int)(_zzq_arg2); \
- _zzq_args[3] = (volatile unsigned long long int)(_zzq_arg3); \
- _zzq_args[4] = (volatile unsigned long long int)(_zzq_arg4); \
- _zzq_ptr = _zzq_args; \
- __asm__ volatile("tw 0,3,27\n\t" \
- "rotldi 0,0,61\n\t" \
- "rotldi 0,0,3\n\t" \
- "rotldi 0,0,13\n\t" \
- "rotldi 0,0,51\n\t" \
- "nop\n\t" \
- : "=r" (_zzq_tmp) \
- : "0" (_zzq_default), "r" (_zzq_ptr) \
- : "memory"); \
- _zzq_rlval = (__typeof__(_zzq_rlval)) _zzq_tmp; \
+
+#define VALGRIND_GET_NRADDR(_zzq_rlval) \
+ { volatile unsigned long long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %RAX = guest_NRADDR */ \
+ "xchgq %%rcx,%%rcx" \
+ : "=a" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = (void*)__addr; \
}
-#endif /* __powerpc__ 64-bit only */
+
+#define VALGRIND_CALL_NOREDIR_RAX \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%RAX */ \
+ "xchgq %%rdx,%%rdx\n\t"
+#endif /* ARCH_amd64 */
+
+/* --------------------------- ppc32 --------------------------- */
+
+#if defined(ARCH_ppc32)
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
+ "rlwinm 0,0,29,0,0 ; rlwinm 0,0,19,0,0\n\t" \
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4) \
+ \
+ { volatile unsigned int _zzq_args[5]; \
+ register unsigned int _zzq_result __asm__("r3"); \
+ register volatile unsigned int *_zzq_ptr __asm__("r4"); \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = client_request ( %R4 ) */ \
+ "or 1,1,1" \
+ : "=r" (_zzq_result) \
+ : "0" (_zzq_default), "r" (_zzq_ptr) \
+ : "cc", "memory"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NRADDR(_zzq_rlval) \
+ { register unsigned int __addr __asm__("r3"); \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %R3 = guest_NRADDR */ \
+ "or 2,2,2" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory" \
+ ); \
+ _zzq_rlval = (void*)__addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R11 */ \
+ "or 3,3,3\n\t"
+#endif /* ARCH_ppc32 */
+
+/* --------------------------- ppc64 --------------------------- */
+
+#if defined(ARCH_ppc64)
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4) \
+ \
+ { volatile unsigned long long int _zzq_args[5]; \
+ register unsigned long long int _zzq_tmp __asm__("r3"); \
+ register volatile unsigned long long int *_zzq_ptr __asm__("r4"); \
+ _zzq_args[0] = (volatile unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (volatile unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (volatile unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (volatile unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (volatile unsigned long long int)(_zzq_arg4); \
+ _zzq_ptr = _zzq_args; \
+ __asm__ volatile("tw 0,3,27\n\t" \
+ "rotldi 0,0,61\n\t" \
+ "rotldi 0,0,3\n\t" \
+ "rotldi 0,0,13\n\t" \
+ "rotldi 0,0,51\n\t" \
+ "nop\n\t" \
+ : "=r" (_zzq_tmp) \
+ : "0" (_zzq_default), "r" (_zzq_ptr) \
+ : "memory"); \
+ _zzq_rlval = (__typeof__(_zzq_rlval)) _zzq_tmp; \
+ }
+#endif /* ARCH_ppc64 */
/* Insert assembly code for other architectures here... */
/* ------------------------------------------------------------------ */
-/* The architecture-independent part */
+/* ARCHITECTURE SPECIFICS for FUNCTION WRAPPING. This is all very */
+/* ugly. It's the least-worst tradeoff I can think of. */
+/* ------------------------------------------------------------------ */
+
+/* This section defines magic (a.k.a. appalling-hack) macros for doing
+ guaranteed-no-redirection calls, so as to get from function
+ wrappers to the functions they are wrapping. The whole point is to
+ construct standard call sequences, but to do the call itself with a
+ special no-redirect call pseudo-instruction that the JIT
+ understands and handles specially. This section is long and
+ repetitious, and I can't see a way to make it shorter.
+
+ The naming scheme is as follows:
+
+ CALL_FN_{W,v}_{v,W,WW,WWW,WWWW,5W,6W,7W,etc}
+
+ 'W' stands for "word" and 'v' for "void". Hence there are
+ different macros for calling arity 0, 1, 2, 3, 4, etc, functions,
+ and for each, the possibility of returning a word-typed result, or
+ no result.
+*/
+
+/* Use these to write the name of your wrapper. NOTE: duplicates
+ VG_WRAP_FUNCTION_Z{U,Z} in pub_tool_redir.h. */
+
+#define I_WRAP_SONAME_FNNAME_ZU(soname,fnname) \
+ _vgwZU_##soname##_##fnname
+
+#define I_WRAP_SONAME_FNNAME_ZZ(soname,fnname) \
+ _vgwZZ_##soname##_##fnname
+
+/* Use this macro from within a wrapper function to get the address of
+ the original function. Once you have that you can then use it in
+ one of the CALL_FN_ macros. */
+#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NRADDR(_lval)
+
+/* Derivatives of the main macros below, for calling functions
+ returning void. */
+
+#define CALL_FN_v_v(fnptr) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_v(_junk,fnptr); } while (0)
+
+#define CALL_FN_v_W(fnptr, arg1) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_W(_junk,fnptr,arg1); } while (0)
+
+#define CALL_FN_v_WW(fnptr, arg1,arg2) \
+ do { volatile unsigned long _junk; \
+ CALL_FN_W_WW(_junk,fnptr,arg1,arg2); } while (0)
+
+/* ---------------------------- x86 ---------------------------- */
+
+#if defined(ARCH_x86)
+
+/* These regs are trashed by the hidden call. No need to mention eax
+ as gcc can already see that, and mentioning it causes gcc to bomb. */
+#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+
+/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, fnptr) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ __asm__ volatile( \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, fnptr, arg1) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $4, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, fnptr, arg1,arg2) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $8, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, fnptr, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $16, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, fnptr, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $20, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, fnptr, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $24, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, fnptr, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $28, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, fnptr, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $32, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, fnptr, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "pushl 48(%%eax)\n\t" \
+ "pushl 44(%%eax)\n\t" \
+ "pushl 40(%%eax)\n\t" \
+ "pushl 36(%%eax)\n\t" \
+ "pushl 32(%%eax)\n\t" \
+ "pushl 28(%%eax)\n\t" \
+ "pushl 24(%%eax)\n\t" \
+ "pushl 20(%%eax)\n\t" \
+ "pushl 16(%%eax)\n\t" \
+ "pushl 12(%%eax)\n\t" \
+ "pushl 8(%%eax)\n\t" \
+ "pushl 4(%%eax)\n\t" \
+ "movl (%%eax), %%eax\n\t" /* target->%eax */ \
+ VALGRIND_CALL_NOREDIR_EAX \
+ "addl $48, %%esp\n" \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* ARCH_x86 */
+
+/* --------------------------- amd64 --------------------------- */
+
+#if defined(ARCH_amd64)
+
+/* ARGREGS: rdi rsi rdx rcx r8 r9 (the rest on stack in R-to-L order) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS /*"rax",*/ "rcx", "rdx", "rsi", \
+ "rdi", "r8", "r9", "r10", "r11"
+
+/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
+ long) == 8. */
+
+#define CALL_FN_W_v(lval, fnptr) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ __asm__ volatile( \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, fnptr, arg1) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, fnptr, arg1,arg2) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* ARCH_amd64 */
+
+/* --------------------------- ppc32 --------------------------- */
+
+#if defined(ARCH_ppc32)
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "lr", \
+ "r0", "r2", "r3", "r4", "r5", "r6", \
+ "r7", "r8", "r9", "r10", "r11", "r12"
+
+/* These CALL_FN_ macros assume that on ppc32-linux, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, fnptr) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, fnptr, arg1) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)arg1; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, fnptr, arg1,arg2) \
+ do { \
+ volatile void* _fnptr = (fnptr); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_fnptr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ __asm__ volatile( \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* ARCH_ppc32 */
+
+/* --------------------------- ppc64 --------------------------- */
+
+
+/* ------------------------------------------------------------------ */
+/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
+/* */
/* ------------------------------------------------------------------ */
/* Some request codes. There are many more of these, but most are not
exposed to end-user view. These are the public ones, all of the
form 0x1000 + small_number.
- Core ones are in the range 0x00000000--0x0000ffff. The non-public ones
- start at 0x2000.
+ Core ones are in the range 0x00000000--0x0000ffff. The non-public
+ ones start at 0x2000.
*/
-/* These macros are used by tools -- they must be public, but don't embed them
- * into other programs. */
+/* These macros are used by tools -- they must be public, but don't
+ embed them into other programs. */
#define VG_USERREQ_TOOL_BASE(a,b) \
((unsigned int)(((a)&0xff) << 24 | ((b)&0xff) << 16))
#define VG_IS_TOOL_USERREQ(a, b, v) \
enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
- /* These allow any function to be called from the
- simulated CPU but run on the real CPU.
- Nb: the first arg passed to the function is always the ThreadId of
- the running thread! So CLIENT_CALL0 actually requires a 1 arg
+ /* These allow any function to be called from the simulated
+ CPU but run on the real CPU. Nb: the first arg passed to
+ the function is always the ThreadId of the running
+ thread! So CLIENT_CALL0 actually requires a 1 arg
function, etc. */
VG_USERREQ__CLIENT_CALL0 = 0x1101,
VG_USERREQ__CLIENT_CALL1 = 0x1102,
VG_USERREQ__CLIENT_CALL2 = 0x1103,
VG_USERREQ__CLIENT_CALL3 = 0x1104,
- /* Can be useful in regression testing suites -- eg. can send
- Valgrind's output to /dev/null and still count errors. */
+ /* Can be useful in regression testing suites -- eg. can
+ send Valgrind's output to /dev/null and still count
+ errors. */
VG_USERREQ__COUNT_ERRORS = 0x1201,
- /* These are useful and can be interpreted by any tool that tracks
- malloc() et al, by using vg_replace_malloc.c. */
+ /* These are useful and can be interpreted by any tool that
+ tracks malloc() et al, by using vg_replace_malloc.c. */
VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
VG_USERREQ__FREELIKE_BLOCK = 0x1302,
/* Memory pool support. */
VG_USERREQ__STACK_CHANGE = 0x1503,
} Vg_ClientRequest;
-#ifndef __GNUC__
-#define __extension__
+#if !defined(__GNUC__)
+# define __extension__ /* */
#endif
-/* Returns the number of Valgrinds this code is running under. That is,
- 0 if running natively, 1 if running under Valgrind, 2 if running under
- Valgrind which is running under another Valgrind, etc. */
-#define RUNNING_ON_VALGRIND __extension__ \
- ({unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* returned if not */, \
- VG_USERREQ__RUNNING_ON_VALGRIND, \
- 0, 0, 0, 0); \
- _qzz_res; \
+/* Returns the number of Valgrinds this code is running under. That
+ is, 0 if running natively, 1 if running under Valgrind, 2 if
+ running under Valgrind which is running under another Valgrind,
+ etc. */
+#define RUNNING_ON_VALGRIND __extension__ \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* if not */, \
+ VG_USERREQ__RUNNING_ON_VALGRIND, \
+ 0, 0, 0, 0); \
+ _qzz_res; \
})
_qzz_len - 1]. Useful if you are debugging a JITter or some such,
since it provides a way to make sure valgrind will retranslate the
invalidated area. Returns no value. */
-#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__DISCARD_TRANSLATIONS, \
- _qzz_addr, _qzz_len, 0, 0); \
+#define VALGRIND_DISCARD_TRANSLATIONS(_qzz_addr,_qzz_len) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DISCARD_TRANSLATIONS, \
+ _qzz_addr, _qzz_len, 0, 0); \
}
-#ifdef NVALGRIND
-#define VALGRIND_PRINTF(...)
-#define VALGRIND_PRINTF_BACKTRACE(...)
+/* These requests are for getting Valgrind itself to print something.
+ Possibly with a backtrace. This is a really ugly hack. */
+
+#if defined(NVALGRIND)
+
+# define VALGRIND_PRINTF(...)
+# define VALGRIND_PRINTF_BACKTRACE(...)
#else /* NVALGRIND */
unsigned long _qzz_res;
va_list vargs;
va_start(vargs, format);
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, VG_USERREQ__PRINTF,
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
(unsigned long)format, (unsigned long)vargs, 0, 0);
va_end(vargs);
return (int)_qzz_res;
unsigned long _qzz_res;
va_list vargs;
va_start(vargs, format);
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
(unsigned long)format, (unsigned long)vargs, 0, 0);
va_end(vargs);
return (int)_qzz_res;
#endif /* NVALGRIND */
+
/* These requests allow control to move from the simulated CPU to the
real CPU, calling an arbitary function */
-#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
- ({unsigned long _qyy_res; \
- VALGRIND_MAGIC_SEQUENCE(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL0, \
- _qyy_fn, \
- 0, 0, 0); \
- _qyy_res; \
+#define VALGRIND_NON_SIMD_CALL0(_qyy_fn) \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL0, \
+ _qyy_fn, \
+ 0, 0, 0); \
+ _qyy_res; \
})
-#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
- ({unsigned long _qyy_res; \
- VALGRIND_MAGIC_SEQUENCE(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL1, \
- _qyy_fn, \
- _qyy_arg1, 0, 0); \
- _qyy_res; \
+#define VALGRIND_NON_SIMD_CALL1(_qyy_fn, _qyy_arg1) \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL1, \
+ _qyy_fn, \
+ _qyy_arg1, 0, 0); \
+ _qyy_res; \
})
-#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
- ({unsigned long _qyy_res; \
- VALGRIND_MAGIC_SEQUENCE(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL2, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, 0); \
- _qyy_res; \
+#define VALGRIND_NON_SIMD_CALL2(_qyy_fn, _qyy_arg1, _qyy_arg2) \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL2, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, 0); \
+ _qyy_res; \
})
-#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
- ({unsigned long _qyy_res; \
- VALGRIND_MAGIC_SEQUENCE(_qyy_res, 0 /* default return */, \
- VG_USERREQ__CLIENT_CALL3, \
- _qyy_fn, \
- _qyy_arg1, _qyy_arg2, _qyy_arg3); \
- _qyy_res; \
+#define VALGRIND_NON_SIMD_CALL3(_qyy_fn, _qyy_arg1, _qyy_arg2, _qyy_arg3) \
+ ({unsigned long _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__CLIENT_CALL3, \
+ _qyy_fn, \
+ _qyy_arg1, _qyy_arg2, _qyy_arg3); \
+ _qyy_res; \
})
/* Counts the number of errors that have been recorded by a tool. Nb:
the tool must record the errors with VG_(maybe_record_error)() or
VG_(unique_error)() for them to be counted. */
-#define VALGRIND_COUNT_ERRORS \
- ({unsigned int _qyy_res; \
- VALGRIND_MAGIC_SEQUENCE(_qyy_res, 0 /* default return */, \
- VG_USERREQ__COUNT_ERRORS, \
- 0, 0, 0, 0); \
- _qyy_res; \
+#define VALGRIND_COUNT_ERRORS \
+ ({unsigned int _qyy_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qyy_res, 0 /* default return */, \
+ VG_USERREQ__COUNT_ERRORS, \
+ 0, 0, 0, 0); \
+ _qyy_res; \
})
/* Mark a block of memory as having been allocated by a malloc()-like
Nb: block must be freed via a free()-like function specified
with VALGRIND_FREELIKE_BLOCK or mismatch errors will occur. */
-#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__MALLOCLIKE_BLOCK, \
- addr, sizeB, rzB, is_zeroed); \
+#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MALLOCLIKE_BLOCK, \
+ addr, sizeB, rzB, is_zeroed); \
}
/* Mark a block of memory as having been freed by a free()-like function.
`rzB' is redzone size; it must match that given to
VALGRIND_MALLOCLIKE_BLOCK. Memory not freed will be detected by the leak
checker. Put it immediately after the point where the block is freed. */
-#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__FREELIKE_BLOCK, \
- addr, rzB, 0, 0); \
+#define VALGRIND_FREELIKE_BLOCK(addr, rzB) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__FREELIKE_BLOCK, \
+ addr, rzB, 0, 0); \
}
/* Create a memory pool. */
-#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__CREATE_MEMPOOL, \
- pool, rzB, is_zeroed, 0); \
+#define VALGRIND_CREATE_MEMPOOL(pool, rzB, is_zeroed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CREATE_MEMPOOL, \
+ pool, rzB, is_zeroed, 0); \
}
/* Destroy a memory pool. */
-#define VALGRIND_DESTROY_MEMPOOL(pool) \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__DESTROY_MEMPOOL, \
- pool, 0, 0, 0); \
+#define VALGRIND_DESTROY_MEMPOOL(pool) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DESTROY_MEMPOOL, \
+ pool, 0, 0, 0); \
}
/* Associate a piece of memory with a memory pool. */
-#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__MEMPOOL_ALLOC, \
- pool, addr, size, 0); \
+#define VALGRIND_MEMPOOL_ALLOC(pool, addr, size) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_ALLOC, \
+ pool, addr, size, 0); \
}
/* Disassociate a piece of memory from a memory pool. */
-#define VALGRIND_MEMPOOL_FREE(pool, addr) \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__MEMPOOL_FREE, \
- pool, addr, 0, 0); \
+#define VALGRIND_MEMPOOL_FREE(pool, addr) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__MEMPOOL_FREE, \
+ pool, addr, 0, 0); \
}
/* Mark a piece of memory as being a stack. Returns a stack id. */
-#define VALGRIND_STACK_REGISTER(start, end) \
- ({unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__STACK_REGISTER, \
- start, end, 0, 0); \
- _qzz_res; \
+#define VALGRIND_STACK_REGISTER(start, end) \
+ ({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_REGISTER, \
+ start, end, 0, 0); \
+ _qzz_res; \
})
/* Unmark the piece of memory associated with a stack id as being a
stack. */
-#define VALGRIND_STACK_DEREGISTER(id) \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__STACK_DEREGISTER, \
- id, 0, 0, 0); \
+#define VALGRIND_STACK_DEREGISTER(id) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_DEREGISTER, \
+ id, 0, 0, 0); \
}
/* Change the start and end address of the stack id. */
-#define VALGRIND_STACK_CHANGE(id, start, end) \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__STACK_CHANGE, \
- id, start, end, 0); \
+#define VALGRIND_STACK_CHANGE(id, start, end) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__STACK_CHANGE, \
+ id, start, end, 0); \
}
+
+#undef ARCH_x86
+#undef ARCH_amd64
+#undef ARCH_ppc32
+#undef ARCH_ppc64
+
#endif /* __VALGRIND_H */
#define RECORD_OVERLAP_ERROR(s, p_extra) \
{ \
Word unused_res; \
- VALGRIND_MAGIC_SEQUENCE(unused_res, 0, \
- _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR, \
- s, p_extra, 0, 0); \
+ VALGRIND_DO_CLIENT_REQUEST(unused_res, 0, \
+ _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR, \
+ s, p_extra, 0, 0); \
}
static __inline__
#define STRRCHR(soname, fnname) \
- char* VG_REPLACE_FUNCTION(soname,fnname)( const char* s, int c ); \
- char* VG_REPLACE_FUNCTION(soname,fnname)( const char* s, int c ) \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname)( const char* s, int c ); \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname)( const char* s, int c ) \
{ \
UChar ch = (UChar)((UInt)c); \
UChar* p = (UChar*)s; \
#define STRCHR(soname, fnname) \
- char* VG_REPLACE_FUNCTION(soname,fnname) ( const char* s, int c ); \
- char* VG_REPLACE_FUNCTION(soname,fnname) ( const char* s, int c ) \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( const char* s, int c ); \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( const char* s, int c ) \
{ \
UChar ch = (UChar)((UInt)c); \
UChar* p = (UChar*)s; \
#define STRCAT(soname, fnname) \
- char* VG_REPLACE_FUNCTION(soname,fnname) ( char* dst, const char* src ); \
- char* VG_REPLACE_FUNCTION(soname,fnname) ( char* dst, const char* src ) \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( char* dst, const char* src ); \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( char* dst, const char* src ) \
{ \
const Char* src_orig = src; \
Char* dst_orig = dst; \
#define STRNCAT(soname, fnname) \
- char* VG_REPLACE_FUNCTION(soname,fnname) ( char* dst, const char* src, SizeT n ); \
- char* VG_REPLACE_FUNCTION(soname,fnname) ( char* dst, const char* src, SizeT n ) \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ ( char* dst, const char* src, SizeT n ); \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ ( char* dst, const char* src, SizeT n ) \
{ \
const Char* src_orig = src; \
Char* dst_orig = dst; \
#define STRNLEN(soname, fnname) \
- SizeT VG_REPLACE_FUNCTION(soname,fnname) ( const char* str, SizeT n ); \
- SizeT VG_REPLACE_FUNCTION(soname,fnname) ( const char* str, SizeT n ) \
+ SizeT VG_REPLACE_FUNCTION_ZU(soname,fnname) ( const char* str, SizeT n ); \
+ SizeT VG_REPLACE_FUNCTION_ZU(soname,fnname) ( const char* str, SizeT n ) \
{ \
SizeT i = 0; \
while (i < n && str[i] != 0) i++; \
// confusing if you aren't expecting it. Other small functions in this file
// may also be inline by gcc.
#define STRLEN(soname, fnname) \
- SizeT VG_REPLACE_FUNCTION(soname,fnname)( const char* str ); \
- SizeT VG_REPLACE_FUNCTION(soname,fnname)( const char* str ) \
+ SizeT VG_REPLACE_FUNCTION_ZU(soname,fnname)( const char* str ); \
+ SizeT VG_REPLACE_FUNCTION_ZU(soname,fnname)( const char* str ) \
{ \
SizeT i = 0; \
while (str[i] != 0) i++; \
#define STRCPY(soname, fnname) \
- char* VG_REPLACE_FUNCTION(soname, fnname) ( char* dst, const char* src ); \
- char* VG_REPLACE_FUNCTION(soname, fnname) ( char* dst, const char* src ) \
+ char* VG_REPLACE_FUNCTION_ZU(soname, fnname) ( char* dst, const char* src ); \
+ char* VG_REPLACE_FUNCTION_ZU(soname, fnname) ( char* dst, const char* src ) \
{ \
const Char* src_orig = src; \
Char* dst_orig = dst; \
#define STRNCPY(soname, fnname) \
- char* VG_REPLACE_FUNCTION(soname, fnname) ( char* dst, const char* src, SizeT n ); \
- char* VG_REPLACE_FUNCTION(soname, fnname) ( char* dst, const char* src, SizeT n ) \
+ char* VG_REPLACE_FUNCTION_ZU(soname, fnname) \
+ ( char* dst, const char* src, SizeT n ); \
+ char* VG_REPLACE_FUNCTION_ZU(soname, fnname) \
+ ( char* dst, const char* src, SizeT n ) \
{ \
const Char* src_orig = src; \
Char* dst_orig = dst; \
#define STRNCMP(soname, fnname) \
- int VG_REPLACE_FUNCTION(soname,fnname) ( const char* s1, const char* s2, SizeT nmax ); \
- int VG_REPLACE_FUNCTION(soname,fnname) ( const char* s1, const char* s2, SizeT nmax ) \
+ int VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ ( const char* s1, const char* s2, SizeT nmax ); \
+ int VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ ( const char* s1, const char* s2, SizeT nmax ) \
{ \
SizeT n = 0; \
while (True) { \
#define STRCMP(soname, fnname) \
- int VG_REPLACE_FUNCTION(soname,fnname) ( const char* s1, const char* s2 ); \
- int VG_REPLACE_FUNCTION(soname,fnname) ( const char* s1, const char* s2 ) \
+ int VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ ( const char* s1, const char* s2 ); \
+ int VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ ( const char* s1, const char* s2 ) \
{ \
register unsigned char c1; \
register unsigned char c2; \
#define MEMCHR(soname, fnname) \
- void* VG_REPLACE_FUNCTION(soname,fnname) (const void *s, int c, SizeT n); \
- void* VG_REPLACE_FUNCTION(soname,fnname) (const void *s, int c, SizeT n) \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) (const void *s, int c, SizeT n); \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) (const void *s, int c, SizeT n) \
{ \
SizeT i; \
UChar c0 = (UChar)c; \
#define MEMCPY(soname, fnname) \
- void* VG_REPLACE_FUNCTION(soname,fnname)( void *dst, const void *src, SizeT len ); \
- void* VG_REPLACE_FUNCTION(soname,fnname)( void *dst, const void *src, SizeT len ) \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ ( void *dst, const void *src, SizeT len ); \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ ( void *dst, const void *src, SizeT len ) \
{ \
register char *d; \
register char *s; \
#define MEMCMP(soname, fnname) \
- int VG_REPLACE_FUNCTION(soname,fnname)( const void *s1V, const void *s2V, SizeT n ); \
- int VG_REPLACE_FUNCTION(soname,fnname)( const void *s1V, const void *s2V, SizeT n ) \
+ int VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ ( const void *s1V, const void *s2V, SizeT n ); \
+ int VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ ( const void *s1V, const void *s2V, SizeT n ) \
{ \
int res; \
unsigned char a0; \
/* Copy SRC to DEST, returning the address of the terminating '\0' in
DEST. (minor variant of strcpy) */
#define STPCPY(soname, fnname) \
- char* VG_REPLACE_FUNCTION(soname,fnname) ( char* dst, const char* src ); \
- char* VG_REPLACE_FUNCTION(soname,fnname) ( char* dst, const char* src ) \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( char* dst, const char* src ); \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) ( char* dst, const char* src ) \
{ \
const Char* src_orig = src; \
Char* dst_orig = dst; \
#define MEMSET(soname, fnname) \
- void* VG_REPLACE_FUNCTION(soname,fnname)(void *s, Int c, SizeT n); \
- void* VG_REPLACE_FUNCTION(soname,fnname)(void *s, Int c, SizeT n) \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname)(void *s, Int c, SizeT n); \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname)(void *s, Int c, SizeT n) \
{ \
unsigned char *cp = s; \
\
#define MEMMOVE(soname, fnname) \
- void* VG_REPLACE_FUNCTION(soname,fnname)(void *dstV, const void *srcV, SizeT n); \
- void* VG_REPLACE_FUNCTION(soname,fnname)(void *dstV, const void *srcV, SizeT n) \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ (void *dstV, const void *srcV, SizeT n); \
+ void* VG_REPLACE_FUNCTION_ZU(soname,fnname) \
+ (void *dstV, const void *srcV, SizeT n) \
{ \
SizeT i; \
Char* dst = (Char*)dstV; \
/* Find the first occurrence of C in S or the final NUL byte. */
#define GLIBC232_STRCHRNUL(soname, fnname) \
- char* VG_REPLACE_FUNCTION(soname,fnname) (const char* s, int c_in); \
- char* VG_REPLACE_FUNCTION(soname,fnname) (const char* s, int c_in) \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) (const char* s, int c_in); \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) (const char* s, int c_in) \
{ \
unsigned char c = (unsigned char) c_in; \
unsigned char* char_ptr = (unsigned char *)s; \
/* Find the first occurrence of C in S. */
#define GLIBC232_RAWMEMCHR(soname, fnname) \
- char* VG_REPLACE_FUNCTION(soname,fnname) (const char* s, int c_in); \
- char* VG_REPLACE_FUNCTION(soname,fnname) (const char* s, int c_in) \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) (const char* s, int c_in); \
+ char* VG_REPLACE_FUNCTION_ZU(soname,fnname) (const char* s, int c_in) \
{ \
unsigned char c = (unsigned char) c_in; \
unsigned char* char_ptr = (unsigned char *)s; \
_qzz_len bytes. */
#define VALGRIND_MAKE_NOACCESS(_qzz_addr,_qzz_len) \
(__extension__({unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
VG_USERREQ__MAKE_NOACCESS, \
_qzz_addr, _qzz_len, 0, 0); \
_qzz_res; \
for _qzz_len bytes. */
#define VALGRIND_MAKE_WRITABLE(_qzz_addr,_qzz_len) \
(__extension__({unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
VG_USERREQ__MAKE_WRITABLE, \
_qzz_addr, _qzz_len, 0, 0); \
_qzz_res; \
for _qzz_len bytes. */
#define VALGRIND_MAKE_READABLE(_qzz_addr,_qzz_len) \
(__extension__({unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
VG_USERREQ__MAKE_READABLE, \
_qzz_addr, _qzz_len, 0, 0); \
_qzz_res; \
string which is included in any messages pertaining to addresses
within the specified memory range. Has no other effect on the
properties of the memory range. */
-#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
- (__extension__({unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \
- VG_USERREQ__CREATE_BLOCK, \
- _qzz_addr, _qzz_len, _qzz_desc, 0); \
- _qzz_res; \
+#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
+ (__extension__({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
+ VG_USERREQ__CREATE_BLOCK, \
+ _qzz_addr, _qzz_len, _qzz_desc, 0); \
+ _qzz_res; \
}))
/* Discard a block-description-handle. Returns 1 for an
invalid handle, 0 for a valid handle. */
#define VALGRIND_DISCARD(_qzz_blkindex) \
(__extension__ ({unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0 /* default return */, \
VG_USERREQ__DISCARD, \
0, _qzz_blkindex, 0, 0); \
_qzz_res; \
If suitable addressibility is not established, Valgrind prints an
error message and returns the address of the first offending byte.
Otherwise it returns zero. */
-#define VALGRIND_CHECK_WRITABLE(_qzz_addr,_qzz_len) \
- (__extension__({unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__CHECK_WRITABLE, \
- _qzz_addr, _qzz_len, 0, 0); \
- _qzz_res; \
+#define VALGRIND_CHECK_WRITABLE(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CHECK_WRITABLE, \
+ _qzz_addr, _qzz_len, 0, 0); \
+ _qzz_res; \
}))
/* Check that memory at _qzz_addr is addressible and defined for
_qzz_len bytes. If suitable addressibility and definedness are not
established, Valgrind prints an error message and returns the
address of the first offending byte. Otherwise it returns zero. */
-#define VALGRIND_CHECK_READABLE(_qzz_addr,_qzz_len) \
- (__extension__({unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__CHECK_READABLE, \
- _qzz_addr, _qzz_len, 0, 0); \
- _qzz_res; \
+#define VALGRIND_CHECK_READABLE(_qzz_addr,_qzz_len) \
+ (__extension__({unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__CHECK_READABLE, \
+ _qzz_addr, _qzz_len, 0, 0); \
+ _qzz_res; \
}))
/* Use this macro to force the definedness and addressibility of a
are not established, Valgrind prints an error message and returns
the address of the first offending byte. Otherwise it returns
zero. */
-#define VALGRIND_CHECK_DEFINED(__lvalue) \
- VALGRIND_CHECK_READABLE( \
- (volatile unsigned char *)&(__lvalue), \
+#define VALGRIND_CHECK_DEFINED(__lvalue) \
+ VALGRIND_CHECK_READABLE( \
+ (volatile unsigned char *)&(__lvalue), \
(unsigned int)(sizeof (__lvalue)))
/* Do a memory leak check mid-execution. */
-#define VALGRIND_DO_LEAK_CHECK \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__DO_LEAK_CHECK, \
- 0, 0, 0, 0); \
+#define VALGRIND_DO_LEAK_CHECK \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DO_LEAK_CHECK, \
+ 0, 0, 0, 0); \
}
/* Just display summaries of leaked memory, rather than all the
details */
-#define VALGRIND_DO_QUICK_LEAK_CHECK \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__DO_LEAK_CHECK, \
- 1, 0, 0, 0); \
+#define VALGRIND_DO_QUICK_LEAK_CHECK \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__DO_LEAK_CHECK, \
+ 1, 0, 0, 0); \
}
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
-#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
- {unsigned int _qzz_res; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
- VG_USERREQ__COUNT_LEAKS, \
- &leaked, &dubious, &reachable, &suppressed);\
+#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
+ {unsigned int _qzz_res; \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
+ VG_USERREQ__COUNT_LEAKS, \
+ &leaked, &dubious, &reachable, &suppressed); \
}
(__extension__({unsigned int _qzz_res; \
char* czzsrc = (char*)zzsrc; \
char* czzvbits = (char*)zzvbits; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
VG_USERREQ__GET_VBITS, \
czzsrc, czzvbits, zznbytes,0 ); \
_qzz_res; \
(__extension__({unsigned int _qzz_res; \
char* czzdst = (char*)zzdst; \
char* czzvbits = (char*)zzvbits; \
- VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \
+ VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, \
VG_USERREQ__SET_VBITS, \
czzdst, czzvbits, zznbytes,0 ); \
_qzz_res; \
trivialleak.stderr.exp trivialleak.vgtest \
metadata.stderr.exp metadata.stdout.exp metadata.vgtest-HIDING \
vgtest_ume.stderr.exp vgtest_ume.disabled \
+ wrap1.vgtest wrap1.stdout.exp wrap1.stderr.exp \
+ wrap2.vgtest wrap2.stdout.exp wrap2.stderr.exp \
+ wrap3.vgtest wrap3.stdout.exp wrap3.stderr.exp \
+ wrap4.vgtest wrap4.stdout.exp wrap4.stderr.exp \
+ wrap5.vgtest wrap5.stdout.exp wrap5.stderr.exp \
+ wrap6.vgtest wrap6.stdout.exp wrap6.stderr.exp \
writev.stderr.exp writev.stderr.exp2 writev.stderr.exp3 writev.vgtest \
xml1.stderr.exp xml1.stderr.exp2 xml1.stderr.exp3 \
xml1.stderr.exp64 xml1.stderr.exp64_2 xml1.stdout.exp \
trivialleak \
mismatches new_override metadata \
xml1 \
+ wrap1 wrap2 wrap3 wrap4 wrap5 wrap6 \
writev zeropage
--- /dev/null
+
+#include <stdio.h>
+#include "valgrind.h"
+
+/* The simplest possible wrapping test: just call a wrapped function
+ and check we run the wrapper instead. */
+
+/* The "original" function */
+__attribute__((noinline))
+void actual ( void )
+{
+ printf("in actual\n");
+}
+
+/* The wrapper.  This executable won't have a soname, so we have to
+   use "NONE": V treats any executable/.so which lacks a soname as
+   if its soname were "NONE". */
+void I_WRAP_SONAME_FNNAME_ZU(NONE,actual) ( void )
+{
+ void* orig;
+ VALGRIND_GET_ORIG_FN(orig);
+ printf("wrapper-pre\n");
+ CALL_FN_v_v(orig);
+ printf("wrapper-post\n");
+}
+
+/* --------------- */
+
+int main ( void )
+{
+ printf("starting\n");
+ actual();
+ return 0;
+}
--- /dev/null
+starting
+wrapper-pre
+in actual
+wrapper-post
--- /dev/null
+prog: wrap1
+vgopts: -q
--- /dev/null
+
+#include <stdio.h>
+#include "valgrind.h"
+
+/* Check that function wrapping works for a recursive function. */
+
+/* This is needed to stop gcc4 turning 'fact' into a loop */
+__attribute__((noinline))
+int mul ( int x, int y ) { return x * y; }
+
+int fact ( int n )
+{
+ if (n == 0) return 1; else return mul(n, fact(n-1));
+}
+
+int I_WRAP_SONAME_FNNAME_ZU(NONE,fact) ( int n )
+{
+ int r;
+ void* orig;
+ VALGRIND_GET_ORIG_FN(orig);
+ printf("in wrapper1-pre: fact(%d)\n", n);
+ CALL_FN_W_W(r, orig, n);
+ printf("in wrapper1-post: fact(%d) = %d\n", n, r);
+ return r;
+}
+
+/* --------------- */
+
+int main ( void )
+{
+ int r;
+ printf("computing fact(5)\n");
+ r = fact(5);
+ printf("fact(5) = %d\n", r);
+ return 0;
+}
--- /dev/null
+computing fact(5)
+in wrapper1-pre: fact(5)
+in wrapper1-pre: fact(4)
+in wrapper1-pre: fact(3)
+in wrapper1-pre: fact(2)
+in wrapper1-pre: fact(1)
+in wrapper1-pre: fact(0)
+in wrapper1-post: fact(0) = 1
+in wrapper1-post: fact(1) = 1
+in wrapper1-post: fact(2) = 2
+in wrapper1-post: fact(3) = 6
+in wrapper1-post: fact(4) = 24
+in wrapper1-post: fact(5) = 120
+fact(5) = 120
--- /dev/null
+prog: wrap2
+vgopts: -q
--- /dev/null
+
+#include <stdio.h>
+#include "valgrind.h"
+
+/* Check that function wrapping works for a mutually recursive
+ pair. */
+
+static int fact1 ( int n );
+static int fact2 ( int n );
+
+/* This is needed to stop gcc4 turning 'fact1'/'fact2' into loops */
+__attribute__((noinline))
+int mul ( int x, int y ) { return x * y; }
+
+int fact1 ( int n )
+{
+ if (n == 0) return 1; else return mul(n, fact2(n-1));
+}
+int fact2 ( int n )
+{
+ if (n == 0) return 1; else return mul(n, fact1(n-1));
+}
+
+
+int I_WRAP_SONAME_FNNAME_ZU(NONE,fact1) ( int n )
+{
+ int r;
+ void* fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ printf("in wrapper1-pre: fact(%d)\n", n);
+ CALL_FN_W_W(r,fn,n);
+ printf("in wrapper1-post: fact(%d) = %d\n", n, r);
+ return r;
+}
+
+int I_WRAP_SONAME_FNNAME_ZU(NONE,fact2) ( int n )
+{
+ int r;
+ void* fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ printf("in wrapper2-pre: fact(%d)\n", n);
+ CALL_FN_W_W(r,fn,n);
+ printf("in wrapper2-post: fact(%d) = %d\n", n, r);
+ return r;
+}
+
+/* --------------- */
+
+int main ( void )
+{
+ int r;
+ printf("computing fact1(5)\n");
+ r = fact1(5);
+ printf("fact1(5) = %d\n", r);
+ return 0;
+}
--- /dev/null
+computing fact1(5)
+in wrapper1-pre: fact(5)
+in wrapper2-pre: fact(4)
+in wrapper1-pre: fact(3)
+in wrapper2-pre: fact(2)
+in wrapper1-pre: fact(1)
+in wrapper2-pre: fact(0)
+in wrapper2-post: fact(0) = 1
+in wrapper1-post: fact(1) = 1
+in wrapper2-post: fact(2) = 2
+in wrapper1-post: fact(3) = 6
+in wrapper2-post: fact(4) = 24
+in wrapper1-post: fact(5) = 120
+fact1(5) = 120
--- /dev/null
+prog: wrap3
+vgopts: -q
--- /dev/null
+
+#include <stdio.h>
+#include "valgrind.h"
+
+/* Check that it's safe to call a wrapped function from some other
+ function's wrapper. Note that because the wrapper for fact1
+ actually interferes with the computation of the result, this
+ program produces a different answer when run on V (162) from
+ natively (120).
+*/
+
+static int fact1 ( int n );
+static int fact2 ( int n );
+
+/* This is needed to stop gcc4 turning 'fact1'/'fact2' into loops */
+__attribute__((noinline))
+int mul ( int x, int y ) { return x * y; }
+
+int fact1 ( int n )
+{
+ if (n == 0) return 1; else return mul(n, fact2(n-1));
+}
+int fact2 ( int n )
+{
+ if (n == 0) return 1; else return mul(n, fact1(n-1));
+}
+
+
+int I_WRAP_SONAME_FNNAME_ZU(NONE,fact1) ( int n )
+{
+ int r;
+ void* fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ printf("in wrapper1-pre: fact(%d)\n", n);
+ CALL_FN_W_W(r, fn, n);
+ printf("in wrapper1-post: fact(%d) = %d\n", n, r);
+ if (n >= 3) r += fact2(2);
+ return r;
+}
+
+int I_WRAP_SONAME_FNNAME_ZU(NONE,fact2) ( int n )
+{
+ int r;
+ void* fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ printf("in wrapper2-pre: fact(%d)\n", n);
+ CALL_FN_W_W(r, fn, n);
+ printf("in wrapper2-post: fact(%d) = %d\n", n, r);
+ return r;
+}
+
+/* --------------- */
+
+int main ( void )
+{
+ int r;
+ printf("computing fact1(5)\n");
+ r = fact1(5);
+ printf("fact1(5) = %d\n", r);
+ return 0;
+}
--- /dev/null
+computing fact1(5)
+in wrapper1-pre: fact(5)
+in wrapper2-pre: fact(4)
+in wrapper1-pre: fact(3)
+in wrapper2-pre: fact(2)
+in wrapper1-pre: fact(1)
+in wrapper2-pre: fact(0)
+in wrapper2-post: fact(0) = 1
+in wrapper1-post: fact(1) = 1
+in wrapper2-post: fact(2) = 2
+in wrapper1-post: fact(3) = 6
+in wrapper2-pre: fact(2)
+in wrapper1-pre: fact(1)
+in wrapper2-pre: fact(0)
+in wrapper2-post: fact(0) = 1
+in wrapper1-post: fact(1) = 1
+in wrapper2-post: fact(2) = 2
+in wrapper2-post: fact(4) = 32
+in wrapper1-post: fact(5) = 160
+in wrapper2-pre: fact(2)
+in wrapper1-pre: fact(1)
+in wrapper2-pre: fact(0)
+in wrapper2-post: fact(0) = 1
+in wrapper1-post: fact(1) = 1
+in wrapper2-post: fact(2) = 2
+fact1(5) = 162
--- /dev/null
+prog: wrap4
+vgopts: -q
--- /dev/null
+
+#include <stdio.h>
+#include <malloc.h>
+#include "valgrind.h"
+
+/* As wrap4.c, but also throw in various calls to another redirected
+ function (malloc) to check that that doesn't screw anything up.
+*/
+
+typedef
+ struct _Lard {
+ struct _Lard* next;
+ char stuff[999];
+ }
+ Lard;
+
+Lard* lard = NULL;
+static int ctr = 0;
+
+void addMoreLard ( void )
+{
+ Lard* p;
+ ctr++;
+ if ((ctr % 3) == 1) {
+ p = malloc(sizeof(Lard));
+ p->next = lard;
+ lard = p;
+ }
+}
+
+
+static int fact1 ( int n );
+static int fact2 ( int n );
+
+/* This is needed to stop gcc4 turning 'fact1'/'fact2' into loops */
+__attribute__((noinline))
+int mul ( int x, int y ) { return x * y; }
+
+int fact1 ( int n )
+{
+ addMoreLard();
+ if (n == 0) return 1; else return mul(n, fact2(n-1));
+}
+int fact2 ( int n )
+{
+ addMoreLard();
+ if (n == 0) return 1; else return mul(n, fact1(n-1));
+}
+
+
+int I_WRAP_SONAME_FNNAME_ZU(NONE,fact1) ( int n )
+{
+ int r;
+ void* fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ printf("in wrapper1-pre: fact(%d)\n", n);
+ addMoreLard();
+ CALL_FN_W_W(r, fn, n);
+ addMoreLard();
+ printf("in wrapper1-post: fact(%d) = %d\n", n, r);
+ if (n >= 3) r += fact2(2);
+ return r;
+}
+
+int I_WRAP_SONAME_FNNAME_ZU(NONE,fact2) ( int n )
+{
+ int r;
+ void* fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ printf("in wrapper2-pre: fact(%d)\n", n);
+ addMoreLard();
+ CALL_FN_W_W(r, fn, n);
+ addMoreLard();
+ printf("in wrapper2-post: fact(%d) = %d\n", n, r);
+ return r;
+}
+
+/* --------------- */
+
+int main ( void )
+{
+ int r;
+ Lard *p, *p_next;
+ printf("computing fact1(7)\n");
+ r = fact1(7);
+ printf("fact1(7) = %d\n", r);
+
+ printf("allocated %d Lards\n", ctr);
+ for (p = lard; p; p = p_next) {
+ p_next = p->next;
+ free(p);
+ }
+
+ return 0;
+}
--- /dev/null
+computing fact1(7)
+in wrapper1-pre: fact(7)
+in wrapper2-pre: fact(6)
+in wrapper1-pre: fact(5)
+in wrapper2-pre: fact(4)
+in wrapper1-pre: fact(3)
+in wrapper2-pre: fact(2)
+in wrapper1-pre: fact(1)
+in wrapper2-pre: fact(0)
+in wrapper2-post: fact(0) = 1
+in wrapper1-post: fact(1) = 1
+in wrapper2-post: fact(2) = 2
+in wrapper1-post: fact(3) = 6
+in wrapper2-pre: fact(2)
+in wrapper1-pre: fact(1)
+in wrapper2-pre: fact(0)
+in wrapper2-post: fact(0) = 1
+in wrapper1-post: fact(1) = 1
+in wrapper2-post: fact(2) = 2
+in wrapper2-post: fact(4) = 32
+in wrapper1-post: fact(5) = 160
+in wrapper2-pre: fact(2)
+in wrapper1-pre: fact(1)
+in wrapper2-pre: fact(0)
+in wrapper2-post: fact(0) = 1
+in wrapper1-post: fact(1) = 1
+in wrapper2-post: fact(2) = 2
+in wrapper2-post: fact(6) = 972
+in wrapper1-post: fact(7) = 6804
+in wrapper2-pre: fact(2)
+in wrapper1-pre: fact(1)
+in wrapper2-pre: fact(0)
+in wrapper2-post: fact(0) = 1
+in wrapper1-post: fact(1) = 1
+in wrapper2-post: fact(2) = 2
+fact1(7) = 6806
+allocated 51 Lards
--- /dev/null
+prog: wrap5
+vgopts: -q
--- /dev/null
+
+#include <stdlib.h>
+#include <stdio.h>
+#include "valgrind.h"
+
+/* Program that checks that wrapping works for several argument
+   counts (fn_0 through fn_2 below).  Also calls originals which
+   trash all the iregs in an attempt to shake out any problems
+   caused by insufficient saving of caller-save registers around
+   the hidden call instruction. */
+
+typedef unsigned int UInt;
+
+#define ROL(_x,n) (((_x) << n) | ((UInt)(_x)) >> ((8*sizeof(UInt)-n)))
+
+#define TRASH_IREGS(_rlval, _vec) \
+ do { \
+ UInt* vec = (_vec); \
+ /* x86 spills for v > 4, amd64 for v > 12. */ \
+ UInt i, sum = 0; \
+ UInt v1 = vec[1-1]; \
+ UInt v2 = vec[2-1]; \
+ UInt v3 = vec[3-1]; \
+ UInt v4 = vec[4-1]; \
+ UInt v5 = vec[5-1]; \
+ UInt v6 = vec[6-1]; \
+ UInt v7 = vec[7-1]; \
+ UInt v8 = vec[8-1]; \
+ UInt v9 = vec[9-1]; \
+ UInt v10 = vec[10-1]; \
+ UInt v11 = vec[11-1]; \
+ UInt v12 = vec[12-1]; \
+ for (i = 0; i < 50; i++) { \
+ v1 = ROL(v1,1); \
+ v2 = ROL(v2,2); \
+ v3 = ROL(v3,3); \
+ v4 = ROL(v4,4); \
+ v5 = ROL(v5,5); \
+ v6 = ROL(v6,6); \
+ v7 = ROL(v7,7); \
+ v8 = ROL(v8,8); \
+ v9 = ROL(v9,9); \
+ v10 = ROL(v10,10); \
+ v11 = ROL(v11,11); \
+ v12 = ROL(v12,12); \
+ sum ^= (v1-v2); \
+ sum ^= (v1-v3); \
+ sum ^= (v1-v4); \
+ sum ^= (v1-v5); \
+ sum ^= (v1-v6); \
+ sum ^= (v1-v7); \
+ sum ^= (v1-v8); \
+ sum ^= (v1-v9); \
+ sum ^= (v1-v10); \
+ sum ^= (v1-v11); \
+ sum ^= (v1-v12); \
+ } \
+ _rlval = sum; \
+ } while (0)
+
+/* --------------- 0 --------------- */
+
+UInt fn_0 ( void )
+{
+ UInt r;
+ UInt* words = calloc(200, sizeof(UInt));
+ TRASH_IREGS(r, words);
+ free(words);
+ return r;
+}
+
+UInt I_WRAP_SONAME_FNNAME_ZU(NONE,fn_0) ( void )
+{
+ UInt r;
+ void* fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ printf("fn_0 wrapper pre ()\n");
+ CALL_FN_W_v(r, fn);
+ printf("fn_0 wrapper post1 = %d\n", (int)r);
+ CALL_FN_v_v(fn);
+ printf("fn_0 wrapper post2 = %d\n", (int)r);
+ return r;
+}
+
+/* --------------- 1 --------------- */
+
+UInt fn_1 ( UInt a1 )
+{
+ UInt r;
+ UInt* words = calloc(200, sizeof(UInt));
+ words[1-1] = a1;
+ TRASH_IREGS(r, words);
+ free(words);
+ return r;
+}
+
+UInt I_WRAP_SONAME_FNNAME_ZU(NONE,fn_1) ( UInt a1 )
+{
+ UInt r;
+ void* fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ printf("fn_1 wrapper pre ( %d )\n", (int)a1);
+ CALL_FN_W_W(r, fn, a1);
+ printf("fn_1 wrapper post1 = %d\n", (int)r);
+ CALL_FN_v_W(fn, a1);
+ printf("fn_1 wrapper post2 = %d\n", (int)r);
+ return r;
+}
+
+/* --------------- 2 --------------- */
+
+UInt fn_2 ( UInt a1, UInt a2 )
+{
+ UInt r;
+ UInt* words = calloc(200, sizeof(UInt));
+ words[1-1] = a1;
+ words[2-1] = a2;
+ TRASH_IREGS(r, words);
+ free(words);
+ return r;
+}
+
+UInt I_WRAP_SONAME_FNNAME_ZU(NONE,fn_2) ( UInt a1, UInt a2 )
+{
+ UInt r;
+ void* fn;
+ VALGRIND_GET_ORIG_FN(fn);
+ printf("fn_2 wrapper pre ( %d, %d )\n", (int)a1, (int)a2);
+ CALL_FN_W_WW(r, fn, a1, a2);
+ printf("fn_2 wrapper post1 = %d\n", (int)r);
+ CALL_FN_v_WW(fn, a1, a2);
+ printf("fn_2 wrapper post2 = %d\n", (int)r);
+ return r;
+}
+
+/* --------------- main --------------- */
+
+int main ( void )
+{
+ UInt w;
+
+ printf("fn_0 ...\n");
+ w = fn_0();
+ printf(" ... %d\n\n", (int)w);
+
+ printf("fn_1 ...\n");
+ w = fn_1(42);
+ printf(" ... %d\n\n", (int)w);
+
+ printf("fn_2 ...\n");
+ w = fn_2(42,43);
+ printf(" ... %d\n\n", (int)w);
+
+ return 0;
+}
+
--- /dev/null
+fn_0 ...
+fn_0 wrapper pre ()
+fn_0 wrapper post1 = 0
+fn_0 wrapper post2 = 0
+ ... 0
+
+fn_1 ...
+fn_1 wrapper pre ( 42 )
+fn_1 wrapper post1 = -13631437
+fn_1 wrapper post2 = -13631437
+ ... -13631437
+
+fn_2 ...
+fn_2 wrapper pre ( 42, 43 )
+fn_2 wrapper post1 = 201956282
+fn_2 wrapper post2 = 201956282
+ ... 201956282
+
--- /dev/null
+prog: wrap6
+vgopts: -q
--trace-signals=no|yes show signal handling details? [no]
--trace-symtab=no|yes show symbol table details? [no]
--trace-cfi=no|yes show call-frame-info details? [no]
+ --trace-redir=no|yes show redirection details? [no]
--trace-sched=no|yes show thread scheduler details? [no]
--wait-for-gdb=yes|no pause on startup to wait for gdb attach
--command-line-only=no|yes only use command line options [no]