From 7a36f601331a70949f3ff12cc33b2be98a23e796 Mon Sep 17 00:00:00 2001 From: Julian Seward Date: Fri, 12 Apr 2002 11:12:52 +0000 Subject: [PATCH] Mega-merge of my last 2 weeks hacking. This basically does the groundwork for pthread_* support. Major changes: * Valgrind now contains a (skeletal!) user-space pthreads implementation. The exciting bits are in new file vg_scheduler.c. This contains thread management and scheduling, including nasty crud to do with making some syscalls (read,write,nanosleep) nonblocking. Also implementation of pthread_ functions: create join mutex_{create,destroy,lock,unlock} and cancel. * As a side effect of the above, major improvements to signal handling and to the client-request machinery. This is now used to intercept malloc/free etc too; the hacky way this was done before is gone. Another side effect is that vg_dispatch.S is greatly simplified. Also, the horrible hacks to do with delivering signals to threads blocked in syscalls are gone, since the new mechanisms cover this case easily. 
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@52 --- Makefile.am | 5 +- addrcheck/Makefile.am | 5 +- cachegrind/Makefile.am | 5 +- configure.in | 2 +- corecheck/Makefile.am | 5 +- coregrind/Makefile.am | 5 +- coregrind/arch/x86-linux/vg_syscall.S | 74 ---- coregrind/valgrind.in | 8 +- coregrind/vg_clientmalloc.c | 404 +----------------- coregrind/vg_constants.h | 31 +- coregrind/vg_dispatch.S | 324 +++++---------- coregrind/vg_from_ucode.c | 90 ++-- coregrind/vg_helpers.S | 54 --- coregrind/vg_include.h | 344 +++++++++++---- coregrind/vg_kerneliface.h | 106 +++++ coregrind/vg_main.c | 340 ++------------- coregrind/vg_memory.c | 21 +- coregrind/vg_mylibc.c | 51 ++- coregrind/vg_signals.c | 431 ++++++++----------- coregrind/vg_syscall.S | 74 ---- coregrind/vg_to_ucode.c | 88 ++-- coregrind/vg_translate.c | 20 +- coregrind/vg_transtab.c | 6 +- helgrind/Makefile.am | 5 +- include/valgrind.h | 139 +++++-- lackey/Makefile.am | 5 +- memcheck/Makefile.am | 5 +- none/Makefile.am | 5 +- tests/blocked_syscall.c | 2 +- valgrind.h | 139 +++++-- valgrind.in | 8 +- vg_clientmalloc.c | 404 +----------------- vg_clientperms.c | 68 +-- vg_constants.h | 31 +- vg_dispatch.S | 324 +++++---------- vg_from_ucode.c | 90 ++-- vg_helpers.S | 54 --- vg_include.h | 344 +++++++++++---- vg_kerneliface.h | 106 +++++ vg_main.c | 340 ++------------- vg_memory.c | 21 +- vg_mylibc.c | 51 ++- vg_signals.c | 431 ++++++++----------- vg_syscall.S | 74 ---- vg_syscall_mem.c | 574 ++++++++++++++------------ vg_to_ucode.c | 88 ++-- vg_translate.c | 20 +- vg_transtab.c | 6 +- 48 files changed, 2327 insertions(+), 3500 deletions(-) diff --git a/Makefile.am b/Makefile.am index 4f0034ab54..9e73341c85 100644 --- a/Makefile.am +++ b/Makefile.am @@ -29,6 +29,7 @@ val_PROGRAMS = valgrind.so valgrinq.so valgrinq_so_SOURCES = vg_valgrinq_dummy.c valgrind_so_SOURCES = \ + vg_scheduler.c \ vg_clientmalloc.c \ vg_clientperms.c \ vg_demangle.c \ @@ -70,10 +71,10 @@ noinst_HEADERS = \ vg_unsafe.h -vg_memory.o: 
vg_memory.c +vg_memory.o: vg_memory.c $(noinst_HEADERS) $(COMPILE) -O2 @PREFERRED_STACK_BOUNDARY@ -c $< -vg_clientmalloc.o: vg_clientmalloc.c +vg_clientmalloc.o: vg_clientmalloc.c $(noinst_HEADERS) $(COMPILE) -fno-omit-frame-pointer -c $< diff --git a/addrcheck/Makefile.am b/addrcheck/Makefile.am index 4f0034ab54..9e73341c85 100644 --- a/addrcheck/Makefile.am +++ b/addrcheck/Makefile.am @@ -29,6 +29,7 @@ val_PROGRAMS = valgrind.so valgrinq.so valgrinq_so_SOURCES = vg_valgrinq_dummy.c valgrind_so_SOURCES = \ + vg_scheduler.c \ vg_clientmalloc.c \ vg_clientperms.c \ vg_demangle.c \ @@ -70,10 +71,10 @@ noinst_HEADERS = \ vg_unsafe.h -vg_memory.o: vg_memory.c +vg_memory.o: vg_memory.c $(noinst_HEADERS) $(COMPILE) -O2 @PREFERRED_STACK_BOUNDARY@ -c $< -vg_clientmalloc.o: vg_clientmalloc.c +vg_clientmalloc.o: vg_clientmalloc.c $(noinst_HEADERS) $(COMPILE) -fno-omit-frame-pointer -c $< diff --git a/cachegrind/Makefile.am b/cachegrind/Makefile.am index 4f0034ab54..9e73341c85 100644 --- a/cachegrind/Makefile.am +++ b/cachegrind/Makefile.am @@ -29,6 +29,7 @@ val_PROGRAMS = valgrind.so valgrinq.so valgrinq_so_SOURCES = vg_valgrinq_dummy.c valgrind_so_SOURCES = \ + vg_scheduler.c \ vg_clientmalloc.c \ vg_clientperms.c \ vg_demangle.c \ @@ -70,10 +71,10 @@ noinst_HEADERS = \ vg_unsafe.h -vg_memory.o: vg_memory.c +vg_memory.o: vg_memory.c $(noinst_HEADERS) $(COMPILE) -O2 @PREFERRED_STACK_BOUNDARY@ -c $< -vg_clientmalloc.o: vg_clientmalloc.c +vg_clientmalloc.o: vg_clientmalloc.c $(noinst_HEADERS) $(COMPILE) -fno-omit-frame-pointer -c $< diff --git a/configure.in b/configure.in index d3b223c940..1c77f23fa0 100644 --- a/configure.in +++ b/configure.in @@ -1,7 +1,7 @@ # Process this file with autoconf to produce a configure script. 
AC_INIT(vg_clientmalloc.c) AM_CONFIG_HEADER(config.h) -AM_INIT_AUTOMAKE(valgrind, 20020329) +AM_INIT_AUTOMAKE(valgrind, 20020412) AM_MAINTAINER_MODE diff --git a/corecheck/Makefile.am b/corecheck/Makefile.am index 4f0034ab54..9e73341c85 100644 --- a/corecheck/Makefile.am +++ b/corecheck/Makefile.am @@ -29,6 +29,7 @@ val_PROGRAMS = valgrind.so valgrinq.so valgrinq_so_SOURCES = vg_valgrinq_dummy.c valgrind_so_SOURCES = \ + vg_scheduler.c \ vg_clientmalloc.c \ vg_clientperms.c \ vg_demangle.c \ @@ -70,10 +71,10 @@ noinst_HEADERS = \ vg_unsafe.h -vg_memory.o: vg_memory.c +vg_memory.o: vg_memory.c $(noinst_HEADERS) $(COMPILE) -O2 @PREFERRED_STACK_BOUNDARY@ -c $< -vg_clientmalloc.o: vg_clientmalloc.c +vg_clientmalloc.o: vg_clientmalloc.c $(noinst_HEADERS) $(COMPILE) -fno-omit-frame-pointer -c $< diff --git a/coregrind/Makefile.am b/coregrind/Makefile.am index 4f0034ab54..9e73341c85 100644 --- a/coregrind/Makefile.am +++ b/coregrind/Makefile.am @@ -29,6 +29,7 @@ val_PROGRAMS = valgrind.so valgrinq.so valgrinq_so_SOURCES = vg_valgrinq_dummy.c valgrind_so_SOURCES = \ + vg_scheduler.c \ vg_clientmalloc.c \ vg_clientperms.c \ vg_demangle.c \ @@ -70,10 +71,10 @@ noinst_HEADERS = \ vg_unsafe.h -vg_memory.o: vg_memory.c +vg_memory.o: vg_memory.c $(noinst_HEADERS) $(COMPILE) -O2 @PREFERRED_STACK_BOUNDARY@ -c $< -vg_clientmalloc.o: vg_clientmalloc.c +vg_clientmalloc.o: vg_clientmalloc.c $(noinst_HEADERS) $(COMPILE) -fno-omit-frame-pointer -c $< diff --git a/coregrind/arch/x86-linux/vg_syscall.S b/coregrind/arch/x86-linux/vg_syscall.S index 210328a690..c07d95567c 100644 --- a/coregrind/arch/x86-linux/vg_syscall.S +++ b/coregrind/arch/x86-linux/vg_syscall.S @@ -41,10 +41,6 @@ # m_state_static, and back afterwards. VG_(do_syscall): - cmpl $2, VG_(syscall_depth) - jz do_syscall_DEPTH_2 - - # depth 1 copy follows ... # Save all the int registers of the real machines state on the # simulators stack. 
pushal @@ -104,76 +100,6 @@ VG_(do_syscall): ret - - - - - - - -do_syscall_DEPTH_2: - - # depth 2 copy follows ... - # Save all the int registers of the real machines state on the - # simulators stack. - pushal - - # and save the real FPU state too - fwait - fnsave VG_(real_fpu_state_saved_over_syscall_d2) - frstor VG_(real_fpu_state_saved_over_syscall_d2) - - # remember what the simulators stack pointer is - movl %esp, VG_(esp_saved_over_syscall_d2) - - # Now copy the simulated machines state into the real one - # esp still refers to the simulators stack - frstor VG_(m_state_static)+40 - movl VG_(m_state_static)+32, %eax - pushl %eax - popfl - movl VG_(m_state_static)+0, %eax - movl VG_(m_state_static)+4, %ecx - movl VG_(m_state_static)+8, %edx - movl VG_(m_state_static)+12, %ebx - movl VG_(m_state_static)+16, %esp - movl VG_(m_state_static)+20, %ebp - movl VG_(m_state_static)+24, %esi - movl VG_(m_state_static)+28, %edi - - # esp now refers to the simulatees stack - # Do the actual system call - int $0x80 - - # restore stack as soon as possible - # esp refers to simulatees stack - movl %esp, VG_(m_state_static)+16 - movl VG_(esp_saved_over_syscall_d2), %esp - # esp refers to simulators stack - - # ... and undo everything else. - # Copy real state back to simulated state. 
- movl %eax, VG_(m_state_static)+0 - movl %ecx, VG_(m_state_static)+4 - movl %edx, VG_(m_state_static)+8 - movl %ebx, VG_(m_state_static)+12 - movl %ebp, VG_(m_state_static)+20 - movl %esi, VG_(m_state_static)+24 - movl %edi, VG_(m_state_static)+28 - pushfl - popl %eax - movl %eax, VG_(m_state_static)+32 - fwait - fnsave VG_(m_state_static)+40 - frstor VG_(m_state_static)+40 - - # Restore the state of the simulator - frstor VG_(real_fpu_state_saved_over_syscall_d2) - popal - - ret - - ##--------------------------------------------------------------------## ##--- end vg_syscall.S ---## ##--------------------------------------------------------------------## diff --git a/coregrind/valgrind.in b/coregrind/valgrind.in index 0b566c94cf..bc4594004c 100755 --- a/coregrind/valgrind.in +++ b/coregrind/valgrind.in @@ -164,8 +164,10 @@ fi VG_ARGS="$VALGRIND_OPTS $vgsupp $vgopts" export VG_ARGS -LD_PRELOAD=$VALGRIND/valgrind.so:$LD_PRELOAD +LD_LIBRARY_PATH=$VALGRIND:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH +LD_PRELOAD=valgrind.so:$LD_PRELOAD export LD_PRELOAD +#LD_DEBUG=files +#export LD_DEBUG exec $argopts - - diff --git a/coregrind/vg_clientmalloc.c b/coregrind/vg_clientmalloc.c index d2be752d09..d59a029a19 100644 --- a/coregrind/vg_clientmalloc.c +++ b/coregrind/vg_clientmalloc.c @@ -250,10 +250,9 @@ static ShadowChunk* client_malloc_shadow ( UInt align, UInt size, /* Allocate memory, noticing whether or not we are doing the full instrumentation thing. 
*/ -void* VG_(client_malloc) ( UInt size, UInt raw_alloc_kind ) +void* VG_(client_malloc) ( UInt size, VgAllocKind kind ) { ShadowChunk* sc; - VgAllocKind kind; VGP_PUSHCC(VgpCliMalloc); client_malloc_init(); @@ -263,21 +262,15 @@ void* VG_(client_malloc) ( UInt size, UInt raw_alloc_kind ) count_freelist(), vg_freed_list_volume, size, raw_alloc_kind ); # endif + + vg_cmalloc_n_mallocs ++; + vg_cmalloc_bs_mallocd += size; + if (!VG_(clo_instrument)) { VGP_POPCC; return VG_(malloc) ( VG_AR_CLIENT, size ); } - switch (raw_alloc_kind) { - case 0x4002: kind = Vg_AllocNewVec; break; - case 0x4001: kind = Vg_AllocNew; break; - case 0x4000: /* malloc */ - case 6666: /* calloc */ - kind = Vg_AllocMalloc; break; - default: /* should not happen */ - /* therefore we make sure it doesn't -- JRS */ - VG_(panic)("VG_(client_malloc): raw_alloc_kind"); - break; /*NOTREACHED*/ - } + sc = client_malloc_shadow ( 0, size, kind ); VGP_POPCC; return (void*)(sc->data); @@ -295,6 +288,10 @@ void* VG_(client_memalign) ( UInt align, UInt size ) count_freelist(), vg_freed_list_volume, align, size ); # endif + + vg_cmalloc_n_mallocs ++; + vg_cmalloc_bs_mallocd += size; + if (!VG_(clo_instrument)) { VGP_POPCC; return VG_(malloc_aligned) ( VG_AR_CLIENT, align, size ); @@ -305,11 +302,10 @@ void* VG_(client_memalign) ( UInt align, UInt size ) } -void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind ) +void VG_(client_free) ( void* ptrV, VgAllocKind kind ) { ShadowChunk* sc; UInt ml_no; - VgAllocKind kind; VGP_PUSHCC(VgpCliMalloc); client_malloc_init(); @@ -319,6 +315,9 @@ void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind ) count_freelist(), vg_freed_list_volume, ptrV, raw_alloc_kind ); # endif + + vg_cmalloc_n_frees ++; + if (!VG_(clo_instrument)) { VGP_POPCC; VG_(free) ( VG_AR_CLIENT, ptrV ); @@ -340,16 +339,6 @@ void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind ) return; } - switch (raw_alloc_kind) { - case 0x5002: kind = Vg_AllocNewVec; break; - case 0x5001: kind = Vg_AllocNew; 
break; - case 0x5000: - default: - kind = Vg_AllocMalloc; - /* should only happen if bug in client code */ - break; - } - /* check if its a matching free() / delete / delete [] */ if (kind != sc->allockind) VG_(record_freemismatch_error) ( (Addr) ptrV ); @@ -386,6 +375,9 @@ void* VG_(client_calloc) ( UInt nmemb, UInt size1 ) nmemb, size1 ); # endif + vg_cmalloc_n_mallocs ++; + vg_cmalloc_bs_mallocd += nmemb * size1; + if (!VG_(clo_instrument)) { VGP_POPCC; return VG_(calloc) ( VG_AR_CLIENT, nmemb, size1 ); @@ -430,6 +422,10 @@ void* VG_(client_realloc) ( void* ptrV, UInt size_new ) ptrV, size_new ); # endif + vg_cmalloc_n_frees ++; + vg_cmalloc_n_mallocs ++; + vg_cmalloc_bs_mallocd += size_new; + if (!VG_(clo_instrument)) { vg_assert(ptrV != NULL && size_new != 0); VGP_POPCC; @@ -573,364 +569,6 @@ void VG_(describe_addr) ( Addr a, AddrInfo* ai ) return; } -/*------------------------------------------------------------*/ -/*--- Replace the C library versions with our own. Hairy. ---*/ -/*------------------------------------------------------------*/ - -/* Below are new versions of malloc, __builtin_new, free, - __builtin_delete, calloc and realloc. - - malloc, __builtin_new, free, __builtin_delete, calloc and realloc - can be entered either on the real CPU or the simulated one. If on - the real one, this is because the dynamic linker is running the - static initialisers for C++, before starting up Valgrind itself. - In this case it is safe to route calls through to - VG_(malloc)/vg_free, since that is self-initialising. - - Once Valgrind is initialised, vg_running_on_simd_CPU becomes True. - The call needs to be transferred from the simulated CPU back to the - real one and routed to the vg_client_* functions. To do that, the - args are passed to vg_trap_here, which the simulator detects. 
The - bogus epilogue fn call is to guarantee that gcc doesn't tailcall - vg_trap_here, since that would cause the simulator's detection to - fail -- it only checks the targets of call transfers, not jumps. - And of course we have to be sure gcc won't inline either the - vg_trap_here or vg_bogus_epilogue. Ha ha ha. What a mess. -*/ - -/* Place afterwards to guarantee it won't get inlined ... */ -static UInt vg_trap_here_WRAPPER ( UInt arg1, UInt arg2, UInt what_to_do ); -static void vg_bogus_epilogue ( void ); - -/* ALL calls to malloc wind up here. */ -void* malloc ( UInt n ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("malloc[simd=%d](%d)", - (UInt)VG_(running_on_simd_CPU), n ); - - if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; } - - vg_cmalloc_n_mallocs ++; - vg_cmalloc_bs_mallocd += n; - - if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4000 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(malloc)(VG_AR_CLIENT, n); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } -} - -void* __builtin_new ( UInt n ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("__builtin_new[simd=%d](%d)", - (UInt)VG_(running_on_simd_CPU), n ); - - if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; } - - vg_cmalloc_n_mallocs++; - vg_cmalloc_bs_mallocd += n; - - if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4001 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(malloc)(VG_AR_CLIENT, n); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return v; - } -} - -void* __builtin_vec_new ( Int n ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("__builtin_vec_new[simd=%d](%d)", - (UInt)VG_(running_on_simd_CPU), n ); - - if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; } - - vg_cmalloc_n_mallocs++; - vg_cmalloc_bs_mallocd += n; - 
- if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4002 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(malloc)(VG_AR_CLIENT, n); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return v; - } -} - -void free ( void* p ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("free[simd=%d](%p)\n", - (UInt)VG_(running_on_simd_CPU), p ); - vg_cmalloc_n_frees ++; - - if (p == NULL) - return; - if (VG_(running_on_simd_CPU)) { - (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5000 ); - vg_bogus_epilogue(); - } else { - VG_(free)(VG_AR_CLIENT, p); - } -} - -void __builtin_delete ( void* p ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("__builtin_delete[simd=%d](%p)\n", - (UInt)VG_(running_on_simd_CPU), p ); - vg_cmalloc_n_frees ++; - - if (p == NULL) - return; - if (VG_(running_on_simd_CPU)) { - (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5001 ); - vg_bogus_epilogue(); - } else { - VG_(free)(VG_AR_CLIENT, p); - } -} - -void __builtin_vec_delete ( void* p ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("__builtin_vec_delete[simd=%d](%p)\n", - (UInt)VG_(running_on_simd_CPU), p ); - vg_cmalloc_n_frees ++; - - if (p == NULL) - return; - if (VG_(running_on_simd_CPU)) { - (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5002 ); - vg_bogus_epilogue(); - } else { - VG_(free)(VG_AR_CLIENT, p); - } -} - -void* calloc ( UInt nmemb, UInt size ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("calloc[simd=%d](%d,%d)", - (UInt)VG_(running_on_simd_CPU), nmemb, size ); - vg_cmalloc_n_mallocs ++; - vg_cmalloc_bs_mallocd += size * nmemb; - - if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( nmemb, size, 6666 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(calloc)(VG_AR_CLIENT, nmemb, size); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return v; - } -} - -void* realloc ( void* 
ptrV, UInt new_size ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("realloc[simd=%d](%p,%d)", - (UInt)VG_(running_on_simd_CPU), ptrV, new_size ); - - if (VG_(clo_sloppy_malloc)) - { while ((new_size % 4) > 0) new_size++; } - - vg_cmalloc_n_frees ++; - vg_cmalloc_n_mallocs ++; - vg_cmalloc_bs_mallocd += new_size; - - if (ptrV == NULL) - return malloc(new_size); - if (new_size == 0) { - free(ptrV); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = 0\n" ); - return NULL; - } - if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( (UInt)ptrV, new_size, 7777 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(realloc)(VG_AR_CLIENT, ptrV, new_size); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return v; - } -} - -void* memalign ( Int alignment, Int n ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("memalign[simd=%d](al %d, size %d)", - (UInt)VG_(running_on_simd_CPU), alignment, n ); - - if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; } - - vg_cmalloc_n_mallocs ++; - vg_cmalloc_bs_mallocd += n; - - if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( alignment, n, 8888 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(malloc_aligned)(VG_AR_CLIENT, alignment, n); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } -} - -void* valloc ( Int size ) -{ - return memalign(VKI_BYTES_PER_PAGE, size); -} - - -/* Various compatibility wrapper functions, for glibc and libstdc++. */ -void cfree ( void* p ) -{ - free ( p ); -} - -void* mallinfo ( void ) -{ - VG_(message)(Vg_UserMsg, - "Warning: incorrectly-handled call to mallinfo()"); - return NULL; -} - - - -int mallopt ( int cmd, int value ) -{ - /* In glibc-2.2.4, 1 denoted a successful return value for mallopt */ - return 1; -} - - -/* Bomb out if we get any of these. 
*/ -void pvalloc ( void ) -{ VG_(panic)("call to pvalloc\n"); } - -void malloc_stats ( void ) -{ VG_(panic)("call to malloc_stats\n"); } -void malloc_usable_size ( void ) -{ VG_(panic)("call to malloc_usable_size\n"); } -void malloc_trim ( void ) -{ VG_(panic)("call to malloc_trim\n"); } -void malloc_get_state ( void ) -{ VG_(panic)("call to malloc_get_state\n"); } -void malloc_set_state ( void ) -{ VG_(panic)("call to malloc_set_state\n"); } - - -int __posix_memalign ( void **memptr, UInt alignment, UInt size ) -{ - void *mem; - - /* Test whether the SIZE argument is valid. It must be a power of - two multiple of sizeof (void *). */ - if (size % sizeof (void *) != 0 || (size & (size - 1)) != 0) - return 22 /*EINVAL*/; - - mem = memalign (alignment, size); - - if (mem != NULL) { - *memptr = mem; - return 0; - } - - return 12 /*ENOMEM*/; -} - - -/*------------------------------------------------------------*/ -/*--- Magic supporting hacks. ---*/ -/*------------------------------------------------------------*/ - -extern UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do ); - -static -UInt vg_trap_here_WRAPPER ( UInt arg1, UInt arg2, UInt what_to_do ) -{ - /* The point of this idiocy is to make a plain, ordinary call to - vg_trap_here which vg_dispatch_when_CALL can spot. Left to - itself, with -fpic, gcc generates "call vg_trap_here@PLT" which - doesn't get spotted, for whatever reason. I guess I could check - _all_ control flow transfers, but that would be an undesirable - performance overhead. - - If you compile without -fpic, gcc generates the obvious call - insn, so the wrappers below will work if they just call - vg_trap_here. But I don't want to rule out building with -fpic, - hence this hack. Sigh. 
- */ - UInt v; - -# define WHERE_TO VG_(trap_here) -# define STRINGIFY(xx) __STRING(xx) - - asm("# call to vg_trap_here\n" - "\t pushl %3\n" - "\t pushl %2\n" - "\t pushl %1\n" - "\t call " STRINGIFY(WHERE_TO) "\n" - "\t addl $12, %%esp\n" - "\t movl %%eax, %0\n" - : "=r" (v) - : "r" (arg1), "r" (arg2), "r" (what_to_do) - : "eax", "esp", "cc", "memory"); - return v; - -# undef WHERE_TO -# undef STRINGIFY -} - -/* Last, but not least ... */ -void vg_bogus_epilogue ( void ) -{ - /* Runs on simulated CPU only. */ -} - -UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do ) -{ - /* Calls to this fn are detected in vg_dispatch.S and are handled - specially. So this fn should never be entered. */ - VG_(panic)("vg_trap_here called!"); - return 0; /*NOTREACHED*/ -} - /*--------------------------------------------------------------------*/ /*--- end vg_clientmalloc.c ---*/ diff --git a/coregrind/vg_constants.h b/coregrind/vg_constants.h index ef48ef009d..b1b1b32e7d 100644 --- a/coregrind/vg_constants.h +++ b/coregrind/vg_constants.h @@ -50,23 +50,18 @@ #define VGP_(str) VGAPPEND(vgProf_,str) #define VGOFF_(str) VGAPPEND(vgOff_,str) -/* Reasons why the inner simulation loop might stop (i.e. why has - vg_dispatch_ctr reached zero? */ -#define VG_Y_SIGCHECK 0 /* signal check due */ -#define VG_Y_SMC 1 /* write to code detected */ -#define VG_Y_EXIT 2 /* natural or debug end to simulation */ -#define VG_Y_TRANSLATE 3 /* translation of vg_m_eip needed */ - -/* Check for pending signals every this-many jumps. Since this - happens in the region of once per millisecond, we also take the - opportunity do do a bit of quick sanity checking at the same time. - Look at the call sites of VG_(deliver_signals). */ -#define VG_SIGCHECK_INTERVAL 1000 - -/* A ,agic values that %ebp might be set to when returning to the + +/* Magic values that %ebp might be set to when returning to the dispatcher. The only other legitimate value is to point to the - start of VG_(baseBlock). 
*/ -#define VG_EBP_DISPATCH_CHECKED 17 + start of VG_(baseBlock). These also are return values from + VG_(run_innerloop) to the scheduler. */ +#define VG_TRC_EBP_JMP_SPECIAL 17 +#define VG_TRC_EBP_JMP_SYSCALL 19 +#define VG_TRC_EBP_JMP_CLIENTREQ 23 + +#define VG_TRC_INNER_COUNTERZERO 29 /* ebp can't have this; sched return only */ +#define VG_TRC_INNER_FASTMISS 31 /* ditto. Means fast-cache miss. */ +#define VG_TRC_UNRESUMABLE_SIGNAL 37 /* ditto; got sigsegv/sigbus */ /* Debugging hack for assembly code ... sigh. */ #if 0 @@ -75,12 +70,13 @@ #define OYNK(nnn) #endif -#if 1 +#if 0 #define OYNNK(nnn) pushal; pushl $nnn; call VG_(oynk) ; addl $4,%esp; popal #else #define OYNNK(nnn) #endif + /* Constants for the fast translation lookup cache. */ #define VG_TT_FAST_BITS 15 #define VG_TT_FAST_SIZE (1 << VG_TT_FAST_BITS) @@ -88,6 +84,7 @@ /* Constants for the fast original-code-write check cache. */ + /* Usually you want this to be zero. */ #define VG_SMC_FASTCHECK_IN_C 0 diff --git a/coregrind/vg_dispatch.S b/coregrind/vg_dispatch.S index 52231946e2..0f4783ba0e 100644 --- a/coregrind/vg_dispatch.S +++ b/coregrind/vg_dispatch.S @@ -61,8 +61,15 @@ .globl VG_(run_innerloop) VG_(run_innerloop): #OYNK(1000) + # ----- entry point to VG_(run_innerloop) ----- - pushal + pushl %ebx + pushl %ecx + pushl %edx + pushl %esi + pushl %edi + pushl %ebp + # Set up the baseBlock pointer movl $VG_(baseBlock), %ebp @@ -70,19 +77,19 @@ VG_(run_innerloop): movl VGOFF_(m_eip), %esi movl (%ebp, %esi, 4), %eax - # fall thru to vg_dispatch + # Start off dispatching paranoically, since we no longer have + # any indication whether or not this might be a special call/ret + # transfer. + jmp dispatch_callret_maybe -.globl VG_(dispatch) -VG_(dispatch): - # %eax holds destination (original) address - # To signal any kind of interruption, set vg_dispatch_ctr - # to 1, and vg_interrupt_reason to the appropriate value - # before jumping here. - + +dispatch_main: + # Jump here to do a new dispatch. 
+ # %eax holds destination (original) address. # %ebp indicates further details of the control transfer # requested to the address in %eax. The idea is that we # want to check all jump targets to see if they are either - # VG_(signalreturn_bogusRA) or VG_(trap_here), both of which + # VG_(signalreturn_bogusRA) or VG_(shutdown), both of which # require special treatment. However, testing all branch # targets is expensive, and anyway in most cases JITter knows # that a jump cannot be to either of these two. We therefore @@ -92,37 +99,33 @@ VG_(dispatch): # this is a jump for which the JITter knows no check need be # made. # - # If it is ebp == VG_EBP_DISPATCH_CHECKED, we had better make + # If ebp == VG_EBP_JMP_CALLRET, we had better make # the check. # + # If ebp == VG_EBP_JMP_SYSCALL, do a system call before + # continuing at eax. + # + # If ebp == VG_EBP_JMP_CLIENTREQ, do a client request before + # continuing at eax. + # # If %ebp has any other value, we panic. # # What the JITter assumes is that VG_(signalreturn_bogusRA) can # only be arrived at from an x86 ret insn, and dually that - # VG_(trap_here) can only be arrived at from an x86 call insn. + # VG_(shutdown) can only be arrived at from an x86 call insn. # The net effect is that all call and return targets are checked # but straightforward jumps are not. - # - # Thinks ... is this safe if the client happens to tailcall - # VG_(trap_here) ? I dont think that can happen -- if it did - # it would be a problem. - # + cmpl $VG_(baseBlock), %ebp - jnz dispatch_checked_maybe + jnz dispatch_exceptional -dispatch_unchecked: +dispatch_boring: # save the jump address at VG_(baseBlock)[VGOFF_(m_eip)], - # so that if this block takes a fault, we later know where we were. movl VGOFF_(m_eip), %esi movl %eax, (%ebp, %esi, 4) - # do we require attention? - # this check has to be after the call/ret transfer checks, because - # we have to ensure that any control transfer following a syscall - # return is an ordinary transfer. 
By the time we get here, we have - # established that the next transfer, which might get delayed till - # after a syscall return, is an ordinary one. - # All a bit subtle ... + # do a timeslice check. + # are we out of timeslice? If yes, defer to scheduler. #OYNK(1001) decl VG_(dispatch_ctr) jz counter_is_zero @@ -136,243 +139,102 @@ dispatch_unchecked: # ebx points at a tt entry # now compare target with the tte.orig_addr field (+0) cmpl %eax, (%ebx) - jnz full_search + jnz fast_lookup_failed + # Found a match. Set the tte.mru_epoch field (+8) # and call the tte.trans_addr field (+4) movl VG_(current_epoch), %ecx movl %ecx, 8(%ebx) call *4(%ebx) - jmp VG_(dispatch) + jmp dispatch_main -full_search: - #no luck? try the full table search - pushl %eax - call VG_(search_transtab) - addl $4, %esp - - # %eax has trans addr or zero - cmpl $0, %eax - jz need_translation - # full table search also zeroes the tte.last_use field, - # so we dont have to do so here. - call *%eax - jmp VG_(dispatch) +fast_lookup_failed: + # %EIP is up to date here since dispatch_boring dominates + movl $VG_TRC_INNER_FASTMISS, %eax + jmp run_innerloop_exit -need_translation: - OYNK(1003) - movl $VG_Y_TRANSLATE, VG_(interrupt_reason) counter_is_zero: - OYNK(1004) - popal - # ----- (the only) exit point from VG_(run_innerloop) ----- - # ----- unless of course vg_oursignalhandler longjmp()s - # ----- back through it, due to an unmanagable signal - ret + # %EIP is up to date here since dispatch_boring dominates + movl $VG_TRC_INNER_COUNTERZERO, %eax + jmp run_innerloop_exit + +run_innerloop_exit: + popl %ebp + popl %edi + popl %esi + popl %edx + popl %ecx + popl %ebx + ret -/* The normal way to get back to the translation loop is to put - the address of the next (original) address and return. - However, simulation of a RET insn requires a check as to whether - the next address is vg_signalreturn_bogusRA. 
If so, a signal - handler is returning, so we need to invoke our own mechanism to - deal with that, by calling vg_signal_returns(). This restores - the simulated machine state from the VgSigContext structure on - the stack, including the (simulated, of course) %eip saved when - the signal was delivered. We then arrange to jump to the - restored %eip. -*/ -dispatch_checked_maybe: - # Possibly a checked dispatch. Sanity check ... - cmpl $VG_EBP_DISPATCH_CHECKED, %ebp - jz dispatch_checked + +/* Other ways of getting out of the inner loop. Placed out-of-line to + make it look cleaner. +*/ +dispatch_exceptional: + # this is jumped to only, not fallen-through from above + cmpl $VG_TRC_EBP_JMP_SPECIAL, %ebp + jz dispatch_callret_maybe + cmpl $VG_TRC_EBP_JMP_SYSCALL, %ebp + jz dispatch_syscall + cmpl $VG_TRC_EBP_JMP_CLIENTREQ, %ebp + jz dispatch_clientreq + # ebp has an invalid value ... crap out. pushl $panic_msg_ebp call VG_(panic) # (never returns) -dispatch_checked: - OYNK(2000) - # first off, restore %ebp -- since it is currently wrong +dispatch_syscall: + # save %eax in %EIP and defer to sched + movl $VG_(baseBlock), %ebp + movl VGOFF_(m_eip), %esi + movl %eax, (%ebp, %esi, 4) + movl $VG_TRC_EBP_JMP_SYSCALL, %eax + jmp run_innerloop_exit + +dispatch_clientreq: + # save %eax in %EIP and defer to sched + movl $VG_(baseBlock), %ebp + movl VGOFF_(m_eip), %esi + movl %eax, (%ebp, %esi, 4) + movl $VG_TRC_EBP_JMP_CLIENTREQ, %eax + jmp run_innerloop_exit + +dispatch_callret_maybe: + # save %eax in %EIP movl $VG_(baseBlock), %ebp + movl VGOFF_(m_eip), %esi + movl %eax, (%ebp, %esi, 4) # see if we need to mess with stack blocks - pushl %ebp pushl %eax call VG_(delete_client_stack_blocks_following_ESP_change) popl %eax - popl %ebp + movl $VG_(baseBlock), %ebp - # is this a signal return? + # is this a call/return which we need to mess with cmpl $VG_(signalreturn_bogusRA), %eax - jz dispatch_to_signalreturn_bogusRA - # should we intercept this call? 
- cmpl $VG_(trap_here), %eax - jz dispatch_to_trap_here - # ok, its not interesting. Handle the normal way. - jmp dispatch_unchecked - -dispatch_to_signalreturn_bogusRA: - OYNK(2001) - pushal - call VG_(signal_returns) - popal - # %EIP will now point to the insn which should have followed - # the signal delivery. Jump to it. Since we no longer have any - # hint from the JITter about whether or not it is checkable, - # go via the conservative route. - movl VGOFF_(m_eip), %esi - movl (%ebp, %esi, 4), %eax - jmp dispatch_checked - - -/* Similarly, check CALL targets to see if it is the ultra-magical - vg_trap_here(), and, if so, act accordingly. See vg_clientmalloc.c. - Be careful not to get the real and simulated CPUs, - stacks and regs mixed up ... -*/ -dispatch_to_trap_here: - OYNK(111) - /* Considering the params to vg_trap_here(), we should have: - 12(%ESP) is what_to_do - 8(%ESP) is arg2 - 4(%ESP) is arg1 - 0(%ESP) is return address - */ - movl VGOFF_(m_esp), %esi - movl (%ebp, %esi, 4), %ebx - # %ebx now holds simulated %ESP - cmpl $0x4000, 12(%ebx) - jz handle_malloc - cmpl $0x4001, 12(%ebx) - jz handle_malloc - cmpl $0x4002, 12(%ebx) - jz handle_malloc - cmpl $0x5000, 12(%ebx) - jz handle_free - cmpl $0x5001, 12(%ebx) - jz handle_free - cmpl $0x5002, 12(%ebx) - jz handle_free - cmpl $6666, 12(%ebx) - jz handle_calloc - cmpl $7777, 12(%ebx) - jz handle_realloc - cmpl $8888, 12(%ebx) - jz handle_memalign - push $panic_msg_trap - call VG_(panic) - # vg_panic never returns - -handle_malloc: - # %ESP is in %ebx - pushl 12(%ebx) - pushl 8(%ebx) - call VG_(client_malloc) - addl $8, %esp - # returned value is in %eax - jmp save_eax_and_simulate_RET + jz dispatch_callret + cmpl $VG_(shutdown), %eax + jz dispatch_callret -handle_free: - # %ESP is in %ebx - pushl 12(%ebx) - pushl 8(%ebx) - call VG_(client_free) - addl $8, %esp - jmp simulate_RET - -handle_calloc: - # %ESP is in %ebx - pushl 8(%ebx) - pushl 4(%ebx) - call VG_(client_calloc) - addl $8, %esp - # returned 
value is in %eax - jmp save_eax_and_simulate_RET - -handle_realloc: - # %ESP is in %ebx - pushl 8(%ebx) - pushl 4(%ebx) - call VG_(client_realloc) - addl $8, %esp - # returned value is in %eax - jmp save_eax_and_simulate_RET + # ok, its not interesting. Handle the normal way. + jmp dispatch_boring -handle_memalign: - # %ESP is in %ebx - pushl 8(%ebx) - pushl 4(%ebx) - call VG_(client_memalign) - addl $8, %esp - # returned value is in %eax - jmp save_eax_and_simulate_RET +dispatch_callret: + # %EIP is up to date here since dispatch_callret_maybe dominates + movl $VG_TRC_EBP_JMP_SPECIAL, %eax + jmp run_innerloop_exit -save_eax_and_simulate_RET: - movl VGOFF_(m_eax), %esi - movl %eax, (%ebp, %esi, 4) # %eax -> %EAX - # set %EAX bits to VALID - movl VGOFF_(sh_eax), %esi - movl $0x0 /* All 32 bits VALID */, (%ebp, %esi, 4) - # fall thru ... -simulate_RET: - # standard return - movl VGOFF_(m_esp), %esi - movl (%ebp, %esi, 4), %ebx # %ESP -> %ebx - movl 0(%ebx), %eax # RA -> %eax - addl $4, %ebx # %ESP += 4 - movl %ebx, (%ebp, %esi, 4) # %ebx -> %ESP - jmp dispatch_checked # jump to %eax .data -panic_msg_trap: -.ascii "dispatch_to_trap_here: unknown what_to_do" -.byte 0 panic_msg_ebp: .ascii "vg_dispatch: %ebp has invalid value!" .byte 0 .text - -/*------------------------------------------------------------*/ -/*--- A helper for delivering signals when the client is ---*/ -/*--- (presumably) blocked in a system call. ---*/ -/*------------------------------------------------------------*/ - -/* Returns, in %eax, the next orig_addr to run. - The caller needs to decide whether the returned orig_addr - requires special handling. - - extern Addr VG_(run_singleton_translation) ( Addr trans_addr ) -*/ - -/* should we take care to save the FPU state here? 
*/ - -.globl VG_(run_singleton_translation) -VG_(run_singleton_translation): - movl 4(%esp), %eax # eax = trans_addr - pushl %ebx - pushl %ecx - pushl %edx - pushl %esi - pushl %edi - pushl %ebp - - # set up ebp correctly for translations - movl $VG_(baseBlock), %ebp - - # run the translation - call *%eax - - # next orig_addr is correctly in %eax already - - popl %ebp - popl %edi - popl %esi - popl %edx - popl %ecx - popl %ebx - - ret ##--------------------------------------------------------------------## ##--- end vg_dispatch.S ---## diff --git a/coregrind/vg_from_ucode.c b/coregrind/vg_from_ucode.c index 5e320840ee..0514cf9e22 100644 --- a/coregrind/vg_from_ucode.c +++ b/coregrind/vg_from_ucode.c @@ -1069,44 +1069,48 @@ static void synth_call_baseBlock_method ( Bool ensure_shortform, } +static void load_ebp_from_JmpKind ( JmpKind jmpkind ) +{ + switch (jmpkind) { + case JmpBoring: + break; + case JmpCall: + case JmpRet: + emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_SPECIAL, R_EBP ); + break; + case JmpSyscall: + emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_SYSCALL, R_EBP ); + break; + case JmpClientReq: + emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_CLIENTREQ, R_EBP ); + break; + default: + VG_(panic)("load_ebp_from_JmpKind"); + } +} + /* Jump to the next translation, by loading its original addr into - %eax and returning to the scheduler. Or, if is a RET transfer, - don't return; instead jump to vg_dispatch_when_RET, which checks - whether this is a signal handler returning, and takes suitable - evasive action. + %eax and returning to the scheduler. Signal special requirements + by loading a special value into %ebp first. */ -static void synth_jmp_reg ( Int reg, - Bool is_ret_dispatch, - Bool is_call_dispatch ) +static void synth_jmp_reg ( Int reg, JmpKind jmpkind ) { + load_ebp_from_JmpKind ( jmpkind ); if (reg != R_EAX) emit_movv_reg_reg ( 4, reg, R_EAX ); - if (is_ret_dispatch || is_call_dispatch) { - /* The (hopefully) rare case. 
*/ - vg_assert(!(is_ret_dispatch && is_call_dispatch)); - emit_movv_lit_reg ( 4, VG_EBP_DISPATCH_CHECKED, R_EBP ); - } emit_ret(); } /* Same deal as synth_jmp_reg. */ -static void synth_jmp_lit ( Addr addr ) +static void synth_jmp_lit ( Addr addr, JmpKind jmpkind ) { + load_ebp_from_JmpKind ( jmpkind ); emit_movv_lit_reg ( 4, addr, R_EAX ); emit_ret(); } -/* Dispatch, but with a call-target check. */ -static void synth_jmp_lit_call_dispatch ( Addr addr ) -{ - emit_movv_lit_reg ( 4, addr, R_EAX ); - emit_movv_lit_reg ( 4, VG_EBP_DISPATCH_CHECKED, R_EBP ); - emit_ret(); -} - - static void synth_jcond_lit ( Condcode cond, Addr addr ) { /* Do the following: @@ -1124,7 +1128,7 @@ static void synth_jcond_lit ( Condcode cond, Addr addr ) */ emit_get_eflags(); emit_jcondshort_delta ( invertCondition(cond), 5+1 ); - synth_jmp_lit ( addr ); + synth_jmp_lit ( addr, JmpBoring ); } @@ -1138,7 +1142,7 @@ static void synth_jmp_ifzero_reg_lit ( Int reg, Addr addr ) */ emit_cmpl_zero_reg ( reg ); emit_jcondshort_delta ( CondNZ, 5+1 ); - synth_jmp_lit ( addr ); + synth_jmp_lit ( addr, JmpBoring ); } @@ -2472,25 +2476,29 @@ static void emitUInstr ( Int i, UInstr* u ) vg_assert(u->tag2 == NoValue); vg_assert(u->tag1 == RealReg || u->tag1 == Literal); if (u->cond == CondAlways) { - if (u->tag1 == RealReg) { - synth_jmp_reg ( u->val1, u->ret_dispatch, u->call_dispatch ); - } else { - vg_assert(!u->ret_dispatch); - if (u->call_dispatch) - synth_jmp_lit_call_dispatch ( - u->tag1==Literal ? u->lit32 : u->val1 ); - else - synth_jmp_lit ( - u->tag1==Literal ? 
u->lit32 : u->val1 ); + switch (u->tag1) { + case RealReg: + synth_jmp_reg ( u->val1, u->jmpkind ); + break; + case Literal: + synth_jmp_lit ( u->lit32, u->jmpkind ); + break; + default: + VG_(panic)("emitUInstr(JMP, unconditional, default)"); + break; } } else { - if (u->tag1 == RealReg) { - VG_(panic)("emitUInstr: conditional jump to reg"); - } else { - vg_assert(!u->ret_dispatch); - vg_assert(!u->call_dispatch); - synth_jcond_lit ( u->cond, - u->tag1==Literal ? u->lit32 : u->val1 ); + switch (u->tag1) { + case RealReg: + VG_(panic)("emitUInstr(JMP, conditional, RealReg)"); + break; + case Literal: + vg_assert(u->jmpkind == JmpBoring); + synth_jcond_lit ( u->cond, u->lit32 ); + break; + default: + VG_(panic)("emitUInstr(JMP, conditional, default)"); + break; } } break; diff --git a/coregrind/vg_helpers.S b/coregrind/vg_helpers.S index 3431111ee7..72de1347cb 100644 --- a/coregrind/vg_helpers.S +++ b/coregrind/vg_helpers.S @@ -48,45 +48,6 @@ */ -/* - On entry: - %ECX value - %EBX value - %EAX value -- also the result - RA <- %esp -- after pushal+pushfl is 36(%esp) -*/ -.global VG_(helper_do_client_request) -VG_(helper_do_client_request): - pushal - pushfl - - movl 48(%esp), %eax - pushl %eax - movl 48(%esp), %eax - pushl %eax - movl 48(%esp), %eax - pushl %eax - - call VG_(handle_client_request) - movl %eax, 52(%esp) - - addl $12, %esp - - popfl - popal - ret - - -.global VG_(helper_do_syscall) -VG_(helper_do_syscall): - pushal - call VG_(wrap_syscall) - popal -# movl $VG_(baseBlock), %ebp - ret - - - .global VG_(helper_value_check0_fail) VG_(helper_value_check0_fail): pushal @@ -116,21 +77,6 @@ VG_(helper_value_check4_fail): ret -/* Set things up so the dispatch loop exits normally. Used when it is - detected that the program wants to finish, ie it has called - vg_shutdown. 
-*/ -.global VG_(helper_request_normal_exit) -VG_(helper_request_normal_exit): - pushl %eax - movl VG_(dispatch_ctr), %eax - movl %eax, VG_(dispatch_ctr_SAVED) - movl $1, VG_(dispatch_ctr) - movl $VG_Y_EXIT, VG_(interrupt_reason) - popl %eax - ret - - /* Do a original-code-write check for the address in %ebp. */ .global VG_(helper_smc_check4) VG_(helper_smc_check4): diff --git a/coregrind/vg_include.h b/coregrind/vg_include.h index 3181fd8f8f..5d9825f12d 100644 --- a/coregrind/vg_include.h +++ b/coregrind/vg_include.h @@ -117,6 +117,27 @@ prime. */ #define VG_N_EC_LISTS /*997*/ 4999 +/* Defines the thread-scheduling timeslice, in terms of the number of + basic blocks we attempt to run each thread for. Smaller values + give finer interleaving but much increased scheduling overheads. */ +#define VG_SCHEDULING_QUANTUM 10000 + +/* The maximum number of pthreads that we support. This is + deliberately not very high since our implementation of some of the + scheduler algorithms is surely O(N^2) in the number of threads, + since that's simple, at least. And (in practice) we hope that most + programs do not need many threads. */ +#define VG_N_THREADS 20 + +/* Number of file descriptors that can simultaneously be waited on for + I/O to complete. Perhaps this should be the same as VG_N_THREADS + (surely a thread can't wait on more than one fd at once?. Who + knows.) */ +#define VG_N_WAITING_FDS 10 + +/* Maximum number of mutexes allowed. */ +#define VG_N_MUTEXES 10 + /* --------------------------------------------------------------------- Basic types @@ -353,30 +374,219 @@ extern Bool VG_(is_empty_arena) ( ArenaId aid ); /* --------------------------------------------------------------------- - Exports of vg_signals.c + Exports of vg_clientfuns.c ------------------------------------------------------------------ */ -/* The maximum number of basic blocks that we're prepared to run in a - signal handler which is called when the client is stuck in a - blocking system call. 
The purpose of this is to check that such a - signal handler doesn't merely do a longjmp() and keep going - forever; it should return instead. NOTE that this doesn't apply to - signals delivered under normal conditions, only when they are - delivered and the client is already blocked in a system call. */ -#define VG_MAX_BBS_IN_IMMEDIATE_SIGNAL 50000 +/* This doesn't export code or data that valgrind.so needs to link + against. However, the scheduler does need to know the following + request codes. A few, publically-visible, request codes are also + defined in valgrind.h. */ + +#define VG_USERREQ__MALLOC 0x2001 +#define VG_USERREQ__BUILTIN_NEW 0x2002 +#define VG_USERREQ__BUILTIN_VEC_NEW 0x2003 + +#define VG_USERREQ__FREE 0x2004 +#define VG_USERREQ__BUILTIN_DELETE 0x2005 +#define VG_USERREQ__BUILTIN_VEC_DELETE 0x2006 + +#define VG_USERREQ__CALLOC 0x2007 +#define VG_USERREQ__REALLOC 0x2008 +#define VG_USERREQ__MEMALIGN 0x2009 + + +#define VG_USERREQ__PTHREAD_CREATE 0x3001 +#define VG_USERREQ__PTHREAD_CREATE_BOGUSRA 0x3002 +#define VG_USERREQ__PTHREAD_JOIN 0x3003 +#define VG_USERREQ__PTHREAD_GET_THREADID 0x3004 +#define VG_USERREQ__PTHREAD_MUTEX_INIT 0x3005 +#define VG_USERREQ__PTHREAD_MUTEX_LOCK 0x3006 +#define VG_USERREQ__PTHREAD_MUTEX_UNLOCK 0x3007 +#define VG_USERREQ__PTHREAD_MUTEX_DESTROY 0x3008 +#define VG_USERREQ__PTHREAD_CANCEL 0x3009 + +/* --------------------------------------------------------------------- + Constants pertaining to the simulated CPU state, VG_(baseBlock), + which need to go here to avoid ugly circularities. + ------------------------------------------------------------------ */ + +/* How big is the saved FPU state? */ +#define VG_SIZE_OF_FPUSTATE 108 +/* ... and in words ... 
*/ +#define VG_SIZE_OF_FPUSTATE_W ((VG_SIZE_OF_FPUSTATE+3)/4) + + +/* --------------------------------------------------------------------- + Exports of vg_scheduler.c + ------------------------------------------------------------------ */ + +/* ThreadIds are simply indices into the vg_threads[] array. */ +typedef + UInt + ThreadId; + +/* MutexIds are simply indices into the vg_mutexes[] array. */ +typedef + UInt + MutexId; + + +#define VG_INVALID_THREADID ((ThreadId)(-1)) + +typedef + enum { + VgTs_Empty, /* this slot is not in use */ + VgTs_Runnable, /* waiting to be scheduled */ + VgTs_WaitJoiner, /* waiting for someone to do join on me */ + VgTs_WaitJoinee, /* waiting for the thread I did join on */ + VgTs_WaitFD, /* waiting for I/O completion on a fd */ + VgTs_WaitMX, /* waiting on a mutex */ + VgTs_Sleeping /* sleeping for a while */ + } + ThreadStatus; + +typedef + struct { + /* The thread identity is simply the index in vg_threads[]. + ThreadId == 0 is the root thread and has the special property + that we don't try and allocate or deallocate its stack. */ + + /* Current scheduling status. */ + ThreadStatus status; + + /* Identity of joiner (thread who called join on me), or + VG_INVALID_THREADID if no one asked to join yet. */ + ThreadId joiner; + + /* Identity of mutex we are waiting on, if .status == WaitMX. */ + MutexId waited_on_mid; + + /* If VgTs_Sleeping, this is when we should wake up. */ + ULong awaken_at; + + /* return value */ + void* retval; + + /* Stacks. When a thread slot is freed, we don't deallocate its + stack; we just leave it lying around for the next use of the + slot. If the next use of the slot requires a larger stack, + only then is the old one deallocated and a new one + allocated. + + For the main thread (threadid == 0), this mechanism doesn't + apply. We don't know the size of the stack since we didn't + allocate it, and furthermore we never reallocate it. 
*/ + + /* The allocated size of this thread's stack (permanently zero + if this is ThreadId == 0, since we didn't allocate its stack) */ + UInt stack_size; + + /* Address of the lowest word in this thread's stack. NULL means + not allocated yet. + */ + Addr stack_base; + + /* Saved machine context. */ + UInt m_eax; + UInt m_ebx; + UInt m_ecx; + UInt m_edx; + UInt m_esi; + UInt m_edi; + UInt m_ebp; + UInt m_esp; + UInt m_eflags; + UInt m_eip; + UInt m_fpu[VG_SIZE_OF_FPUSTATE_W]; + + UInt sh_eax; + UInt sh_ebx; + UInt sh_ecx; + UInt sh_edx; + UInt sh_esi; + UInt sh_edi; + UInt sh_ebp; + UInt sh_esp; + UInt sh_eflags; + } + ThreadState; + + +/* Copy the specified thread's state into VG_(baseBlock) in + preparation for running it. */ +extern void VG_(load_thread_state)( ThreadId ); + +/* Save the specified thread's state back in VG_(baseBlock), and fill + VG_(baseBlock) with junk, for sanity-check reasons. */ +extern void VG_(save_thread_state)( ThreadId ); + +/* Get the thread state block for the specified thread. */ +extern ThreadState* VG_(get_thread_state)( ThreadId ); + + +/* Create, and add to TT/TC, the translation of a client basic + block. */ +extern void VG_(create_translation_for) ( Addr orig_addr ); + +/* Return codes from the scheduler. */ +typedef + enum { VgSrc_Deadlock, VgSrc_Shutdown, VgSrc_BbsDone } + VgSchedReturnCode; + +/* The scheduler. */ +extern VgSchedReturnCode VG_(scheduler) ( void ); + +extern void VG_(scheduler_init) ( void ); + + +/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */ +extern jmp_buf VG_(scheduler_jmpbuf); +/* ... and if so, here's the signal which caused it to do so. */ +extern Int VG_(longjmpd_on_signal); + + +/* We check that the initial stack, which we can't move, is allocated + here. VG_(scheduler_init) checks this. +*/ +#define VG_STARTUP_STACK_MASK (Addr)0xBFFF8000 + + +/* The red-zone size which we put at the bottom (highest address) of + thread stacks, for paranoia reasons. 
This can be arbitrary, and + doesn't really need to be set at compile time. */ +#define VG_AR_CLIENT_STACKBASE_REDZONE_SZW 4 + +#define VG_AR_CLIENT_STACKBASE_REDZONE_SZB \ + (VG_AR_CLIENT_STACKBASE_REDZONE_SZW * VKI_BYTES_PER_WORD) + + + +/* --------------------------------------------------------------------- + Exports of vg_signals.c + ------------------------------------------------------------------ */ extern void VG_(sigstartup_actions) ( void ); -extern void VG_(deliver_signals) ( void ); +extern void VG_(deliver_signals) ( ThreadId ); extern void VG_(unblock_host_signal) ( Int sigNo ); /* Fake system calls for signal handling. */ -extern void VG_(do__NR_sigaction) ( void ); +extern void VG_(do__NR_sigaction) ( ThreadId tid ); extern void VG_(do__NR_sigprocmask) ( Int how, vki_ksigset_t* set ); +/* Bogus return address for signal handlers. Is never executed. */ +extern void VG_(signalreturn_bogusRA) ( void ); +/* Modify the current thread's state once we have detected it is + returning from a signal handler. */ +extern void VG_(signal_returns) ( ThreadId ); +/* Handy utilities to block/restore all host signals. */ +extern void VG_(block_all_host_signals) + ( /* OUT */ vki_ksigset_t* saved_mask ); +extern void VG_(restore_host_signals) + ( /* IN */ vki_ksigset_t* saved_mask ); /* --------------------------------------------------------------------- Exports of vg_mylibc.c @@ -420,6 +630,7 @@ extern Char* VG_(strdup) ( ArenaId aid, const Char* s); extern Char* VG_(getenv) ( Char* name ); extern Int VG_(getpid) ( void ); +extern ULong VG_(read_microsecond_timer)( void ); extern Char VG_(toupper) ( Char c ); @@ -444,19 +655,28 @@ extern void VG_(assert_fail) ( Char* expr, Char* file, Int line, Char* fn ) __attribute__ ((__noreturn__)); -/* Later ... extern void vg_restore_SIGABRT ( void ); */ - /* Reading files. 
*/ extern Int VG_(open_read) ( Char* pathname ); extern void VG_(close) ( Int fd ); extern Int VG_(read) ( Int fd, void* buf, Int count); extern Int VG_(write) ( Int fd, void* buf, Int count); +extern Int VG_(fcntl) ( Int fd, Int cmd, Int arg ); + +extern Int VG_(select)( Int n, + vki_fd_set* readfds, + vki_fd_set* writefds, + vki_fd_set* exceptfds, + struct vki_timeval * timeout ); +extern Int VG_(nanosleep)( const struct vki_timespec *req, + struct vki_timespec *rem ); + + /* mmap-ery ... */ extern void* VG_(mmap)( void* start, UInt length, UInt prot, UInt flags, UInt fd, UInt offset ); -extern Int VG_(munmap)( void* start, Int length ); +extern Int VG_(munmap)( void* start, Int length ); /* Print a (panic) message, and abort. */ @@ -594,6 +814,18 @@ typedef Condcode; +/* Descriptions of additional properties of *unconditional* jumps. */ +typedef + enum { + JmpBoring=0, /* boring unconditional jump */ + JmpCall=1, /* jump due to an x86 call insn */ + JmpRet=2, /* jump due to an x86 ret insn */ + JmpSyscall=3, /* do a system call, then jump */ + JmpClientReq=4 /* do a client request, then jump */ + } + JmpKind; + + /* Flags. User-level code can only read/write O(verflow), S(ign), Z(ero), A(ux-carry), C(arry), P(arity), and may also write D(irection). That's a total of 7 flags. A FlagSet is a bitset, @@ -662,8 +894,7 @@ typedef UChar cond; /* condition, for jumps */ Bool smc_check:1; /* do a smc test, if writes memory. */ Bool signed_widen:1; /* signed or unsigned WIDEN ? */ - Bool ret_dispatch:1; /* Is this jump as a result of RET ? */ - Bool call_dispatch:1; /* Is this jump as a result of CALL ? 
*/ + JmpKind jmpkind:3; /* additional properties of unconditional JMP */ } UInstr; @@ -845,7 +1076,7 @@ typedef extern Bool VG_(client_perm_maybe_describe)( Addr a, AddrInfo* ai ); -extern UInt VG_(handle_client_request) ( UInt code, Addr aa, UInt nn ); +extern UInt VG_(handle_client_request) ( UInt* arg_block ); extern void VG_(delete_client_stack_blocks_following_ESP_change) ( void ); @@ -886,13 +1117,10 @@ extern void VG_(symtab_notify_munmap) ( Addr start, UInt length ); Exports of vg_clientmalloc.c ------------------------------------------------------------------ */ -/* these numbers are not arbitary. if you change them, - adjust vg_dispatch.S as well */ - typedef enum { Vg_AllocMalloc = 0, - Vg_AllocNew = 1, + Vg_AllocNew = 1, Vg_AllocNewVec = 2 } VgAllocKind; @@ -912,20 +1140,19 @@ extern void VG_(clientmalloc_done) ( void ); extern void VG_(describe_addr) ( Addr a, AddrInfo* ai ); extern ShadowChunk** VG_(get_malloc_shadows) ( /*OUT*/ UInt* n_shadows ); -/* This should never be called; if it is, something's seriously - wrong. */ -extern UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do ); +/* These are called from the scheduler, when it intercepts a user + request. */ +extern void* VG_(client_malloc) ( UInt size, VgAllocKind kind ); +extern void* VG_(client_memalign) ( UInt align, UInt size ); +extern void VG_(client_free) ( void* ptrV, VgAllocKind kind ); +extern void* VG_(client_calloc) ( UInt nmemb, UInt size1 ); +extern void* VG_(client_realloc) ( void* ptrV, UInt size_new ); /* --------------------------------------------------------------------- Exports of vg_main.c ------------------------------------------------------------------ */ -/* How big is the saved FPU state? */ -#define VG_SIZE_OF_FPUSTATE 108 -/* ... and in words ... */ -#define VG_SIZE_OF_FPUSTATE_W ((VG_SIZE_OF_FPUSTATE+3)/4) - /* A structure used as an intermediary when passing the simulated CPU's state to some assembly fragments, particularly system calls. 
Stuff is copied from baseBlock to here, the assembly magic runs, @@ -941,10 +1168,6 @@ extern UInt VG_(m_state_static) [8 /* int regs, in Intel order */ extern void VG_(copy_baseBlock_to_m_state_static) ( void ); extern void VG_(copy_m_state_static_to_baseBlock) ( void ); -/* Create, and add to TT/TC, the translation of a client basic - block. */ -extern void VG_(create_translation_for) ( Addr orig_addr ); - /* Called when some unhandleable client behaviour is detected. Prints a msg and aborts. */ extern void VG_(unimplemented) ( Char* msg ); @@ -960,12 +1183,6 @@ extern UInt VG_(stack)[10000]; vg_deliver_signal_immediately(). */ extern UInt VG_(sigstack)[10000]; - -/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */ -extern jmp_buf VG_(toploop_jmpbuf); -/* ... and if so, here's the signal which caused it to do so. */ -extern Int VG_(longjmpd_on_signal); - /* Holds client's %esp at the point we gained control. From this the client's argc, argv and envp are deduced. */ extern Addr VG_(esp_at_startup); @@ -994,13 +1211,6 @@ extern ULong VG_(bbs_to_go); /* Counts downwards in vg_run_innerloop. */ extern UInt VG_(dispatch_ctr); -/* If vg_dispatch_ctr is set to 1 to force a stop, its - previous value is saved here. */ -extern UInt VG_(dispatch_ctr_SAVED); - -/* This is why vg_run_innerloop() exited. */ -extern UInt VG_(interrupt_reason); - /* Is the client running on the simulated CPU or the real one? */ extern Bool VG_(running_on_simd_CPU); /* Initially False */ @@ -1068,6 +1278,10 @@ extern UInt VG_(smc_fancy_passed); extern UInt VG_(sanity_fast_count); extern UInt VG_(sanity_slow_count); +/* Counts pertaining to the scheduler. */ +extern UInt VG_(num_scheduling_events_MINOR); +extern UInt VG_(num_scheduling_events_MAJOR); + /* --------------------------------------------------------------------- Exports of vg_memory.c @@ -1095,7 +1309,7 @@ extern Bool VGM_(check_readable_asciiz) ( Addr a, Addr* bad_addr ); /* Sanity checks which may be done at any time. 
Doing them at signal-delivery time turns out to be convenient. */ -extern void VG_(do_sanity_checks) ( Bool force_expensive ); +extern void VG_(do_sanity_checks) ( ThreadId tid, Bool force_expensive ); /* Very cheap ... */ extern Bool VG_(first_and_last_secondaries_look_plausible) ( void ); @@ -1134,22 +1348,21 @@ extern Bool VG_(is_plausible_stack_addr) ( Addr ); Exports of vg_syscall_mem.c ------------------------------------------------------------------ */ -/* Counts the depth of nested syscalls. Is used in - VG_(deliver_signals) do discover whether or not the client is in a - syscall (presumably _blocked_ in a syscall) when a signal is - delivered. If so, the signal delivery mechanism needs to behave - differently from normal. */ -extern Int VG_(syscall_depth); +extern void VG_(perform_assumed_nonblocking_syscall) ( ThreadId tid ); -extern void VG_(wrap_syscall) ( void ); +extern void VG_(check_known_blocking_syscall) ( ThreadId tid, + Int syscallno, + Int* /*IN*/ res ); extern Bool VG_(is_kerror) ( Int res ); -#define KERNEL_DO_SYSCALL(result_lvalue) \ +#define KERNEL_DO_SYSCALL(thread_id, result_lvalue) \ + VG_(load_thread_state)(thread_id); \ VG_(copy_baseBlock_to_m_state_static)(); \ VG_(do_syscall)(); \ VG_(copy_m_state_static_to_baseBlock)(); \ - result_lvalue = VG_(baseBlock)[VGOFF_(m_eax)]; + VG_(save_thread_state)(thread_id); \ + result_lvalue = VG_(get_thread_state)(thread_id)->m_eax; /* --------------------------------------------------------------------- @@ -1242,20 +1455,15 @@ extern void VG_(swizzle_esp_then_start_GDB) ( void ); Exports of vg_dispatch.S ------------------------------------------------------------------ */ -extern void VG_(dispatch); -extern void VG_(run_innerloop) ( void ); - -/* Returns the next orig_addr to run. */ -extern Addr VG_(run_singleton_translation) ( Addr trans_addr ); +/* Run a thread for a (very short) while, until some event happens + which means we need to defer to the scheduler. 
*/ +extern UInt VG_(run_innerloop) ( void ); /* --------------------------------------------------------------------- Exports of vg_helpers.S ------------------------------------------------------------------ */ -/* For doing exits ... */ -extern void VG_(helper_request_normal_exit); - /* SMC fast checks. */ extern void VG_(helper_smc_check4); @@ -1304,9 +1512,6 @@ extern void VG_(helper_value_check2_fail); extern void VG_(helper_value_check1_fail); extern void VG_(helper_value_check0_fail); -extern void VG_(helper_do_syscall); -extern void VG_(helper_do_client_request); - /* --------------------------------------------------------------------- The state of the simulated CPU. @@ -1434,9 +1639,6 @@ extern Int VGOFF_(helper_value_check2_fail); extern Int VGOFF_(helper_value_check1_fail); extern Int VGOFF_(helper_value_check0_fail); -extern Int VGOFF_(helper_do_syscall); -extern Int VGOFF_(helper_do_client_request); - extern Int VGOFF_(helperc_STOREV4); /* :: UInt -> Addr -> void */ extern Int VGOFF_(helperc_STOREV2); /* :: UInt -> Addr -> void */ extern Int VGOFF_(helperc_STOREV1); /* :: UInt -> Addr -> void */ @@ -1449,8 +1651,6 @@ extern Int VGOFF_(handle_esp_assignment); /* :: Addr -> void */ extern Int VGOFF_(fpu_write_check); /* :: Addr -> Int -> void */ extern Int VGOFF_(fpu_read_check); /* :: Addr -> Int -> void */ -extern Int VGOFF_(helper_request_normal_exit); - #endif /* ndef __VG_INCLUDE_H */ diff --git a/coregrind/vg_kerneliface.h b/coregrind/vg_kerneliface.h index 15ce80d352..9ec236acb5 100644 --- a/coregrind/vg_kerneliface.h +++ b/coregrind/vg_kerneliface.h @@ -135,6 +135,10 @@ typedef /* Copied from /usr/src/linux-2.4.9-13/include/asm/errno.h */ #define VKI_EINVAL 22 /* Invalid argument */ +#define VKI_ENOMEM 12 /* Out of memory */ + +#define VKI_EWOULDBLOCK VKI_EAGAIN /* Operation would block */ +#define VKI_EAGAIN 11 /* Try again */ /* Gawd ... hack ... 
*/ @@ -166,6 +170,108 @@ typedef struct vki__user_cap_data_struct { #define VKI_SIZEOF_STRUCT_TERMIO 17 +/* File descriptor sets, for doing select(). Copied from + /usr/src/linux-2.4.9-31/include/linux/posix_types.h +*/ +/* + * This allows for 1024 file descriptors: if NR_OPEN is ever grown + * beyond that you'll have to change this too. But 1024 fd's seem to be + * enough even for such "real" unices like OSF/1, so hopefully this is + * one limit that doesn't have to be changed [again]. + * + * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in + * (and thus ) - but this is a more logical + * place for them. Solved by having dummy defines in . + */ + +/* + * Those macros may have been defined in . But we always + * use the ones here. + */ +#undef VKI_NFDBITS +#define VKI_NFDBITS (8 * sizeof(unsigned long)) + +#undef VKI_FD_SETSIZE +#define VKI_FD_SETSIZE 1024 + +#undef VKI_FDSET_LONGS +#define VKI_FDSET_LONGS (VKI_FD_SETSIZE/VKI_NFDBITS) + +#undef VKI_FDELT +#define VKI_FDELT(d) ((d) / VKI_NFDBITS) + +#undef VKI_FDMASK +#define VKI_FDMASK(d) (1UL << ((d) % VKI_NFDBITS)) + +typedef struct { + unsigned long vki_fds_bits [VKI_FDSET_LONGS]; +} vki_fd_set; + + +/* Gawd ... 
+ Copied from /usr/src/linux-2.4.9-31/./include/asm-i386/posix_types.h +*/ +#undef VKI_FD_SET +#define VKI_FD_SET(fd,fdsetp) \ + __asm__ __volatile__("btsl %1,%0": \ + "=m" (*(vki_fd_set *) (fdsetp)):"r" ((int) (fd))) + +#undef VKI_FD_CLR +#define VKI_FD_CLR(fd,fdsetp) \ + __asm__ __volatile__("btrl %1,%0": \ + "=m" (*(vki_fd_set *) (fdsetp)):"r" ((int) (fd))) + +#undef VKI_FD_ISSET +#define VKI_FD_ISSET(fd,fdsetp) (__extension__ ({ \ + unsigned char __result; \ + __asm__ __volatile__("btl %1,%2 ; setb %0" \ + :"=q" (__result) :"r" ((int) (fd)), \ + "m" (*(vki_fd_set *) (fdsetp))); \ + __result; })) + +#undef VKI_FD_ZERO +#define VKI_FD_ZERO(fdsetp) \ +do { \ + int __d0, __d1; \ + __asm__ __volatile__("cld ; rep ; stosl" \ + :"=m" (*(vki_fd_set *) (fdsetp)), \ + "=&c" (__d0), "=&D" (__d1) \ + :"a" (0), "1" (VKI_FDSET_LONGS), \ + "2" ((vki_fd_set *) (fdsetp)) : "memory"); \ +} while (0) + + + +/* +./include/asm-i386/posix_types.h:typedef long __kernel_suseconds_t; +./include/linux/types.h:typedef __kernel_suseconds_t suseconds_t; + +./include/asm-i386/posix_types.h:typedef long __kernel_time_t; +./include/linux/types.h:typedef __kernel_time_t time_t; +*/ + +struct vki_timeval { + /* time_t */ long tv_sec; /* seconds */ + /* suseconds_t */ long tv_usec; /* microseconds */ +}; + + + +/* For fcntl on fds .. + from ./include/asm-i386/fcntl.h */ +#define VKI_F_GETFL 3 /* get file->f_flags */ +#define VKI_F_SETFL 4 /* set file->f_flags */ + +#define VKI_O_NONBLOCK 04000 + +/* For nanosleep ... 
+ from ./include/linux/time.h */ +struct vki_timespec { + /* time_t */ long tv_sec; /* seconds */ + long tv_nsec; /* nanoseconds */ +}; + + #endif /* ndef __VG_KERNELIFACE_H */ /*--------------------------------------------------------------------*/ diff --git a/coregrind/vg_main.c b/coregrind/vg_main.c index 2fb92cf204..6e5c01ab03 100644 --- a/coregrind/vg_main.c +++ b/coregrind/vg_main.c @@ -99,8 +99,6 @@ Int VGOFF_(helper_value_check4_fail) = INVALID_OFFSET; Int VGOFF_(helper_value_check2_fail) = INVALID_OFFSET; Int VGOFF_(helper_value_check1_fail) = INVALID_OFFSET; Int VGOFF_(helper_value_check0_fail) = INVALID_OFFSET; -Int VGOFF_(helper_do_syscall) = INVALID_OFFSET; -Int VGOFF_(helper_do_client_request) = INVALID_OFFSET; Int VGOFF_(helperc_LOADV4) = INVALID_OFFSET; Int VGOFF_(helperc_LOADV2) = INVALID_OFFSET; Int VGOFF_(helperc_LOADV1) = INVALID_OFFSET; @@ -110,7 +108,6 @@ Int VGOFF_(helperc_STOREV1) = INVALID_OFFSET; Int VGOFF_(handle_esp_assignment) = INVALID_OFFSET; Int VGOFF_(fpu_write_check) = INVALID_OFFSET; Int VGOFF_(fpu_read_check) = INVALID_OFFSET; -Int VGOFF_(helper_request_normal_exit) = INVALID_OFFSET; /* This is the actual defn of baseblock. */ @@ -305,14 +302,6 @@ static void vg_init_baseBlock ( void ) = alloc_BaB_1_set( (Addr) & VG_(helper_DAS) ); VGOFF_(helper_DAA) = alloc_BaB_1_set( (Addr) & VG_(helper_DAA) ); - - VGOFF_(helper_request_normal_exit) - = alloc_BaB_1_set( (Addr) & VG_(helper_request_normal_exit) ); - - VGOFF_(helper_do_syscall) - = alloc_BaB_1_set( (Addr) & VG_(helper_do_syscall) ); - VGOFF_(helper_do_client_request) - = alloc_BaB_1_set( (Addr) & VG_(helper_do_client_request) ); } @@ -336,17 +325,6 @@ Addr VG_(esp_saved_over_syscall_d2); /* Counts downwards in vg_run_innerloop. */ UInt VG_(dispatch_ctr); -/* If vg_dispatch_ctr is set to 1 to force a stop, its - previous value is saved here. */ -UInt VG_(dispatch_ctr_SAVED); - -/* This is why vg_run_innerloop() exited. 
*/ -UInt VG_(interrupt_reason); - -/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */ -jmp_buf VG_(toploop_jmpbuf); -/* ... and if so, here's the signal which caused it to do so. */ -Int VG_(longjmpd_on_signal); /* 64-bit counter for the number of basic blocks done. */ ULong VG_(bbs_done); @@ -423,10 +401,12 @@ UInt VG_(smc_discard_count) = 0; /* Counts pertaining to internal sanity checking. */ - UInt VG_(sanity_fast_count) = 0; UInt VG_(sanity_slow_count) = 0; +/* Counts pertaining to the scheduler. */ +UInt VG_(num_scheduling_events_MINOR) = 0; +UInt VG_(num_scheduling_events_MAJOR) = 0; /* --------------------------------------------------------------------- @@ -481,176 +461,6 @@ Char** VG_(client_envp); static Char vg_cmdline_copy[M_VG_CMDLINE_STRLEN]; -/* --------------------------------------------------------------------- - Top level simulation loop. - ------------------------------------------------------------------ */ - -/* Create a translation of the client basic block beginning at - orig_addr, and add it to the translation cache & translation table. - This probably doesn't really belong here, but, hey ... */ -void VG_(create_translation_for) ( Addr orig_addr ) -{ - Addr trans_addr; - TTEntry tte; - Int orig_size, trans_size; - /* Ensure there is space to hold a translation. */ - VG_(maybe_do_lru_pass)(); - VG_(translate)( orig_addr, &orig_size, &trans_addr, &trans_size ); - /* Copy data at trans_addr into the translation cache. - Returned pointer is to the code, not to the 4-byte - header. */ - /* Since the .orig_size and .trans_size fields are - UShort, be paranoid. */ - vg_assert(orig_size > 0 && orig_size < 65536); - vg_assert(trans_size > 0 && trans_size < 65536); - tte.orig_size = orig_size; - tte.orig_addr = orig_addr; - tte.trans_size = trans_size; - tte.trans_addr = VG_(copy_to_transcache) - ( trans_addr, trans_size ); - tte.mru_epoch = VG_(current_epoch); - /* Free the intermediary -- was allocated by VG_(emit_code). 
*/ - VG_(jitfree)( (void*)trans_addr ); - /* Add to trans tab and set back pointer. */ - VG_(add_to_trans_tab) ( &tte ); - /* Update stats. */ - VG_(this_epoch_in_count) ++; - VG_(this_epoch_in_osize) += orig_size; - VG_(this_epoch_in_tsize) += trans_size; - VG_(overall_in_count) ++; - VG_(overall_in_osize) += orig_size; - VG_(overall_in_tsize) += trans_size; - /* Record translated area for SMC detection. */ - VG_(smc_mark_original) ( - VG_(baseBlock)[VGOFF_(m_eip)], orig_size ); -} - - -/* Runs the client program from %EIP (baseBlock[off_eip]) until it - asks to exit, or until vg_bbs_to_go jumps have happened (the latter - case is for debugging). */ - -void VG_(toploop) ( void ) -{ - volatile UInt dispatch_ctr_SAVED; - volatile Int done_this_time; - - /* For the LRU structures, records when the epoch began. */ - volatile ULong epoch_started_at = 0; - - while (True) { - next_outer_loop: - - /* Age the LRU structures if an epoch has been completed. */ - if (VG_(bbs_done) - epoch_started_at >= VG_BBS_PER_EPOCH) { - VG_(current_epoch)++; - epoch_started_at = VG_(bbs_done); - if (VG_(clo_verbosity) > 2) { - UInt tt_used, tc_used; - VG_(get_tt_tc_used) ( &tt_used, &tc_used ); - VG_(message)(Vg_UserMsg, - "%lu bbs, in: %d (%d -> %d), out %d (%d -> %d), TT %d, TC %d", - VG_(bbs_done), - VG_(this_epoch_in_count), - VG_(this_epoch_in_osize), - VG_(this_epoch_in_tsize), - VG_(this_epoch_out_count), - VG_(this_epoch_out_osize), - VG_(this_epoch_out_tsize), - tt_used, tc_used - ); - } - VG_(this_epoch_in_count) = 0; - VG_(this_epoch_in_osize) = 0; - VG_(this_epoch_in_tsize) = 0; - VG_(this_epoch_out_count) = 0; - VG_(this_epoch_out_osize) = 0; - VG_(this_epoch_out_tsize) = 0; - } - - /* Figure out how many bbs to ask vg_run_innerloop to do. */ - if (VG_(bbs_to_go) >= VG_SIGCHECK_INTERVAL) - VG_(dispatch_ctr) = 1 + VG_SIGCHECK_INTERVAL; - else - VG_(dispatch_ctr) = 1 + (UInt)VG_(bbs_to_go); - - /* ... and remember what we asked for. 
*/ - dispatch_ctr_SAVED = VG_(dispatch_ctr); - - /* Now have a go at doing them. */ - VG_(interrupt_reason) = VG_Y_SIGCHECK; - if (__builtin_setjmp(VG_(toploop_jmpbuf)) == 0) { - /* try this ... */ - VG_(run_innerloop)(); - /* We get here if the client didn't take a fault. */ - switch (VG_(interrupt_reason)) { - case VG_Y_SIGCHECK: - /* The counter fell to zero and no other situation has - been detected. */ - vg_assert(VG_(dispatch_ctr) == 0); - done_this_time = dispatch_ctr_SAVED - 1; - VG_(bbs_to_go) -= (ULong)done_this_time; - VG_(bbs_done) += (ULong)done_this_time; - /* Exit if the debug run has ended. */ - if (VG_(bbs_to_go) == 0) goto debug_stop; - VG_(deliver_signals)(); - VG_(do_sanity_checks)(False); - goto next_outer_loop; - case VG_Y_EXIT: - /* The target program tried to exit. */ - done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr_SAVED); - done_this_time --; - VG_(bbs_to_go) -= (ULong)done_this_time; - VG_(bbs_done) += (ULong)done_this_time; - return; - case VG_Y_SMC: - /* A write to original code was detected. */ - done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr_SAVED); - VG_(bbs_to_go) -= (ULong)done_this_time; - VG_(bbs_done) += (ULong)done_this_time; - VG_(flush_transtab)(); - goto next_outer_loop; - case VG_Y_TRANSLATE: { - /* Need to provide a translation of code at vg_m_eip. */ - done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr); - vg_assert(done_this_time > 0); - done_this_time --; - VG_(bbs_to_go) -= (ULong)done_this_time; - VG_(bbs_done) += (ULong)done_this_time; - VG_(create_translation_for)(VG_(baseBlock)[VGOFF_(m_eip)]); - goto next_outer_loop; - } - default: - VG_(panic)("vg_toploop: invalid interrupt reason"); - } - } else { - /* We get here if the client took a fault, which caused our - signal handler to longjmp. 
*/ - done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr); - VG_(bbs_to_go) -= (ULong)done_this_time; - VG_(bbs_done) += (ULong)done_this_time; - if (VG_(interrupt_reason) == VG_Y_EXIT) return; - VG_(deliver_signals)(); - VG_(do_sanity_checks)(False); - VG_(unblock_host_signal)(VG_(longjmpd_on_signal)); - } - } - - /* NOTREACHED */ - - debug_stop: - /* If we exited because of a debug stop, print the translation - of the last block executed -- by translating it again, and - throwing away the result. */ - VG_(printf)( - "======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n"); - VG_(translate)( VG_(baseBlock)[VGOFF_(m_eip)], NULL, NULL, NULL ); - VG_(printf)("\n"); - VG_(printf)( - "======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n"); -} - - /* --------------------------------------------------------------------- Processing of command-line options. ------------------------------------------------------------------ */ @@ -705,7 +515,7 @@ static void process_cmd_line_options ( void ) VG_(clo_optimise) = True; VG_(clo_instrument) = True; VG_(clo_cleanup) = True; - VG_(clo_client_perms) = False; + VG_(clo_client_perms) = True; VG_(clo_smc_check) = /* VG_CLO_SMC_SOME */ VG_CLO_SMC_NONE; VG_(clo_trace_syscalls) = False; VG_(clo_trace_signals) = False; @@ -1014,6 +824,7 @@ static void process_cmd_line_options ( void ) bad_option("--gdb-attach=yes and --trace-children=yes"); } +#if 0 if (VG_(clo_client_perms) && !VG_(clo_instrument)) { VG_(message)(Vg_UserMsg, ""); VG_(message)(Vg_UserMsg, @@ -1023,6 +834,7 @@ static void process_cmd_line_options ( void ) if (VG_(clo_client_perms)) vg_assert(VG_(clo_instrument)); +#endif VG_(clo_logfile_fd) = eventually_logfile_fd; @@ -1106,8 +918,9 @@ void VG_(copy_m_state_static_to_baseBlock) ( void ) static void vg_show_counts ( void ) { VG_(message)(Vg_DebugMsg, - " dispatch: %lu basic blocks, %d tt_fast misses.", - VG_(bbs_done), VG_(tt_fast_misses)); + " lru: %d epochs, %d clearings.", + VG_(current_epoch), + 
VG_(number_of_lrus) ); VG_(message)(Vg_DebugMsg, "translate: new %d (%d -> %d), discard %d (%d -> %d).", VG_(overall_in_count), @@ -1117,9 +930,10 @@ static void vg_show_counts ( void ) VG_(overall_out_osize), VG_(overall_out_tsize) ); VG_(message)(Vg_DebugMsg, - " lru: %d epochs, %d clearings.", - VG_(current_epoch), - VG_(number_of_lrus) ); + " dispatch: %lu basic blocks, %d/%d sched events, %d tt_fast misses.", + VG_(bbs_done), VG_(num_scheduling_events_MAJOR), + VG_(num_scheduling_events_MINOR), + VG_(tt_fast_misses)); VG_(message)(Vg_DebugMsg, "reg-alloc: %d t-req-spill, " "%d+%d orig+spill uis, %d total-reg-r.", @@ -1150,7 +964,8 @@ static void vg_show_counts ( void ) void VG_(main) ( void ) { - Int i; + Int i; + VgSchedReturnCode src; /* Set up our stack sanity-check words. */ for (i = 0; i < 10; i++) { @@ -1211,11 +1026,18 @@ void VG_(main) ( void ) VG_(message)(Vg_UserMsg, ""); VG_(bbs_to_go) = VG_(clo_stop_after); - VG_(toploop)(); + + VG_(scheduler_init)(); + src = VG_(scheduler)(); if (VG_(clo_verbosity) > 0) VG_(message)(Vg_UserMsg, ""); + if (src == VgSrc_Deadlock) { + VG_(message)(Vg_UserMsg, + "Warning: pthread scheduler exited due to deadlock"); + } + if (VG_(clo_instrument)) { VG_(show_all_errors)(); VG_(clientmalloc_done)(); @@ -1226,8 +1048,9 @@ void VG_(main) ( void ) if (VG_(clo_leak_check)) VG_(detect_memory_leaks)(); } VG_(running_on_simd_CPU) = False; - - VG_(do_sanity_checks)(True /*include expensive checks*/ ); + + VG_(do_sanity_checks)( 0 /* root thread */, + True /*include expensive checks*/ ); if (VG_(clo_verbosity) > 1) vg_show_counts(); @@ -1262,6 +1085,7 @@ void VG_(main) ( void ) } /* Prepare to restore state to the real CPU. 
*/ + VG_(load_thread_state)(0); VG_(copy_baseBlock_to_m_state_static)(); /* This pushes a return address on the simulator's stack, which @@ -1349,116 +1173,6 @@ extern void VG_(unimplemented) ( Char* msg ) } -/*-------------------------------------------------------------*/ -/*--- Replace some C lib things with equivs which don't get ---*/ -/*--- spurious value warnings. THEY RUN ON SIMD CPU! ---*/ -/*-------------------------------------------------------------*/ - -char* strrchr ( const char* s, int c ) -{ - UChar ch = (UChar)((UInt)c); - UChar* p = (UChar*)s; - UChar* last = NULL; - while (True) { - if (*p == ch) last = p; - if (*p == 0) return last; - p++; - } -} - -char* strchr ( const char* s, int c ) -{ - UChar ch = (UChar)((UInt)c); - UChar* p = (UChar*)s; - while (True) { - if (*p == ch) return p; - if (*p == 0) return NULL; - p++; - } -} - -char* strcat ( char* dest, const char* src ) -{ - Char* dest_orig = dest; - while (*dest) dest++; - while (*src) *dest++ = *src++; - *dest = 0; - return dest_orig; -} - -unsigned int strlen ( const char* str ) -{ - UInt i = 0; - while (str[i] != 0) i++; - return i; -} - -char* strcpy ( char* dest, const char* src ) -{ - Char* dest_orig = dest; - while (*src) *dest++ = *src++; - *dest = 0; - return dest_orig; -} - -int strncmp ( const char* s1, const char* s2, unsigned int nmax ) -{ - unsigned int n = 0; - while (True) { - if (n >= nmax) return 0; - if (*s1 == 0 && *s2 == 0) return 0; - if (*s1 == 0) return -1; - if (*s2 == 0) return 1; - - if (*(UChar*)s1 < *(UChar*)s2) return -1; - if (*(UChar*)s1 > *(UChar*)s2) return 1; - - s1++; s2++; n++; - } -} - -int strcmp ( const char* s1, const char* s2 ) -{ - while (True) { - if (*s1 == 0 && *s2 == 0) return 0; - if (*s1 == 0) return -1; - if (*s2 == 0) return 1; - - if (*(char*)s1 < *(char*)s2) return -1; - if (*(char*)s1 > *(char*)s2) return 1; - - s1++; s2++; - } -} - -void* memchr(const void *s, int c, unsigned int n) -{ - unsigned int i; - UChar c0 = (UChar)c; - UChar* 
p = (UChar*)s; - for (i = 0; i < n; i++) - if (p[i] == c0) return (void*)(&p[i]); - return NULL; -} - -void* memcpy( void *dst, const void *src, unsigned int len ) -{ - register char *d; - register char *s; - if ( dst > src ) { - d = (char *)dst + len - 1; - s = (char *)src + len - 1; - while ( len-- ) - *d-- = *s--; - } else if ( dst < src ) { - d = (char *)dst; - s = (char *)src; - while ( len-- ) - *d++ = *s++; - } - return dst; -} - /*--------------------------------------------------------------------*/ /*--- end vg_main.c ---*/ /*--------------------------------------------------------------------*/ diff --git a/coregrind/vg_memory.c b/coregrind/vg_memory.c index eea86acd61..b219a3a07f 100644 --- a/coregrind/vg_memory.c +++ b/coregrind/vg_memory.c @@ -2122,10 +2122,11 @@ Bool VG_(first_and_last_secondaries_look_plausible) ( void ) /* A fast sanity check -- suitable for calling circa once per millisecond. */ -void VG_(do_sanity_checks) ( Bool force_expensive ) +void VG_(do_sanity_checks) ( ThreadId tid, Bool force_expensive ) { - Int i; - Bool do_expensive_checks; + Int i; + Bool do_expensive_checks; + ThreadState* tst; if (VG_(sanity_level) < 1) return; @@ -2133,6 +2134,9 @@ void VG_(do_sanity_checks) ( Bool force_expensive ) VG_(sanity_fast_count)++; + tst = VG_(get_thread_state)(tid); + vg_assert(tst != NULL && tst->status != VgTs_Empty); + /* Check that we haven't overrun our private stack. */ for (i = 0; i < 10; i++) { vg_assert(VG_(stack)[i] @@ -2146,7 +2150,7 @@ void VG_(do_sanity_checks) ( Bool force_expensive ) if (VG_(clo_instrument)) { /* Check that the eflags tag is as expected. 
*/ - UInt vv = VG_(baseBlock)[VGOFF_(sh_eflags)]; + UInt vv = tst->sh_eflags; vg_assert(vv == VGM_EFLAGS_VALID || VGM_EFLAGS_INVALID); /* Check that nobody has spuriously claimed that the first or @@ -2154,12 +2158,6 @@ void VG_(do_sanity_checks) ( Bool force_expensive ) vg_assert(VG_(first_and_last_secondaries_look_plausible)); } -# if 0 - if ( (VG_(baseBlock)[VGOFF_(sh_eflags)] & 1) == 1) - VG_(printf)("UNDEF\n") ; else - VG_(printf)("def\n") ; -# endif - /* --- Now some more expensive checks. ---*/ /* Once every 25 times, check some more expensive stuff. */ @@ -2233,6 +2231,9 @@ static void uint_to_bits ( UInt x, Char* str ) vg_assert(w == 36); } +/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread + state table. */ + void VG_(show_reg_tags) ( void ) { Char buf1[36]; diff --git a/coregrind/vg_mylibc.c b/coregrind/vg_mylibc.c index 2ba0753d32..31f2b18928 100644 --- a/coregrind/vg_mylibc.c +++ b/coregrind/vg_mylibc.c @@ -232,7 +232,7 @@ Int VG_(ksignal)(Int signum, void (*sighandler)(Int)) /* --------------------------------------------------------------------- - mmap/munmap, exit + mmap/munmap, exit, fcntl ------------------------------------------------------------------ */ /* Returns -1 on failure. */ @@ -266,6 +266,43 @@ void VG_(exit)( Int status ) vg_assert(2+2 == 5); } +/* Returns -1 on error. */ +Int VG_(fcntl) ( Int fd, Int cmd, Int arg ) +{ + Int res = vg_do_syscall3(__NR_fcntl, fd, cmd, arg); + return VG_(is_kerror)(res) ? -1 : res; +} + +/* Returns -1 on error. */ +Int VG_(select)( Int n, + vki_fd_set* readfds, + vki_fd_set* writefds, + vki_fd_set* exceptfds, + struct vki_timeval * timeout ) +{ + Int res; + UInt args[5]; + args[0] = n; + args[1] = (UInt)readfds; + args[2] = (UInt)writefds; + args[3] = (UInt)exceptfds; + args[4] = (UInt)timeout; + res = vg_do_syscall1(__NR_select, (UInt)(&(args[0])) ); + return VG_(is_kerror)(res) ? -1 : res; + return res; +} + +/* Returns -1 on error, but 0 if ok or interrupted. 
*/ +Int VG_(nanosleep)( const struct vki_timespec *req, + struct vki_timespec *rem ) +{ + Int res; + res = vg_do_syscall2(__NR_nanosleep, (UInt)req, (UInt)rem); + if (res == -VKI_EINVAL) return -1; + return 0; +} + + /* --------------------------------------------------------------------- printf implementation. The key function, vg_vprintf(), emits chars into a caller-supplied function. Distantly derived from: @@ -809,7 +846,6 @@ void VG_(assert_fail) ( Char* expr, Char* file, Int line, Char* fn ) "valgrind", file, line, fn, expr ); VG_(printf)("Please report this bug to me at: %s\n\n", EMAIL_ADDR); VG_(shutdown_logging)(); - /* vg_restore_SIGABRT(); */ VG_(exit)(1); } @@ -819,7 +855,6 @@ void VG_(panic) ( Char* str ) VG_(printf)("Basic block ctr is approximately %llu\n", VG_(bbs_done) ); VG_(printf)("Please report this bug to me at: %s\n\n", EMAIL_ADDR); VG_(shutdown_logging)(); - /* vg_restore_SIGABRT(); */ VG_(exit)(1); } @@ -900,6 +935,16 @@ Int VG_(getpid) ( void ) return res; } +/* Read a notional elapsed (wallclock-time) timer, giving a 64-bit + microseconds count. */ +ULong VG_(read_microsecond_timer)( void ) +{ + Int res; + struct vki_timeval tv; + res = vg_do_syscall2(__NR_gettimeofday, (UInt)&tv, (UInt)NULL); + vg_assert(!VG_(is_kerror)(res)); + return (1000000ULL * (ULong)(tv.tv_sec)) + (ULong)(tv.tv_usec); +} /* --------------------------------------------------------------------- Primitive support for bagging memory via mmap. diff --git a/coregrind/vg_signals.c b/coregrind/vg_signals.c index 2372fc4738..ea2826bebb 100644 --- a/coregrind/vg_signals.c +++ b/coregrind/vg_signals.c @@ -36,13 +36,6 @@ #include "vg_unsafe.h" -/* --------------------------------------------------------------------- - An implementation of signal sets and other grunge, identical to - that in the target kernels (Linux 2.2.X and 2.4.X). 
- ------------------------------------------------------------------ */ - - - /* --------------------------------------------------------------------- Signal state for this process. ------------------------------------------------------------------ */ @@ -64,8 +57,29 @@ void* VG_(sighandler)[VKI_KNSIG]; void* VG_(sigpending)[VKI_KNSIG]; -/* See decl in vg_include.h for explanation. */ -Int VG_(syscall_depth) = 0; + +/* --------------------------------------------------------------------- + Handy utilities to block/restore all host signals. + ------------------------------------------------------------------ */ + +/* Block all host signals, dumping the old mask in *saved_mask. */ +void VG_(block_all_host_signals) ( /* OUT */ vki_ksigset_t* saved_mask ) +{ + Int ret; + vki_ksigset_t block_procmask; + VG_(ksigfillset)(&block_procmask); + ret = VG_(ksigprocmask) + (VKI_SIG_SETMASK, &block_procmask, saved_mask); + vg_assert(ret == 0); +} + +/* Restore the blocking mask using the supplied saved one. */ +void VG_(restore_host_signals) ( /* IN */ vki_ksigset_t* saved_mask ) +{ + Int ret; + ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL); + vg_assert(ret == 0); +} /* --------------------------------------------------------------------- @@ -78,9 +92,14 @@ Int VG_(syscall_depth) = 0; typedef struct { - UInt retaddr; /* Sig handler's (bogus) return address */ - Int sigNo; /* The arg to the sig handler. */ + /* These are parameters to the signal handler. */ + UInt retaddr; /* Sig handler's (bogus) return address */ + Int sigNo; /* The arg to the sig handler. */ + Addr psigInfo; /* ptr to siginfo_t; NULL for now. */ + Addr puContext; /* ptr to ucontext; NULL for now. */ + /* Sanity check word. */ UInt magicPI; + /* Saved processor state. */ UInt fpustate[VG_SIZE_OF_FPUSTATE_W]; UInt eax; UInt ecx; @@ -92,9 +111,14 @@ typedef UInt edi; Addr eip; UInt eflags; + /* Scheduler-private stuff: what was the thread's status prior to + delivering this signal? 
*/ + ThreadStatus status; + /* Sanity check word. Is the highest-addressed word; do not + move!*/ UInt magicE; } - VgSigContext; + VgSigFrame; @@ -113,35 +137,52 @@ void VG_(signalreturn_bogusRA) ( void ) handler. This includes the signal number and a bogus return address. */ static -void vg_push_signal_frame ( int sigNo ) +void vg_push_signal_frame ( ThreadId tid, int sigNo ) { Int i; - UInt esp; - VgSigContext sigctx; + Addr esp; + VgSigFrame* frame; + ThreadState* tst; + + tst = VG_(get_thread_state)(tid); + esp = tst->m_esp; + + esp -= sizeof(VgSigFrame); + frame = (VgSigFrame*)esp; + /* Assert that the frame is placed correctly. */ + vg_assert( (sizeof(VgSigFrame) & 0x3) == 0 ); + vg_assert( ((Char*)(&frame->magicE)) + sizeof(UInt) + == ((Char*)(tst->m_esp)) ); + + frame->retaddr = (UInt)(&VG_(signalreturn_bogusRA)); + frame->sigNo = sigNo; + frame->psigInfo = (Addr)NULL; + frame->puContext = (Addr)NULL; + frame->magicPI = 0x31415927; + for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++) - sigctx.fpustate[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i]; - - sigctx.magicPI = 0x31415927; - sigctx.magicE = 0x27182818; - sigctx.eax = VG_(baseBlock)[VGOFF_(m_eax)]; - sigctx.ecx = VG_(baseBlock)[VGOFF_(m_ecx)]; - sigctx.edx = VG_(baseBlock)[VGOFF_(m_edx)]; - sigctx.ebx = VG_(baseBlock)[VGOFF_(m_ebx)]; - sigctx.ebp = VG_(baseBlock)[VGOFF_(m_ebp)]; - sigctx.esp = VG_(baseBlock)[VGOFF_(m_esp)]; - sigctx.esi = VG_(baseBlock)[VGOFF_(m_esi)]; - sigctx.edi = VG_(baseBlock)[VGOFF_(m_edi)]; - sigctx.eflags = VG_(baseBlock)[VGOFF_(m_eflags)]; - sigctx.eip = VG_(baseBlock)[VGOFF_(m_eip)]; - sigctx.retaddr = (UInt)(&VG_(signalreturn_bogusRA)); - sigctx.sigNo = sigNo; - - esp = VG_(baseBlock)[VGOFF_(m_esp)]; - vg_assert((sizeof(VgSigContext) & 0x3) == 0); - - esp -= sizeof(VgSigContext); - for (i = 0; i < sizeof(VgSigContext)/4; i++) - ((UInt*)esp)[i] = ((UInt*)(&sigctx))[i]; + frame->fpustate[i] = tst->m_fpu[i]; + + frame->eax = tst->m_eax; + frame->ecx = tst->m_ecx; + frame->edx = 
tst->m_edx; + frame->ebx = tst->m_ebx; + frame->ebp = tst->m_ebp; + frame->esp = tst->m_esp; + frame->esi = tst->m_esi; + frame->edi = tst->m_edi; + frame->eip = tst->m_eip; + frame->eflags = tst->m_eflags; + + frame->status = tst->status; + + frame->magicE = 0x27182818; + + /* Set the thread so it will next run the handler. */ + tst->m_esp = esp; + tst->m_eip = (Addr)VG_(sigpending)[sigNo]; + /* This thread needs to be marked runnable, but we leave that the + caller to do. */ /* Make sigNo and retaddr fields readable -- at 0(%ESP) and 4(%ESP) */ if (VG_(clo_instrument)) { @@ -149,11 +190,9 @@ void vg_push_signal_frame ( int sigNo ) VGM_(make_readable) ( ((Addr)esp)+4 ,4 ); } - VG_(baseBlock)[VGOFF_(m_esp)] = esp; - VG_(baseBlock)[VGOFF_(m_eip)] = (Addr)VG_(sigpending)[sigNo]; /* VG_(printf)("pushed signal frame; %%ESP now = %p, next %%EBP = %p\n", - esp, VG_(baseBlock)[VGOFF_(m_eip)]); + esp, tst->m_eip); */ } @@ -162,43 +201,56 @@ void vg_push_signal_frame ( int sigNo ) simulated machine state, and return the signal number that the frame was for. */ static -Int vg_pop_signal_frame ( void ) +Int vg_pop_signal_frame ( ThreadId tid ) { - UInt esp; + Addr esp; Int sigNo, i; - VgSigContext* sigctx; - /* esp is now pointing at the magicPI word on the stack, viz, - eight bytes above the bottom of the vg_sigcontext. - */ - esp = VG_(baseBlock)[VGOFF_(m_esp)]; - sigctx = (VgSigContext*)(esp-4); + VgSigFrame* frame; + ThreadState* tst; + + tst = VG_(get_thread_state)(tid); - vg_assert(sigctx->magicPI == 0x31415927); - vg_assert(sigctx->magicE == 0x27182818); + /* esp is now pointing at the sigNo field in the signal frame. 
*/ + esp = tst->m_esp; + frame = (VgSigFrame*)(esp-4); + + vg_assert(frame->magicPI == 0x31415927); + vg_assert(frame->magicE == 0x27182818); if (VG_(clo_trace_signals)) VG_(message)(Vg_DebugMsg, "vg_pop_signal_frame: valid magic"); /* restore machine state */ for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++) - VG_(baseBlock)[VGOFF_(m_fpustate) + i] = sigctx->fpustate[i]; + tst->m_fpu[i] = frame->fpustate[i]; - /* Mark the sigctx structure as nonaccessible. Has to happen - _before_ vg_m_state.m_esp is given a new value.*/ - if (VG_(clo_instrument)) - VGM_(handle_esp_assignment) ( sigctx->esp ); + /* Mark the frame structure as nonaccessible. Has to happen + _before_ vg_m_state.m_esp is given a new value. + handle_esp_assignment reads %ESP from baseBlock, so we park it + there first. Re-place the junk there afterwards. */ + if (VG_(clo_instrument)) { + vg_assert(VG_(baseBlock)[VGOFF_(m_esp)] == 0xDEADBEEF); + VG_(baseBlock)[VGOFF_(m_esp)] = tst->m_esp; + VGM_(handle_esp_assignment) ( frame->esp ); + VG_(baseBlock)[VGOFF_(m_esp)] = 0xDEADBEEF; + } /* Restore machine state from the saved context. */ - VG_(baseBlock)[VGOFF_(m_eax)] = sigctx->eax; - VG_(baseBlock)[VGOFF_(m_ecx)] = sigctx->ecx; - VG_(baseBlock)[VGOFF_(m_edx)] = sigctx->edx; - VG_(baseBlock)[VGOFF_(m_ebx)] = sigctx->ebx; - VG_(baseBlock)[VGOFF_(m_ebp)] = sigctx->ebp; - VG_(baseBlock)[VGOFF_(m_esp)] = sigctx->esp; - VG_(baseBlock)[VGOFF_(m_esi)] = sigctx->esi; - VG_(baseBlock)[VGOFF_(m_edi)] = sigctx->edi; - VG_(baseBlock)[VGOFF_(m_eflags)] = sigctx->eflags; - VG_(baseBlock)[VGOFF_(m_eip)] = sigctx->eip; - sigNo = sigctx->sigNo; + tst->m_eax = frame->eax; + tst->m_ecx = frame->ecx; + tst->m_edx = frame->edx; + tst->m_ebx = frame->ebx; + tst->m_ebp = frame->ebp; + tst->m_esp = frame->esp; + tst->m_esi = frame->esi; + tst->m_edi = frame->edi; + tst->m_eflags = frame->eflags; + tst->m_eip = frame->eip; + sigNo = frame->sigNo; + + /* And restore the thread's status to what it was before the signal + was delivered. 
*/ + tst->status = frame->status; + return sigNo; } @@ -207,18 +259,17 @@ Int vg_pop_signal_frame ( void ) VgSigContext and continue with whatever was going on before the handler ran. */ -void VG_(signal_returns) ( void ) +void VG_(signal_returns) ( ThreadId tid ) { - Int sigNo, ret; - vki_ksigset_t block_procmask; + Int sigNo; vki_ksigset_t saved_procmask; /* Block host signals ... */ - VG_(ksigfillset)(&block_procmask); - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask); - vg_assert(ret == 0); + VG_(block_all_host_signals)( &saved_procmask ); - sigNo = vg_pop_signal_frame(); + /* Pop the signal frame and restore tid's status to what it was + before the signal was delivered. */ + sigNo = vg_pop_signal_frame(tid); /* You would have thought that the following assertion made sense here: @@ -242,40 +293,18 @@ void VG_(signal_returns) ( void ) VG_(sigpending)[sigNo] = VG_SIGIDLE; /* Unlock and return. */ - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL); - vg_assert(ret == 0); + VG_(restore_host_signals)( &saved_procmask ); - /* The main dispatch loop now continues at vg_m_eip. */ -} - - -/* Restore the default host behaviour of SIGABRT, and unblock it, - so we can exit the simulator cleanly by doing exit/abort/assert fail. -*/ -void VG_(restore_SIGABRT) ( void ) -{ - vki_ksigset_t set; - vki_ksigaction act; - act.ksa_flags = VKI_SA_RESTART; - act.ksa_handler = VKI_SIG_DFL; - VG_(ksigemptyset)(&act.ksa_mask); - - VG_(ksigemptyset)(&set); - VG_(ksigaddset)(&set,VKI_SIGABRT); - - /* If this doesn't work, tough. Don't check return code. */ - VG_(ksigaction)(VKI_SIGABRT, &act, NULL); - VG_(ksigprocmask)(VKI_SIG_UNBLOCK, &set, NULL); + /* Scheduler now can resume this thread, or perhaps some other. */ } /* Deliver all pending signals, by building stack frames for their handlers. 
*/ -void VG_(deliver_signals) ( void ) +void VG_(deliver_signals) ( ThreadId tid ) { - vki_ksigset_t block_procmask; vki_ksigset_t saved_procmask; - Int ret, sigNo; + Int sigNo; Bool found; /* A cheap check. We don't need to have exclusive access @@ -295,10 +324,9 @@ void VG_(deliver_signals) ( void ) blocking all the host's signals. That means vg_oursignalhandler can't run whilst we are messing with stuff. */ - VG_(ksigfillset)(&block_procmask); - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask); - vg_assert(ret == 0); + VG_(block_all_host_signals)( &saved_procmask ); + /* Look for signals to deliver ... */ for (sigNo = 1; sigNo < VKI_KNSIG; sigNo++) { if (VG_(sigpending)[sigNo] == VG_SIGIDLE || VG_(sigpending)[sigNo] == VG_SIGRUNNING) continue; @@ -310,94 +338,19 @@ void VG_(deliver_signals) ( void ) %EIP so that when execution continues, we will enter the signal handler with the frame on top of the client's stack, as it expects. */ - vg_push_signal_frame ( sigNo ); - + vg_push_signal_frame ( tid, sigNo ); + VG_(get_thread_state)(tid)->status = VgTs_Runnable; + /* Signify that the signal has been delivered. */ VG_(sigpending)[sigNo] = VG_SIGRUNNING; } /* Unlock and return. */ - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL); - vg_assert(ret == 0); + VG_(restore_host_signals)( &saved_procmask ); return; } -/* ----------- HACK ALERT ----------- */ -/* Note carefully that this runs with all host signals disabled! 
*/ -static -void vg_deliver_signal_immediately ( Int sigNo ) -{ - Int n_bbs_done; - Int sigNo2; - Addr next_orig_addr; - Addr next_trans_addr; - - if (VG_(clo_verbosity) > 0 - && (True || VG_(clo_trace_signals))) - VG_(message)(Vg_DebugExtraMsg, - "deliver signal %d immediately: BEGIN", sigNo ); - /* VG_(printf)("resumption addr is %p\n", - VG_(baseBlock)[VGOFF_(m_eip)]); */ - - vg_push_signal_frame ( sigNo ); - n_bbs_done = 0; - - /* Single-step the client (ie, run the handler) until it jumps to - VG_(signalreturn_bogusRA) */ - - while (True) { - - if (n_bbs_done >= VG_MAX_BBS_IN_IMMEDIATE_SIGNAL) - VG_(unimplemented)( - "handling signal whilst client blocked in syscall: " - "handler runs too long" - ); - - next_orig_addr = VG_(baseBlock)[VGOFF_(m_eip)]; - - if (next_orig_addr == (Addr)(&VG_(trap_here))) - VG_(unimplemented)( - "handling signal whilst client blocked in syscall: " - "handler calls malloc (et al)" - ); - - /* VG_(printf)("next orig addr = %p\n", next_orig_addr); */ - if (next_orig_addr == (Addr)(&VG_(signalreturn_bogusRA))) - break; - - next_trans_addr = VG_(search_transtab) ( next_orig_addr ); - if (next_trans_addr == (Addr)NULL) { - VG_(create_translation_for) ( next_orig_addr ); - next_trans_addr = VG_(search_transtab) ( next_orig_addr ); - } - - vg_assert(next_trans_addr != (Addr)NULL); - next_orig_addr = VG_(run_singleton_translation)(next_trans_addr); - VG_(baseBlock)[VGOFF_(m_eip)] = next_orig_addr; - n_bbs_done++; - } - - sigNo2 = vg_pop_signal_frame(); - vg_assert(sigNo2 == sigNo); - - if (VG_(clo_verbosity) > 0 - && (True || VG_(clo_trace_signals))) - VG_(message)(Vg_DebugExtraMsg, - "deliver signal %d immediately: END, %d bbs done", - sigNo, n_bbs_done ); - - /* Invalidate the tt_fast cache. We've been (potentially) adding - translations and even possibly doing LRUs without keeping it up - to date, so we'd better nuke it before going any further, to - avoid inconsistencies with the main TT/TC structure. 
*/ - VG_(invalidate_tt_fast)(); -} - - -/* ----------- end of HACK ALERT ----------- */ - - /* Receive a signal from the host, and either discard it or park it in the queue of pending signals. All other signals will be blocked when this handler runs. Runs with all host signals blocked, so as @@ -405,8 +358,7 @@ void vg_deliver_signal_immediately ( Int sigNo ) static void VG_(oursignalhandler) ( Int sigNo ) { - Int ret; - vki_ksigset_t block_procmask; + Int dummy_local; vki_ksigset_t saved_procmask; if (VG_(clo_trace_signals)) { @@ -418,20 +370,24 @@ static void VG_(oursignalhandler) ( Int sigNo ) /* Sanity check. Ensure we're really running on the signal stack we asked for. */ if ( !( - ((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&ret)) + ((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&dummy_local)) && - ((Char*)(&ret) < (Char*)(&(VG_(sigstack)[10000]))) + ((Char*)(&dummy_local) < (Char*)(&(VG_(sigstack)[10000]))) ) ) { - VG_(message)(Vg_DebugMsg, "FATAL: signal delivered on the wrong stack?!"); - VG_(message)(Vg_DebugMsg, "A possible workaround follows. Please tell me"); - VG_(message)(Vg_DebugMsg, "(jseward@acm.org) if the suggested workaround doesn't help."); + VG_(message)(Vg_DebugMsg, + "FATAL: signal delivered on the wrong stack?!"); + VG_(message)(Vg_DebugMsg, + "A possible workaround follows. 
Please tell me"); + VG_(message)(Vg_DebugMsg, + "(jseward@acm.org) if the suggested workaround doesn't help."); VG_(unimplemented) - ("support for progs compiled with -p/-pg; rebuild your prog without -p/-pg"); + ("support for progs compiled with -p/-pg; " + "rebuild your prog without -p/-pg"); } - vg_assert((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&ret)); - vg_assert((Char*)(&ret) < (Char*)(&(VG_(sigstack)[10000]))); + vg_assert((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&dummy_local)); + vg_assert((Char*)(&dummy_local) < (Char*)(&(VG_(sigstack)[10000]))); if (sigNo == VKI_SIGABRT && VG_(sighandler)[sigNo] == NULL) { /* We get here if SIGABRT is delivered and the client hasn't @@ -442,21 +398,19 @@ static void VG_(oursignalhandler) ( Int sigNo ) VG_(end_msg)(); } VG_(ksignal)(VKI_SIGABRT, VKI_SIG_DFL); - VG_(interrupt_reason) = VG_Y_EXIT; VG_(longjmpd_on_signal) = VKI_SIGABRT; - __builtin_longjmp(VG_(toploop_jmpbuf),1); + __builtin_longjmp(VG_(scheduler_jmpbuf),1); } - /* Block all host signals. */ - VG_(ksigfillset)(&block_procmask); - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask); - vg_assert(ret == 0); + VG_(block_all_host_signals)( &saved_procmask ); if (VG_(sighandler)[sigNo] == NULL) { if (VG_(clo_trace_signals)) { VG_(add_to_msg)("unexpected!"); VG_(end_msg)(); } + /* Note: we panic with all signals blocked here. Don't think + that matters. */ VG_(panic)("vg_oursignalhandler: unexpected signal"); } @@ -478,47 +432,26 @@ static void VG_(oursignalhandler) ( Int sigNo ) } } else { - /* Ok, we'd better deliver it to the client, one way or another. */ + /* Ok, we'd better deliver it to the client. */ vg_assert(VG_(sigpending)[sigNo] == VG_SIGIDLE); - - if (VG_(syscall_depth) == 0) { - /* The usual case; delivering a signal to the client, and the - client is not currently in a syscall. Queue it up for - delivery at some point in the future. 
*/ - VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo]; - if (VG_(clo_trace_signals)) { - VG_(add_to_msg)("queued" ); - VG_(end_msg)(); - } - } else { - /* The nasty case, which was causing kmail to freeze up: the - client is (presumably blocked) in a syscall. We have to - deliver the signal right now, because it may be that - running the sighandler is the only way that the syscall - will be able to return. In which case, if we don't do - that, the client will deadlock. */ - if (VG_(clo_trace_signals)) { - VG_(add_to_msg)("delivering immediately" ); - VG_(end_msg)(); - } - /* Note that this runs with all host signals blocked. */ - VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo]; - vg_deliver_signal_immediately(sigNo); - VG_(sigpending)[sigNo] = VG_SIGIDLE; - /* VG_(printf)("resuming at %p\n", VG_(baseBlock)[VGOFF_(m_eip)]); */ + /* Queue it up for delivery at some point in the future. */ + VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo]; + if (VG_(clo_trace_signals)) { + VG_(add_to_msg)("queued" ); + VG_(end_msg)(); } } - /* We've finished messing with the queue, so re-enable host signals. */ - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL); + /* We've finished messing with the queue, so re-enable host + signals. */ + VG_(restore_host_signals)( &saved_procmask ); - vg_assert(ret == 0); if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS || sigNo == VKI_SIGFPE || sigNo == VKI_SIGILL) { - /* Can't continue; must longjmp and thus enter the sighandler - immediately. */ + /* Can't continue; must longjmp back to the scheduler and thus + enter the sighandler immediately. 
*/ VG_(longjmpd_on_signal) = sigNo; - __builtin_longjmp(VG_(toploop_jmpbuf),1); + __builtin_longjmp(VG_(scheduler_jmpbuf),1); } } @@ -559,17 +492,14 @@ void VG_(sigstartup_actions) ( void ) { Int i, ret; - vki_ksigset_t block_procmask; vki_ksigset_t saved_procmask; vki_kstack_t altstack_info; vki_ksigaction sa; - /* VG_(printf)("SIGSTARTUP\n"); */ + /* VG_(printf)("SIGSTARTUP\n"); */ /* Block all signals. saved_procmask remembers the previous mask. */ - VG_(ksigfillset)(&block_procmask); - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask); - vg_assert(ret == 0); + VG_(block_all_host_signals)( &saved_procmask ); /* Register an alternative stack for our own signal handler to run on. */ @@ -615,8 +545,7 @@ void VG_(sigstartup_actions) ( void ) VG_(ksignal)(VKI_SIGABRT, &VG_(oursignalhandler)); /* Finally, restore the blocking mask. */ - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL); - vg_assert(ret == 0); + VG_(restore_host_signals)( &saved_procmask ); } @@ -635,14 +564,10 @@ void VG_(sigshutdown_actions) ( void ) { Int i, ret; - vki_ksigset_t block_procmask; vki_ksigset_t saved_procmask; vki_ksigaction sa; - /* Block all signals. */ - VG_(ksigfillset)(&block_procmask); - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask); - vg_assert(ret == 0); + VG_(block_all_host_signals)( &saved_procmask ); /* copy the sim signal actions to the real ones. */ for (i = 1; i < VKI_KNSIG; i++) { @@ -654,9 +579,7 @@ void VG_(sigshutdown_actions) ( void ) ret = VG_(ksigaction)(i, &sa, NULL); } - /* Finally, copy the simulated process mask to the real one. */ - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL); - vg_assert(ret == 0); + VG_(restore_host_signals)( &saved_procmask ); } @@ -665,18 +588,16 @@ void VG_(sigshutdown_actions) ( void ) ------------------------------------------------------------------ */ /* Do more error checking? 
*/ -void VG_(do__NR_sigaction) ( void ) +void VG_(do__NR_sigaction) ( ThreadId tid ) { UInt res; void* our_old_handler; vki_ksigaction* new_action; vki_ksigaction* old_action; - UInt param1 - = VG_(baseBlock)[VGOFF_(m_ebx)]; /* int sigNo */ - UInt param2 - = VG_(baseBlock)[VGOFF_(m_ecx)]; /* k_sigaction* new_action */ - UInt param3 - = VG_(baseBlock)[VGOFF_(m_edx)]; /* k_sigaction* old_action */ + ThreadState* tst = VG_(get_thread_state)( tid ); + UInt param1 = tst->m_ebx; /* int sigNo */ + UInt param2 = tst->m_ecx; /* k_sigaction* new_action */ + UInt param3 = tst->m_edx; /* k_sigaction* old_action */ new_action = (vki_ksigaction*)param2; old_action = (vki_ksigaction*)param3; @@ -722,7 +643,7 @@ void VG_(do__NR_sigaction) ( void ) } } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); /* VG_(printf)("RES = %d\n", res); */ /* If the client asks for the old handler, maintain our fiction @@ -750,7 +671,7 @@ void VG_(do__NR_sigaction) ( void ) goto good; good: - VG_(baseBlock)[VGOFF_(m_eax)] = (UInt)0; + tst->m_eax = (UInt)0; return; bad_signo: diff --git a/coregrind/vg_syscall.S b/coregrind/vg_syscall.S index 210328a690..c07d95567c 100644 --- a/coregrind/vg_syscall.S +++ b/coregrind/vg_syscall.S @@ -41,10 +41,6 @@ # m_state_static, and back afterwards. VG_(do_syscall): - cmpl $2, VG_(syscall_depth) - jz do_syscall_DEPTH_2 - - # depth 1 copy follows ... # Save all the int registers of the real machines state on the # simulators stack. pushal @@ -104,76 +100,6 @@ VG_(do_syscall): ret - - - - - - - -do_syscall_DEPTH_2: - - # depth 2 copy follows ... - # Save all the int registers of the real machines state on the - # simulators stack. 
- pushal - - # and save the real FPU state too - fwait - fnsave VG_(real_fpu_state_saved_over_syscall_d2) - frstor VG_(real_fpu_state_saved_over_syscall_d2) - - # remember what the simulators stack pointer is - movl %esp, VG_(esp_saved_over_syscall_d2) - - # Now copy the simulated machines state into the real one - # esp still refers to the simulators stack - frstor VG_(m_state_static)+40 - movl VG_(m_state_static)+32, %eax - pushl %eax - popfl - movl VG_(m_state_static)+0, %eax - movl VG_(m_state_static)+4, %ecx - movl VG_(m_state_static)+8, %edx - movl VG_(m_state_static)+12, %ebx - movl VG_(m_state_static)+16, %esp - movl VG_(m_state_static)+20, %ebp - movl VG_(m_state_static)+24, %esi - movl VG_(m_state_static)+28, %edi - - # esp now refers to the simulatees stack - # Do the actual system call - int $0x80 - - # restore stack as soon as possible - # esp refers to simulatees stack - movl %esp, VG_(m_state_static)+16 - movl VG_(esp_saved_over_syscall_d2), %esp - # esp refers to simulators stack - - # ... and undo everything else. - # Copy real state back to simulated state. 
- movl %eax, VG_(m_state_static)+0 - movl %ecx, VG_(m_state_static)+4 - movl %edx, VG_(m_state_static)+8 - movl %ebx, VG_(m_state_static)+12 - movl %ebp, VG_(m_state_static)+20 - movl %esi, VG_(m_state_static)+24 - movl %edi, VG_(m_state_static)+28 - pushfl - popl %eax - movl %eax, VG_(m_state_static)+32 - fwait - fnsave VG_(m_state_static)+40 - frstor VG_(m_state_static)+40 - - # Restore the state of the simulator - frstor VG_(real_fpu_state_saved_over_syscall_d2) - popal - - ret - - ##--------------------------------------------------------------------## ##--- end vg_syscall.S ---## ##--------------------------------------------------------------------## diff --git a/coregrind/vg_to_ucode.c b/coregrind/vg_to_ucode.c index b3bd3c367c..c45ad81f8e 100644 --- a/coregrind/vg_to_ucode.c +++ b/coregrind/vg_to_ucode.c @@ -1607,7 +1607,7 @@ Addr dis_Grp5 ( UCodeBlock* cb, Int sz, Addr eip, Bool* isEnd ) SMC_IF_ALL(cb); uInstr1(cb, JMP, 0, TempReg, t1); uCond(cb, CondAlways); - LAST_UINSTR(cb).call_dispatch = True; + LAST_UINSTR(cb).jmpkind = JmpCall; *isEnd = True; break; case 4: /* jmp Ev */ @@ -1654,7 +1654,7 @@ Addr dis_Grp5 ( UCodeBlock* cb, Int sz, Addr eip, Bool* isEnd ) SMC_IF_ALL(cb); uInstr1(cb, JMP, 0, TempReg, t1); uCond(cb, CondAlways); - LAST_UINSTR(cb).call_dispatch = True; + LAST_UINSTR(cb).jmpkind = JmpCall; *isEnd = True; break; case 4: /* JMP Ev */ @@ -2859,32 +2859,6 @@ Addr dis_xadd_G_E ( UCodeBlock* cb, } -/* Push %ECX, %EBX and %EAX, call helper_do_client_request, and put - the resulting %EAX value back. 
*/ -static -void dis_ClientRequest ( UCodeBlock* cb ) -{ - Int tmpc = newTemp(cb); - Int tmpb = newTemp(cb); - Int tmpa = newTemp(cb); - uInstr2(cb, GET, 4, ArchReg, R_ECX, TempReg, tmpc); - uInstr2(cb, GET, 4, ArchReg, R_EBX, TempReg, tmpb); - uInstr2(cb, GET, 4, ArchReg, R_EAX, TempReg, tmpa); - uInstr0(cb, CALLM_S, 0); - uInstr1(cb, PUSH, 4, TempReg, tmpc); - uInstr1(cb, PUSH, 4, TempReg, tmpb); - uInstr1(cb, PUSH, 4, TempReg, tmpa); - uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_do_client_request)); - uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty); - uInstr1(cb, POP, 4, TempReg, tmpa); - uInstr1(cb, CLEAR, 0, Lit16, 8); - uInstr0(cb, CALLM_E, 0); - uInstr2(cb, PUT, 4, TempReg, tmpa, ArchReg, R_EAX); - if (dis) - VG_(printf)("%%eax = client_request ( %%eax, %%ebx, %%ecx )\n"); -} - - /*------------------------------------------------------------*/ /*--- Disassembling entire basic blocks ---*/ /*------------------------------------------------------------*/ @@ -2909,21 +2883,31 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd ) if (dis) VG_(printf)("\t0x%x: ", eip); /* Spot the client-request magic sequence, if required. 
*/ - if (VG_(clo_client_perms)) { + if (1 /*VG_(clo_client_perms)*/) { UChar* myeip = (UChar*)eip; /* Spot this: C1C01D roll $29, %eax C1C003 roll $3, %eax - C1C01B roll $27, %eax - C1C005 roll $5, %eax + C1C81B rorl $27, %eax + C1C805 rorl $5, %eax + C1C00D roll $13, %eax + C1C013 roll $19, %eax */ - if (myeip[0] == 0xC1 && myeip[1] == 0xC0 && myeip[2] == 0x1D && - myeip[3] == 0xC1 && myeip[4] == 0xC0 && myeip[5] == 0x03 && - myeip[6] == 0xC1 && myeip[7] == 0xC0 && myeip[8] == 0x1B && - myeip[9] == 0xC1 && myeip[10] == 0xC0 && myeip[11] == 0x05) { - vg_assert(VG_(clo_instrument)); - dis_ClientRequest(cb); - eip += 12; + if (myeip[ 0] == 0xC1 && myeip[ 1] == 0xC0 && myeip[ 2] == 0x1D && + myeip[ 3] == 0xC1 && myeip[ 4] == 0xC0 && myeip[ 5] == 0x03 && + myeip[ 6] == 0xC1 && myeip[ 7] == 0xC8 && myeip[ 8] == 0x1B && + myeip[ 9] == 0xC1 && myeip[10] == 0xC8 && myeip[11] == 0x05 && + myeip[12] == 0xC1 && myeip[13] == 0xC0 && myeip[14] == 0x0D && + myeip[15] == 0xC1 && myeip[16] == 0xC0 && myeip[17] == 0x13 + ) { + eip += 18; + uInstr1(cb, JMP, 0, Literal, 0); + uLiteral(cb, eip); + uCond(cb, CondAlways); + LAST_UINSTR(cb).jmpkind = JmpClientReq; + *isEnd = True; + if (dis) + VG_(printf)("%%edx = client_request ( %%eax )\n"); return eip; } } @@ -2978,9 +2962,7 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd ) uInstr2(cb, PUT, 4, TempReg, t1, ArchReg, R_ESP); uInstr1(cb, JMP, 0, TempReg, t2); uCond(cb, CondAlways); - - if (d32 == 0) - LAST_UINSTR(cb).ret_dispatch = True; + LAST_UINSTR(cb).jmpkind = JmpRet; *isEnd = True; if (dis) { @@ -2992,22 +2974,6 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd ) case 0xE8: /* CALL J4 */ d32 = getUDisp32(eip); eip += 4; d32 += eip; /* eip now holds return-to addr, d32 is call-to addr */ - if (d32 == (Addr)&VG_(shutdown)) { - /* Set vg_dispatch_ctr to 1, vg_interrupt_reason to VG_Y_EXIT, - and get back to the dispatch loop. 
We ask for a jump to this - CALL insn because vg_dispatch will ultimately transfer control - to the real CPU, and we want this call to be the first insn - it does. */ - uInstr0(cb, CALLM_S, 0); - uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_request_normal_exit)); - uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty); - uInstr0(cb, CALLM_E, 0); - uInstr1(cb, JMP, 0, Literal, 0); - uLiteral(cb, eip-5); - uCond(cb, CondAlways); - *isEnd = True; - if (dis) VG_(printf)("call 0x%x\n",d32); - } else if (d32 == eip && getUChar(eip) >= 0x58 && getUChar(eip) <= 0x5F) { /* Specially treat the position-independent-code idiom @@ -3040,7 +3006,7 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd ) uInstr1(cb, JMP, 0, Literal, 0); uLiteral(cb, d32); uCond(cb, CondAlways); - LAST_UINSTR(cb).call_dispatch = True; + LAST_UINSTR(cb).jmpkind = JmpCall; *isEnd = True; if (dis) VG_(printf)("call 0x%x\n",d32); } @@ -3179,14 +3145,10 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd ) /* It's important that all ArchRegs carry their up-to-date value at this point. So we declare an end-of-block here, which forces any TempRegs caching ArchRegs to be flushed. 
*/ - t1 = newTemp(cb); - uInstr0(cb, CALLM_S, 0); - uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_do_syscall) ); - uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty); - uInstr0(cb, CALLM_E, 0); uInstr1(cb, JMP, 0, Literal, 0); uLiteral(cb, eip); uCond(cb, CondAlways); + LAST_UINSTR(cb).jmpkind = JmpSyscall; *isEnd = True; if (dis) VG_(printf)("int $0x80\n"); break; diff --git a/coregrind/vg_translate.c b/coregrind/vg_translate.c index 1423b8d3da..d2a8571cda 100644 --- a/coregrind/vg_translate.c +++ b/coregrind/vg_translate.c @@ -153,8 +153,8 @@ void VG_(emptyUInstr) ( UInstr* u ) u->val1 = u->val2 = u->val3 = 0; u->tag1 = u->tag2 = u->tag3 = NoValue; u->flags_r = u->flags_w = FlagsEmpty; - u->call_dispatch = False; - u->smc_check = u->signed_widen = u->ret_dispatch = False; + u->jmpkind = JmpBoring; + u->smc_check = u->signed_widen = False; u->lit32 = 0; u->opcode = 0; u->size = 0; @@ -259,8 +259,7 @@ void copyAuxInfoFromTo ( UInstr* src, UInstr* dst ) dst->extra4b = src->extra4b; dst->smc_check = src->smc_check; dst->signed_widen = src->signed_widen; - dst->ret_dispatch = src->ret_dispatch; - dst->call_dispatch = src->call_dispatch; + dst->jmpkind = src->jmpkind; dst->flags_r = src->flags_r; dst->flags_w = src->flags_w; } @@ -917,10 +916,15 @@ void VG_(ppUInstr) ( Int instrNo, UInstr* u ) case JMP: case CC2VAL: case PUSH: case POP: case CLEAR: case CALLM: - if (u->opcode == JMP && u->ret_dispatch) - VG_(printf)("-r"); - if (u->opcode == JMP && u->call_dispatch) - VG_(printf)("-c"); + if (u->opcode == JMP) { + switch (u->jmpkind) { + case JmpCall: VG_(printf)("-c"); break; + case JmpRet: VG_(printf)("-r"); break; + case JmpSyscall: VG_(printf)("-sys"); break; + case JmpClientReq: VG_(printf)("-cli"); break; + default: break; + } + } VG_(printf)("\t"); ppUOperand(u, 1, u->size, False); break; diff --git a/coregrind/vg_transtab.c b/coregrind/vg_transtab.c index 1580f03928..34c35cc4e1 100644 --- a/coregrind/vg_transtab.c +++ b/coregrind/vg_transtab.c @@ -533,9 +533,9 @@ 
void VG_(smc_check4) ( Addr a ) /* Force an exit before the next basic block, so the translation cache can be flushed appropriately. */ - VG_(dispatch_ctr_SAVED) = VG_(dispatch_ctr); - VG_(dispatch_ctr) = 1; - VG_(interrupt_reason) = VG_Y_SMC; + // VG_(dispatch_ctr_SAVED) = VG_(dispatch_ctr); + //VG_(dispatch_ctr) = 1; + //VG_(interrupt_reason) = VG_Y_SMC; } diff --git a/helgrind/Makefile.am b/helgrind/Makefile.am index 4f0034ab54..9e73341c85 100644 --- a/helgrind/Makefile.am +++ b/helgrind/Makefile.am @@ -29,6 +29,7 @@ val_PROGRAMS = valgrind.so valgrinq.so valgrinq_so_SOURCES = vg_valgrinq_dummy.c valgrind_so_SOURCES = \ + vg_scheduler.c \ vg_clientmalloc.c \ vg_clientperms.c \ vg_demangle.c \ @@ -70,10 +71,10 @@ noinst_HEADERS = \ vg_unsafe.h -vg_memory.o: vg_memory.c +vg_memory.o: vg_memory.c $(noinst_HEADERS) $(COMPILE) -O2 @PREFERRED_STACK_BOUNDARY@ -c $< -vg_clientmalloc.o: vg_clientmalloc.c +vg_clientmalloc.o: vg_clientmalloc.c $(noinst_HEADERS) $(COMPILE) -fno-omit-frame-pointer -c $< diff --git a/include/valgrind.h b/include/valgrind.h index 5a32ab5656..4271cd8e52 100644 --- a/include/valgrind.h +++ b/include/valgrind.h @@ -48,19 +48,55 @@ /* This defines the magic code sequence which the JITter spots and handles magically. Don't look too closely at this; it will rot - your brain. + your brain. Valgrind dumps the result value in %EDX, so we first + copy the default value there, so that it is returned when not + running on Valgrind. Since %EAX points to a block of mem + containing the args, you can pass as many args as you want like + this. Currently this is set up to deal with 4 args since that's + the max that we appear to need (pthread_create). 
*/ -#define VALGRIND_MAGIC_SEQUENCE(_zzq_res,_zzq_code,_zzq_addr,_zzq_len) \ - asm volatile("movl %1, %%eax\n\t" \ - "movl %2, %%ebx\n\t" \ - "movl %3, %%ecx\n\t" \ - "roll $29, %%eax ; roll $3, %%eax\n\t" \ - "roll $27, %%eax ; roll $5, %%eax\n\t" \ - "movl %%eax, %0\t" \ - : "=r" (_zzq_res) \ - : "r" (_zzq_code), "r" (_zzq_addr), "r" (_zzq_len) \ - : "eax", "ebx", "ecx", "cc", "memory" \ - ); +#define VALGRIND_MAGIC_SEQUENCE( \ + _zzq_rlval, /* result lvalue */ \ + _zzq_default, /* result returned when running on real CPU */ \ + _zzq_request, /* request code */ \ + _zzq_arg1, /* request first param */ \ + _zzq_arg2, /* request second param */ \ + _zzq_arg3, /* request third param */ \ + _zzq_arg4 /* request fourth param */ ) \ + \ + { volatile unsigned int _zzq_args[5]; \ + _zzq_args[0] = (volatile unsigned int)_zzq_request; \ + _zzq_args[1] = (volatile unsigned int)_zzq_arg1; \ + _zzq_args[2] = (volatile unsigned int)_zzq_arg2; \ + _zzq_args[3] = (volatile unsigned int)_zzq_arg3; \ + _zzq_args[4] = (volatile unsigned int)_zzq_arg4; \ + asm volatile("movl %1, %%eax\n\t" \ + "movl %2, %%edx\n\t" \ + "roll $29, %%eax ; roll $3, %%eax\n\t" \ + "rorl $27, %%eax ; rorl $5, %%eax\n\t" \ + "roll $13, %%eax ; roll $19, %%eax\n\t" \ + "movl %%edx, %0\t" \ + : "=r" (_zzq_rlval) \ + : "r" (&_zzq_args[0]), "r" (_zzq_default) \ + : "eax", "edx", "cc", "memory" \ + ); \ + } + + +/* Some request codes. There are many more of these, but most are not + exposed to end-user view. These are the public ones, all of the + form 0x1000 + small_number. 
+*/ + +#define VG_USERREQ__MAKE_NOACCESS 0x1001 +#define VG_USERREQ__MAKE_WRITABLE 0x1002 +#define VG_USERREQ__MAKE_READABLE 0x1003 +#define VG_USERREQ__DISCARD 0x1004 +#define VG_USERREQ__CHECK_WRITABLE 0x1005 +#define VG_USERREQ__CHECK_READABLE 0x1006 +#define VG_USERREQ__MAKE_NOACCESS_STACK 0x1007 +#define VG_USERREQ__RUNNING_ON_VALGRIND 0x1008 +#define VG_USERREQ__DO_LEAK_CHECK 0x1009 /* unimplemented */ @@ -71,7 +107,9 @@ descriptions Valgrind will use in subsequent error messages. */ #define VALGRIND_MAKE_NOACCESS(_qzz_addr,_qzz_len) \ ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,1001,_qzz_addr,_qzz_len); \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \ + VG_USERREQ__MAKE_NOACCESS, \ + _qzz_addr, _qzz_len, 0, 0); \ _qzz_res; \ }) @@ -79,7 +117,9 @@ for _qzz_len bytes. */ #define VALGRIND_MAKE_WRITABLE(_qzz_addr,_qzz_len) \ ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,1002,_qzz_addr,_qzz_len); \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \ + VG_USERREQ__MAKE_WRITABLE, \ + _qzz_addr, _qzz_len, 0, 0); \ _qzz_res; \ }) @@ -87,7 +127,9 @@ for _qzz_len bytes. */ #define VALGRIND_MAKE_READABLE(_qzz_addr,_qzz_len) \ ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,1003,_qzz_addr,_qzz_len); \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \ + VG_USERREQ__MAKE_READABLE, \ + _qzz_addr, _qzz_len, 0, 0); \ _qzz_res; \ }) @@ -99,7 +141,9 @@ handle. */ #define VALGRIND_DISCARD(_qzz_blkindex) \ ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,2004,0,_qzz_blkindex); \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \ + VG_USERREQ__DISCARD, \ + 0, _qzz_blkindex, 0, 0); \ _qzz_res; \ }) @@ -111,20 +155,24 @@ If suitable addressibility is not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. 
*/ -#define VALGRIND_CHECK_WRITABLE(_qzz_addr,_qzz_len) \ - ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,2002,_qzz_addr,_qzz_len); \ - _qzz_res; \ +#define VALGRIND_CHECK_WRITABLE(_qzz_addr,_qzz_len) \ + ({unsigned int _qzz_res; \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \ + VG_USERREQ__CHECK_WRITABLE, \ + _qzz_addr, _qzz_len, 0, 0); \ + _qzz_res; \ }) /* Check that memory at _qzz_addr is addressible and defined for _qzz_len bytes. If suitable addressibility and definedness are not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. */ -#define VALGRIND_CHECK_READABLE(_qzz_addr,_qzz_len) \ - ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,2003,_qzz_addr,_qzz_len); \ - _qzz_res; \ +#define VALGRIND_CHECK_READABLE(_qzz_addr,_qzz_len) \ + ({unsigned int _qzz_res; \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \ + VG_USERREQ__CHECK_READABLE, \ + _qzz_addr, _qzz_len, 0, 0); \ + _qzz_res; \ }) @@ -133,10 +181,10 @@ are not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. */ -#define VALGRIND_CHECK_DEFINED(__lvalue) \ - (void) \ - VALGRIND_CHECK_READABLE( \ - (volatile unsigned char *)&(__lvalue), \ +#define VALGRIND_CHECK_DEFINED(__lvalue) \ + (void) \ + VALGRIND_CHECK_READABLE( \ + (volatile unsigned char *)&(__lvalue), \ (unsigned int)(sizeof (__lvalue))) @@ -146,11 +194,38 @@ value. The record associated with this setting will be automatically removed by Valgrind when the containing routine exits. 
*/ -#define VALGRIND_MAKE_NOACCESS_STACK(_qzz_addr,_qzz_len) \ - ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,3001,_qzz_addr,_qzz_len); \ - _qzz_res; \ +#define VALGRIND_MAKE_NOACCESS_STACK(_qzz_addr,_qzz_len) \ + {unsigned int _qzz_res; \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \ + VG_USERREQ__MAKE_NOACCESS_STACK, \ + _qzz_addr, _qzz_len, 0, 0); \ + } + + +/* Returns 1 if running on Valgrind, 0 if running on the real CPU. + Currently implemented but untested. */ +#define RUNNING_ON_VALGRIND \ + ({unsigned int _qzz_res; \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* returned if not */, \ + VG_USERREQ__RUNNING_ON_VALGRIND, \ + 0, 0, 0, 0); \ + _qzz_res; \ }) +/* Mark memory, intended to be on the client's stack, at _qzz_addr as + unaddressible and undefined for _qzz_len bytes. Does not return a + value. The record associated with this setting will be + automatically removed by Valgrind when the containing routine + exits. + + Currently implemented but untested. +*/ +#define VALGRIND_DO_LEAK_CHECK \ + {unsigned int _qzz_res; \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \ + VG_USERREQ__DO_LEAK_CHECK, \ + 0, 0, 0, 0); \ + } + #endif diff --git a/lackey/Makefile.am b/lackey/Makefile.am index 4f0034ab54..9e73341c85 100644 --- a/lackey/Makefile.am +++ b/lackey/Makefile.am @@ -29,6 +29,7 @@ val_PROGRAMS = valgrind.so valgrinq.so valgrinq_so_SOURCES = vg_valgrinq_dummy.c valgrind_so_SOURCES = \ + vg_scheduler.c \ vg_clientmalloc.c \ vg_clientperms.c \ vg_demangle.c \ @@ -70,10 +71,10 @@ noinst_HEADERS = \ vg_unsafe.h -vg_memory.o: vg_memory.c +vg_memory.o: vg_memory.c $(noinst_HEADERS) $(COMPILE) -O2 @PREFERRED_STACK_BOUNDARY@ -c $< -vg_clientmalloc.o: vg_clientmalloc.c +vg_clientmalloc.o: vg_clientmalloc.c $(noinst_HEADERS) $(COMPILE) -fno-omit-frame-pointer -c $< diff --git a/memcheck/Makefile.am b/memcheck/Makefile.am index 4f0034ab54..9e73341c85 100644 --- a/memcheck/Makefile.am +++ b/memcheck/Makefile.am @@ -29,6 +29,7 @@ val_PROGRAMS = valgrind.so valgrinq.so 
valgrinq_so_SOURCES = vg_valgrinq_dummy.c valgrind_so_SOURCES = \ + vg_scheduler.c \ vg_clientmalloc.c \ vg_clientperms.c \ vg_demangle.c \ @@ -70,10 +71,10 @@ noinst_HEADERS = \ vg_unsafe.h -vg_memory.o: vg_memory.c +vg_memory.o: vg_memory.c $(noinst_HEADERS) $(COMPILE) -O2 @PREFERRED_STACK_BOUNDARY@ -c $< -vg_clientmalloc.o: vg_clientmalloc.c +vg_clientmalloc.o: vg_clientmalloc.c $(noinst_HEADERS) $(COMPILE) -fno-omit-frame-pointer -c $< diff --git a/none/Makefile.am b/none/Makefile.am index 4f0034ab54..9e73341c85 100644 --- a/none/Makefile.am +++ b/none/Makefile.am @@ -29,6 +29,7 @@ val_PROGRAMS = valgrind.so valgrinq.so valgrinq_so_SOURCES = vg_valgrinq_dummy.c valgrind_so_SOURCES = \ + vg_scheduler.c \ vg_clientmalloc.c \ vg_clientperms.c \ vg_demangle.c \ @@ -70,10 +71,10 @@ noinst_HEADERS = \ vg_unsafe.h -vg_memory.o: vg_memory.c +vg_memory.o: vg_memory.c $(noinst_HEADERS) $(COMPILE) -O2 @PREFERRED_STACK_BOUNDARY@ -c $< -vg_clientmalloc.o: vg_clientmalloc.c +vg_clientmalloc.o: vg_clientmalloc.c $(noinst_HEADERS) $(COMPILE) -fno-omit-frame-pointer -c $< diff --git a/tests/blocked_syscall.c b/tests/blocked_syscall.c index 0ac9d5cc98..53af388f03 100644 --- a/tests/blocked_syscall.c +++ b/tests/blocked_syscall.c @@ -10,7 +10,7 @@ void the_sighandler ( int signo ) { int nw; // assert(signo == SIGUSR1); - // printf("sighandler running; should unblock now\n"); + printf("sighandler running; should unblock now\n"); nw = write(fds[1], "zzz", 1); // assert(nw == 1); } diff --git a/valgrind.h b/valgrind.h index 5a32ab5656..4271cd8e52 100644 --- a/valgrind.h +++ b/valgrind.h @@ -48,19 +48,55 @@ /* This defines the magic code sequence which the JITter spots and handles magically. Don't look too closely at this; it will rot - your brain. + your brain. Valgrind dumps the result value in %EDX, so we first + copy the default value there, so that it is returned when not + running on Valgrind. 
Since %EAX points to a block of mem + containing the args, you can pass as many args as you want like + this. Currently this is set up to deal with 4 args since that's + the max that we appear to need (pthread_create). */ -#define VALGRIND_MAGIC_SEQUENCE(_zzq_res,_zzq_code,_zzq_addr,_zzq_len) \ - asm volatile("movl %1, %%eax\n\t" \ - "movl %2, %%ebx\n\t" \ - "movl %3, %%ecx\n\t" \ - "roll $29, %%eax ; roll $3, %%eax\n\t" \ - "roll $27, %%eax ; roll $5, %%eax\n\t" \ - "movl %%eax, %0\t" \ - : "=r" (_zzq_res) \ - : "r" (_zzq_code), "r" (_zzq_addr), "r" (_zzq_len) \ - : "eax", "ebx", "ecx", "cc", "memory" \ - ); +#define VALGRIND_MAGIC_SEQUENCE( \ + _zzq_rlval, /* result lvalue */ \ + _zzq_default, /* result returned when running on real CPU */ \ + _zzq_request, /* request code */ \ + _zzq_arg1, /* request first param */ \ + _zzq_arg2, /* request second param */ \ + _zzq_arg3, /* request third param */ \ + _zzq_arg4 /* request fourth param */ ) \ + \ + { volatile unsigned int _zzq_args[5]; \ + _zzq_args[0] = (volatile unsigned int)_zzq_request; \ + _zzq_args[1] = (volatile unsigned int)_zzq_arg1; \ + _zzq_args[2] = (volatile unsigned int)_zzq_arg2; \ + _zzq_args[3] = (volatile unsigned int)_zzq_arg3; \ + _zzq_args[4] = (volatile unsigned int)_zzq_arg4; \ + asm volatile("movl %1, %%eax\n\t" \ + "movl %2, %%edx\n\t" \ + "roll $29, %%eax ; roll $3, %%eax\n\t" \ + "rorl $27, %%eax ; rorl $5, %%eax\n\t" \ + "roll $13, %%eax ; roll $19, %%eax\n\t" \ + "movl %%edx, %0\t" \ + : "=r" (_zzq_rlval) \ + : "r" (&_zzq_args[0]), "r" (_zzq_default) \ + : "eax", "edx", "cc", "memory" \ + ); \ + } + + +/* Some request codes. There are many more of these, but most are not + exposed to end-user view. These are the public ones, all of the + form 0x1000 + small_number. 
+*/ + +#define VG_USERREQ__MAKE_NOACCESS 0x1001 +#define VG_USERREQ__MAKE_WRITABLE 0x1002 +#define VG_USERREQ__MAKE_READABLE 0x1003 +#define VG_USERREQ__DISCARD 0x1004 +#define VG_USERREQ__CHECK_WRITABLE 0x1005 +#define VG_USERREQ__CHECK_READABLE 0x1006 +#define VG_USERREQ__MAKE_NOACCESS_STACK 0x1007 +#define VG_USERREQ__RUNNING_ON_VALGRIND 0x1008 +#define VG_USERREQ__DO_LEAK_CHECK 0x1009 /* unimplemented */ @@ -71,7 +107,9 @@ descriptions Valgrind will use in subsequent error messages. */ #define VALGRIND_MAKE_NOACCESS(_qzz_addr,_qzz_len) \ ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,1001,_qzz_addr,_qzz_len); \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \ + VG_USERREQ__MAKE_NOACCESS, \ + _qzz_addr, _qzz_len, 0, 0); \ _qzz_res; \ }) @@ -79,7 +117,9 @@ for _qzz_len bytes. */ #define VALGRIND_MAKE_WRITABLE(_qzz_addr,_qzz_len) \ ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,1002,_qzz_addr,_qzz_len); \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \ + VG_USERREQ__MAKE_WRITABLE, \ + _qzz_addr, _qzz_len, 0, 0); \ _qzz_res; \ }) @@ -87,7 +127,9 @@ for _qzz_len bytes. */ #define VALGRIND_MAKE_READABLE(_qzz_addr,_qzz_len) \ ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,1003,_qzz_addr,_qzz_len); \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \ + VG_USERREQ__MAKE_READABLE, \ + _qzz_addr, _qzz_len, 0, 0); \ _qzz_res; \ }) @@ -99,7 +141,9 @@ handle. */ #define VALGRIND_DISCARD(_qzz_blkindex) \ ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,2004,0,_qzz_blkindex); \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* default return */, \ + VG_USERREQ__DISCARD, \ + 0, _qzz_blkindex, 0, 0); \ _qzz_res; \ }) @@ -111,20 +155,24 @@ If suitable addressibility is not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. 
*/ -#define VALGRIND_CHECK_WRITABLE(_qzz_addr,_qzz_len) \ - ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,2002,_qzz_addr,_qzz_len); \ - _qzz_res; \ +#define VALGRIND_CHECK_WRITABLE(_qzz_addr,_qzz_len) \ + ({unsigned int _qzz_res; \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \ + VG_USERREQ__CHECK_WRITABLE, \ + _qzz_addr, _qzz_len, 0, 0); \ + _qzz_res; \ }) /* Check that memory at _qzz_addr is addressible and defined for _qzz_len bytes. If suitable addressibility and definedness are not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. */ -#define VALGRIND_CHECK_READABLE(_qzz_addr,_qzz_len) \ - ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,2003,_qzz_addr,_qzz_len); \ - _qzz_res; \ +#define VALGRIND_CHECK_READABLE(_qzz_addr,_qzz_len) \ + ({unsigned int _qzz_res; \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \ + VG_USERREQ__CHECK_READABLE, \ + _qzz_addr, _qzz_len, 0, 0); \ + _qzz_res; \ }) @@ -133,10 +181,10 @@ are not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. */ -#define VALGRIND_CHECK_DEFINED(__lvalue) \ - (void) \ - VALGRIND_CHECK_READABLE( \ - (volatile unsigned char *)&(__lvalue), \ +#define VALGRIND_CHECK_DEFINED(__lvalue) \ + (void) \ + VALGRIND_CHECK_READABLE( \ + (volatile unsigned char *)&(__lvalue), \ (unsigned int)(sizeof (__lvalue))) @@ -146,11 +194,38 @@ value. The record associated with this setting will be automatically removed by Valgrind when the containing routine exits. 
*/ -#define VALGRIND_MAKE_NOACCESS_STACK(_qzz_addr,_qzz_len) \ - ({unsigned int _qzz_res; \ - VALGRIND_MAGIC_SEQUENCE(_qzz_res,3001,_qzz_addr,_qzz_len); \ - _qzz_res; \ +#define VALGRIND_MAKE_NOACCESS_STACK(_qzz_addr,_qzz_len) \ + {unsigned int _qzz_res; \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \ + VG_USERREQ__MAKE_NOACCESS_STACK, \ + _qzz_addr, _qzz_len, 0, 0); \ + } + + +/* Returns 1 if running on Valgrind, 0 if running on the real CPU. + Currently implemented but untested. */ +#define RUNNING_ON_VALGRIND \ + ({unsigned int _qzz_res; \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0 /* returned if not */, \ + VG_USERREQ__RUNNING_ON_VALGRIND, \ + 0, 0, 0, 0); \ + _qzz_res; \ }) +/* Mark memory, intended to be on the client's stack, at _qzz_addr as + unaddressible and undefined for _qzz_len bytes. Does not return a + value. The record associated with this setting will be + automatically removed by Valgrind when the containing routine + exits. + + Currently implemented but untested. +*/ +#define VALGRIND_DO_LEAK_CHECK \ + {unsigned int _qzz_res; \ + VALGRIND_MAGIC_SEQUENCE(_qzz_res, 0, \ + VG_USERREQ__DO_LEAK_CHECK, \ + 0, 0, 0, 0); \ + } + #endif diff --git a/valgrind.in b/valgrind.in index 0b566c94cf..bc4594004c 100755 --- a/valgrind.in +++ b/valgrind.in @@ -164,8 +164,10 @@ fi VG_ARGS="$VALGRIND_OPTS $vgsupp $vgopts" export VG_ARGS -LD_PRELOAD=$VALGRIND/valgrind.so:$LD_PRELOAD +LD_LIBRARY_PATH=$VALGRIND:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH +LD_PRELOAD=valgrind.so:$LD_PRELOAD export LD_PRELOAD +#LD_DEBUG=files +#export LD_DEBUG exec $argopts - - diff --git a/vg_clientmalloc.c b/vg_clientmalloc.c index d2be752d09..d59a029a19 100644 --- a/vg_clientmalloc.c +++ b/vg_clientmalloc.c @@ -250,10 +250,9 @@ static ShadowChunk* client_malloc_shadow ( UInt align, UInt size, /* Allocate memory, noticing whether or not we are doing the full instrumentation thing. 
*/ -void* VG_(client_malloc) ( UInt size, UInt raw_alloc_kind ) +void* VG_(client_malloc) ( UInt size, VgAllocKind kind ) { ShadowChunk* sc; - VgAllocKind kind; VGP_PUSHCC(VgpCliMalloc); client_malloc_init(); @@ -263,21 +262,15 @@ void* VG_(client_malloc) ( UInt size, UInt raw_alloc_kind ) count_freelist(), vg_freed_list_volume, size, raw_alloc_kind ); # endif + + vg_cmalloc_n_mallocs ++; + vg_cmalloc_bs_mallocd += size; + if (!VG_(clo_instrument)) { VGP_POPCC; return VG_(malloc) ( VG_AR_CLIENT, size ); } - switch (raw_alloc_kind) { - case 0x4002: kind = Vg_AllocNewVec; break; - case 0x4001: kind = Vg_AllocNew; break; - case 0x4000: /* malloc */ - case 6666: /* calloc */ - kind = Vg_AllocMalloc; break; - default: /* should not happen */ - /* therefore we make sure it doesn't -- JRS */ - VG_(panic)("VG_(client_malloc): raw_alloc_kind"); - break; /*NOTREACHED*/ - } + sc = client_malloc_shadow ( 0, size, kind ); VGP_POPCC; return (void*)(sc->data); @@ -295,6 +288,10 @@ void* VG_(client_memalign) ( UInt align, UInt size ) count_freelist(), vg_freed_list_volume, align, size ); # endif + + vg_cmalloc_n_mallocs ++; + vg_cmalloc_bs_mallocd += size; + if (!VG_(clo_instrument)) { VGP_POPCC; return VG_(malloc_aligned) ( VG_AR_CLIENT, align, size ); @@ -305,11 +302,10 @@ void* VG_(client_memalign) ( UInt align, UInt size ) } -void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind ) +void VG_(client_free) ( void* ptrV, VgAllocKind kind ) { ShadowChunk* sc; UInt ml_no; - VgAllocKind kind; VGP_PUSHCC(VgpCliMalloc); client_malloc_init(); @@ -319,6 +315,9 @@ void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind ) count_freelist(), vg_freed_list_volume, ptrV, raw_alloc_kind ); # endif + + vg_cmalloc_n_frees ++; + if (!VG_(clo_instrument)) { VGP_POPCC; VG_(free) ( VG_AR_CLIENT, ptrV ); @@ -340,16 +339,6 @@ void VG_(client_free) ( void* ptrV, UInt raw_alloc_kind ) return; } - switch (raw_alloc_kind) { - case 0x5002: kind = Vg_AllocNewVec; break; - case 0x5001: kind = Vg_AllocNew; 
break; - case 0x5000: - default: - kind = Vg_AllocMalloc; - /* should only happen if bug in client code */ - break; - } - /* check if its a matching free() / delete / delete [] */ if (kind != sc->allockind) VG_(record_freemismatch_error) ( (Addr) ptrV ); @@ -386,6 +375,9 @@ void* VG_(client_calloc) ( UInt nmemb, UInt size1 ) nmemb, size1 ); # endif + vg_cmalloc_n_mallocs ++; + vg_cmalloc_bs_mallocd += nmemb * size1; + if (!VG_(clo_instrument)) { VGP_POPCC; return VG_(calloc) ( VG_AR_CLIENT, nmemb, size1 ); @@ -430,6 +422,10 @@ void* VG_(client_realloc) ( void* ptrV, UInt size_new ) ptrV, size_new ); # endif + vg_cmalloc_n_frees ++; + vg_cmalloc_n_mallocs ++; + vg_cmalloc_bs_mallocd += size_new; + if (!VG_(clo_instrument)) { vg_assert(ptrV != NULL && size_new != 0); VGP_POPCC; @@ -573,364 +569,6 @@ void VG_(describe_addr) ( Addr a, AddrInfo* ai ) return; } -/*------------------------------------------------------------*/ -/*--- Replace the C library versions with our own. Hairy. ---*/ -/*------------------------------------------------------------*/ - -/* Below are new versions of malloc, __builtin_new, free, - __builtin_delete, calloc and realloc. - - malloc, __builtin_new, free, __builtin_delete, calloc and realloc - can be entered either on the real CPU or the simulated one. If on - the real one, this is because the dynamic linker is running the - static initialisers for C++, before starting up Valgrind itself. - In this case it is safe to route calls through to - VG_(malloc)/vg_free, since that is self-initialising. - - Once Valgrind is initialised, vg_running_on_simd_CPU becomes True. - The call needs to be transferred from the simulated CPU back to the - real one and routed to the vg_client_* functions. To do that, the - args are passed to vg_trap_here, which the simulator detects. 
The - bogus epilogue fn call is to guarantee that gcc doesn't tailcall - vg_trap_here, since that would cause the simulator's detection to - fail -- it only checks the targets of call transfers, not jumps. - And of course we have to be sure gcc won't inline either the - vg_trap_here or vg_bogus_epilogue. Ha ha ha. What a mess. -*/ - -/* Place afterwards to guarantee it won't get inlined ... */ -static UInt vg_trap_here_WRAPPER ( UInt arg1, UInt arg2, UInt what_to_do ); -static void vg_bogus_epilogue ( void ); - -/* ALL calls to malloc wind up here. */ -void* malloc ( UInt n ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("malloc[simd=%d](%d)", - (UInt)VG_(running_on_simd_CPU), n ); - - if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; } - - vg_cmalloc_n_mallocs ++; - vg_cmalloc_bs_mallocd += n; - - if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4000 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(malloc)(VG_AR_CLIENT, n); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } -} - -void* __builtin_new ( UInt n ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("__builtin_new[simd=%d](%d)", - (UInt)VG_(running_on_simd_CPU), n ); - - if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; } - - vg_cmalloc_n_mallocs++; - vg_cmalloc_bs_mallocd += n; - - if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4001 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(malloc)(VG_AR_CLIENT, n); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return v; - } -} - -void* __builtin_vec_new ( Int n ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("__builtin_vec_new[simd=%d](%d)", - (UInt)VG_(running_on_simd_CPU), n ); - - if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; } - - vg_cmalloc_n_mallocs++; - vg_cmalloc_bs_mallocd += n; - 
- if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( 0, n, 0x4002 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(malloc)(VG_AR_CLIENT, n); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return v; - } -} - -void free ( void* p ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("free[simd=%d](%p)\n", - (UInt)VG_(running_on_simd_CPU), p ); - vg_cmalloc_n_frees ++; - - if (p == NULL) - return; - if (VG_(running_on_simd_CPU)) { - (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5000 ); - vg_bogus_epilogue(); - } else { - VG_(free)(VG_AR_CLIENT, p); - } -} - -void __builtin_delete ( void* p ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("__builtin_delete[simd=%d](%p)\n", - (UInt)VG_(running_on_simd_CPU), p ); - vg_cmalloc_n_frees ++; - - if (p == NULL) - return; - if (VG_(running_on_simd_CPU)) { - (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5001 ); - vg_bogus_epilogue(); - } else { - VG_(free)(VG_AR_CLIENT, p); - } -} - -void __builtin_vec_delete ( void* p ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("__builtin_vec_delete[simd=%d](%p)\n", - (UInt)VG_(running_on_simd_CPU), p ); - vg_cmalloc_n_frees ++; - - if (p == NULL) - return; - if (VG_(running_on_simd_CPU)) { - (void)vg_trap_here_WRAPPER ( 0, (UInt)p, 0x5002 ); - vg_bogus_epilogue(); - } else { - VG_(free)(VG_AR_CLIENT, p); - } -} - -void* calloc ( UInt nmemb, UInt size ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("calloc[simd=%d](%d,%d)", - (UInt)VG_(running_on_simd_CPU), nmemb, size ); - vg_cmalloc_n_mallocs ++; - vg_cmalloc_bs_mallocd += size * nmemb; - - if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( nmemb, size, 6666 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(calloc)(VG_AR_CLIENT, nmemb, size); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return v; - } -} - -void* realloc ( void* 
ptrV, UInt new_size ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("realloc[simd=%d](%p,%d)", - (UInt)VG_(running_on_simd_CPU), ptrV, new_size ); - - if (VG_(clo_sloppy_malloc)) - { while ((new_size % 4) > 0) new_size++; } - - vg_cmalloc_n_frees ++; - vg_cmalloc_n_mallocs ++; - vg_cmalloc_bs_mallocd += new_size; - - if (ptrV == NULL) - return malloc(new_size); - if (new_size == 0) { - free(ptrV); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = 0\n" ); - return NULL; - } - if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( (UInt)ptrV, new_size, 7777 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(realloc)(VG_AR_CLIENT, ptrV, new_size); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return v; - } -} - -void* memalign ( Int alignment, Int n ) -{ - if (VG_(clo_trace_malloc)) - VG_(printf)("memalign[simd=%d](al %d, size %d)", - (UInt)VG_(running_on_simd_CPU), alignment, n ); - - if (VG_(clo_sloppy_malloc)) { while ((n % 4) > 0) n++; } - - vg_cmalloc_n_mallocs ++; - vg_cmalloc_bs_mallocd += n; - - if (VG_(running_on_simd_CPU)) { - UInt v = vg_trap_here_WRAPPER ( alignment, n, 8888 ); - vg_bogus_epilogue(); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } else { - void* v = VG_(malloc_aligned)(VG_AR_CLIENT, alignment, n); - if (VG_(clo_trace_malloc)) - VG_(printf)(" = %p\n", v ); - return (void*)v; - } -} - -void* valloc ( Int size ) -{ - return memalign(VKI_BYTES_PER_PAGE, size); -} - - -/* Various compatibility wrapper functions, for glibc and libstdc++. */ -void cfree ( void* p ) -{ - free ( p ); -} - -void* mallinfo ( void ) -{ - VG_(message)(Vg_UserMsg, - "Warning: incorrectly-handled call to mallinfo()"); - return NULL; -} - - - -int mallopt ( int cmd, int value ) -{ - /* In glibc-2.2.4, 1 denoted a successful return value for mallopt */ - return 1; -} - - -/* Bomb out if we get any of these. 
*/ -void pvalloc ( void ) -{ VG_(panic)("call to pvalloc\n"); } - -void malloc_stats ( void ) -{ VG_(panic)("call to malloc_stats\n"); } -void malloc_usable_size ( void ) -{ VG_(panic)("call to malloc_usable_size\n"); } -void malloc_trim ( void ) -{ VG_(panic)("call to malloc_trim\n"); } -void malloc_get_state ( void ) -{ VG_(panic)("call to malloc_get_state\n"); } -void malloc_set_state ( void ) -{ VG_(panic)("call to malloc_set_state\n"); } - - -int __posix_memalign ( void **memptr, UInt alignment, UInt size ) -{ - void *mem; - - /* Test whether the SIZE argument is valid. It must be a power of - two multiple of sizeof (void *). */ - if (size % sizeof (void *) != 0 || (size & (size - 1)) != 0) - return 22 /*EINVAL*/; - - mem = memalign (alignment, size); - - if (mem != NULL) { - *memptr = mem; - return 0; - } - - return 12 /*ENOMEM*/; -} - - -/*------------------------------------------------------------*/ -/*--- Magic supporting hacks. ---*/ -/*------------------------------------------------------------*/ - -extern UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do ); - -static -UInt vg_trap_here_WRAPPER ( UInt arg1, UInt arg2, UInt what_to_do ) -{ - /* The point of this idiocy is to make a plain, ordinary call to - vg_trap_here which vg_dispatch_when_CALL can spot. Left to - itself, with -fpic, gcc generates "call vg_trap_here@PLT" which - doesn't get spotted, for whatever reason. I guess I could check - _all_ control flow transfers, but that would be an undesirable - performance overhead. - - If you compile without -fpic, gcc generates the obvious call - insn, so the wrappers below will work if they just call - vg_trap_here. But I don't want to rule out building with -fpic, - hence this hack. Sigh. 
- */ - UInt v; - -# define WHERE_TO VG_(trap_here) -# define STRINGIFY(xx) __STRING(xx) - - asm("# call to vg_trap_here\n" - "\t pushl %3\n" - "\t pushl %2\n" - "\t pushl %1\n" - "\t call " STRINGIFY(WHERE_TO) "\n" - "\t addl $12, %%esp\n" - "\t movl %%eax, %0\n" - : "=r" (v) - : "r" (arg1), "r" (arg2), "r" (what_to_do) - : "eax", "esp", "cc", "memory"); - return v; - -# undef WHERE_TO -# undef STRINGIFY -} - -/* Last, but not least ... */ -void vg_bogus_epilogue ( void ) -{ - /* Runs on simulated CPU only. */ -} - -UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do ) -{ - /* Calls to this fn are detected in vg_dispatch.S and are handled - specially. So this fn should never be entered. */ - VG_(panic)("vg_trap_here called!"); - return 0; /*NOTREACHED*/ -} - /*--------------------------------------------------------------------*/ /*--- end vg_clientmalloc.c ---*/ diff --git a/vg_clientperms.c b/vg_clientperms.c index 5f19e4b9c2..f6351d3714 100644 --- a/vg_clientperms.c +++ b/vg_clientperms.c @@ -33,6 +33,8 @@ #include "vg_include.h" #include "vg_constants.h" +#include "valgrind.h" /* for VG_USERREQ__* */ + /*------------------------------------------------------------*/ /*--- General client block management. 
---*/ @@ -287,73 +289,81 @@ void VG_(delete_client_stack_blocks_following_ESP_change) ( void ) } -UInt VG_(handle_client_request) ( UInt code, Addr aa, UInt nn ) +UInt VG_(handle_client_request) ( UInt* arg_block ) { - Int i; - Bool ok; - Addr bad_addr; + Int i; + Bool ok; + Addr bad_addr; + UInt* arg = arg_block; if (VG_(clo_verbosity) > 2) VG_(printf)("client request: code %d, addr %p, len %d\n", - code, aa, nn ); + arg[0], arg[1], arg[2] ); vg_assert(VG_(clo_client_perms)); vg_assert(VG_(clo_instrument)); - switch (code) { - case 1001: /* make no access */ + switch (arg[0]) { + case VG_USERREQ__MAKE_NOACCESS: /* make no access */ i = vg_alloc_client_block(); /* VG_(printf)("allocated %d %p\n", i, vg_cgbs); */ vg_cgbs[i].kind = CG_NoAccess; - vg_cgbs[i].start = aa; - vg_cgbs[i].size = nn; + vg_cgbs[i].start = arg[1]; + vg_cgbs[i].size = arg[2]; vg_cgbs[i].where = VG_(get_ExeContext) ( False ); - VGM_(make_noaccess) ( aa, nn ); + VGM_(make_noaccess) ( arg[1], arg[2] ); return i; - case 1002: /* make writable */ + case VG_USERREQ__MAKE_WRITABLE: /* make writable */ i = vg_alloc_client_block(); vg_cgbs[i].kind = CG_Writable; - vg_cgbs[i].start = aa; - vg_cgbs[i].size = nn; + vg_cgbs[i].start = arg[1]; + vg_cgbs[i].size = arg[2]; vg_cgbs[i].where = VG_(get_ExeContext) ( False ); - VGM_(make_writable) ( aa, nn ); + VGM_(make_writable) ( arg[1], arg[2] ); return i; - case 1003: /* make readable */ + case VG_USERREQ__MAKE_READABLE: /* make readable */ i = vg_alloc_client_block(); vg_cgbs[i].kind = CG_Readable; - vg_cgbs[i].start = aa; - vg_cgbs[i].size = nn; + vg_cgbs[i].start = arg[1]; + vg_cgbs[i].size = arg[2]; vg_cgbs[i].where = VG_(get_ExeContext) ( False ); - VGM_(make_readable) ( aa, nn ); + VGM_(make_readable) ( arg[1], arg[2] ); return i; - case 2002: /* check writable */ - ok = VGM_(check_writable) ( aa, nn, &bad_addr ); + case VG_USERREQ__CHECK_WRITABLE: /* check writable */ + ok = VGM_(check_writable) ( arg[1], arg[2], &bad_addr ); if (!ok) 
VG_(record_user_err) ( bad_addr, True ); return ok ? (UInt)NULL : bad_addr; - case 2003: /* check readable */ - ok = VGM_(check_readable) ( aa, nn, &bad_addr ); + case VG_USERREQ__CHECK_READABLE: /* check readable */ + ok = VGM_(check_readable) ( arg[1], arg[2], &bad_addr ); if (!ok) VG_(record_user_err) ( bad_addr, False ); return ok ? (UInt)NULL : bad_addr; - case 2004: /* discard */ + case VG_USERREQ__DISCARD: /* discard */ if (vg_cgbs == NULL - || nn >= vg_cgb_used || vg_cgbs[nn].kind == CG_NotInUse) + || arg[2] >= vg_cgb_used || vg_cgbs[arg[2]].kind == CG_NotInUse) return 1; - vg_assert(nn >= 0 && nn < vg_cgb_used); - vg_cgbs[nn].kind = CG_NotInUse; + vg_assert(arg[2] >= 0 && arg[2] < vg_cgb_used); + vg_cgbs[arg[2]].kind = CG_NotInUse; vg_cgb_discards++; return 0; - case 3001: /* make noaccess stack block */ - vg_add_client_stack_block ( aa, nn ); + case VG_USERREQ__MAKE_NOACCESS_STACK: /* make noaccess stack block */ + vg_add_client_stack_block ( arg[1], arg[2] ); return 0; + case VG_USERREQ__RUNNING_ON_VALGRIND: + return 1; + + case VG_USERREQ__DO_LEAK_CHECK: + VG_(detect_memory_leaks)(); + return 0; /* return value is meaningless */ + default: VG_(message)(Vg_UserMsg, - "Warning: unknown client request code %d", code); + "Warning: unknown client request code %d", arg[0]); return 1; } } diff --git a/vg_constants.h b/vg_constants.h index ef48ef009d..b1b1b32e7d 100644 --- a/vg_constants.h +++ b/vg_constants.h @@ -50,23 +50,18 @@ #define VGP_(str) VGAPPEND(vgProf_,str) #define VGOFF_(str) VGAPPEND(vgOff_,str) -/* Reasons why the inner simulation loop might stop (i.e. why has - vg_dispatch_ctr reached zero? */ -#define VG_Y_SIGCHECK 0 /* signal check due */ -#define VG_Y_SMC 1 /* write to code detected */ -#define VG_Y_EXIT 2 /* natural or debug end to simulation */ -#define VG_Y_TRANSLATE 3 /* translation of vg_m_eip needed */ - -/* Check for pending signals every this-many jumps. 
Since this - happens in the region of once per millisecond, we also take the - opportunity do do a bit of quick sanity checking at the same time. - Look at the call sites of VG_(deliver_signals). */ -#define VG_SIGCHECK_INTERVAL 1000 - -/* A ,agic values that %ebp might be set to when returning to the + +/* Magic values that %ebp might be set to when returning to the dispatcher. The only other legitimate value is to point to the - start of VG_(baseBlock). */ -#define VG_EBP_DISPATCH_CHECKED 17 + start of VG_(baseBlock). These also are return values from + VG_(run_innerloop) to the scheduler. */ +#define VG_TRC_EBP_JMP_SPECIAL 17 +#define VG_TRC_EBP_JMP_SYSCALL 19 +#define VG_TRC_EBP_JMP_CLIENTREQ 23 + +#define VG_TRC_INNER_COUNTERZERO 29 /* ebp can't have this; sched return only */ +#define VG_TRC_INNER_FASTMISS 31 /* ditto. Means fast-cache miss. */ +#define VG_TRC_UNRESUMABLE_SIGNAL 37 /* ditto; got sigsegv/sigbus */ /* Debugging hack for assembly code ... sigh. */ #if 0 @@ -75,12 +70,13 @@ #define OYNK(nnn) #endif -#if 1 +#if 0 #define OYNNK(nnn) pushal; pushl $nnn; call VG_(oynk) ; addl $4,%esp; popal #else #define OYNNK(nnn) #endif + /* Constants for the fast translation lookup cache. */ #define VG_TT_FAST_BITS 15 #define VG_TT_FAST_SIZE (1 << VG_TT_FAST_BITS) @@ -88,6 +84,7 @@ /* Constants for the fast original-code-write check cache. */ + /* Usually you want this to be zero. 
*/ #define VG_SMC_FASTCHECK_IN_C 0 diff --git a/vg_dispatch.S b/vg_dispatch.S index 52231946e2..0f4783ba0e 100644 --- a/vg_dispatch.S +++ b/vg_dispatch.S @@ -61,8 +61,15 @@ .globl VG_(run_innerloop) VG_(run_innerloop): #OYNK(1000) + # ----- entry point to VG_(run_innerloop) ----- - pushal + pushl %ebx + pushl %ecx + pushl %edx + pushl %esi + pushl %edi + pushl %ebp + # Set up the baseBlock pointer movl $VG_(baseBlock), %ebp @@ -70,19 +77,19 @@ VG_(run_innerloop): movl VGOFF_(m_eip), %esi movl (%ebp, %esi, 4), %eax - # fall thru to vg_dispatch + # Start off dispatching paranoically, since we no longer have + # any indication whether or not this might be a special call/ret + # transfer. + jmp dispatch_callret_maybe -.globl VG_(dispatch) -VG_(dispatch): - # %eax holds destination (original) address - # To signal any kind of interruption, set vg_dispatch_ctr - # to 1, and vg_interrupt_reason to the appropriate value - # before jumping here. - + +dispatch_main: + # Jump here to do a new dispatch. + # %eax holds destination (original) address. # %ebp indicates further details of the control transfer # requested to the address in %eax. The idea is that we # want to check all jump targets to see if they are either - # VG_(signalreturn_bogusRA) or VG_(trap_here), both of which + # VG_(signalreturn_bogusRA) or VG_(shutdown), both of which # require special treatment. However, testing all branch # targets is expensive, and anyway in most cases JITter knows # that a jump cannot be to either of these two. We therefore @@ -92,37 +99,33 @@ VG_(dispatch): # this is a jump for which the JITter knows no check need be # made. # - # If it is ebp == VG_EBP_DISPATCH_CHECKED, we had better make + # If ebp == VG_EBP_JMP_CALLRET, we had better make # the check. # + # If ebp == VG_EBP_JMP_SYSCALL, do a system call before + # continuing at eax. + # + # If ebp == VG_EBP_JMP_CLIENTREQ, do a client request before + # continuing at eax. + # # If %ebp has any other value, we panic. 
# # What the JITter assumes is that VG_(signalreturn_bogusRA) can # only be arrived at from an x86 ret insn, and dually that - # VG_(trap_here) can only be arrived at from an x86 call insn. + # VG_(shutdown) can only be arrived at from an x86 call insn. # The net effect is that all call and return targets are checked # but straightforward jumps are not. - # - # Thinks ... is this safe if the client happens to tailcall - # VG_(trap_here) ? I dont think that can happen -- if it did - # it would be a problem. - # + cmpl $VG_(baseBlock), %ebp - jnz dispatch_checked_maybe + jnz dispatch_exceptional -dispatch_unchecked: +dispatch_boring: # save the jump address at VG_(baseBlock)[VGOFF_(m_eip)], - # so that if this block takes a fault, we later know where we were. movl VGOFF_(m_eip), %esi movl %eax, (%ebp, %esi, 4) - # do we require attention? - # this check has to be after the call/ret transfer checks, because - # we have to ensure that any control transfer following a syscall - # return is an ordinary transfer. By the time we get here, we have - # established that the next transfer, which might get delayed till - # after a syscall return, is an ordinary one. - # All a bit subtle ... + # do a timeslice check. + # are we out of timeslice? If yes, defer to scheduler. #OYNK(1001) decl VG_(dispatch_ctr) jz counter_is_zero @@ -136,243 +139,102 @@ dispatch_unchecked: # ebx points at a tt entry # now compare target with the tte.orig_addr field (+0) cmpl %eax, (%ebx) - jnz full_search + jnz fast_lookup_failed + # Found a match. Set the tte.mru_epoch field (+8) # and call the tte.trans_addr field (+4) movl VG_(current_epoch), %ecx movl %ecx, 8(%ebx) call *4(%ebx) - jmp VG_(dispatch) + jmp dispatch_main -full_search: - #no luck? try the full table search - pushl %eax - call VG_(search_transtab) - addl $4, %esp - - # %eax has trans addr or zero - cmpl $0, %eax - jz need_translation - # full table search also zeroes the tte.last_use field, - # so we dont have to do so here. 
- call *%eax - jmp VG_(dispatch) +fast_lookup_failed: + # %EIP is up to date here since dispatch_boring dominates + movl $VG_TRC_INNER_FASTMISS, %eax + jmp run_innerloop_exit -need_translation: - OYNK(1003) - movl $VG_Y_TRANSLATE, VG_(interrupt_reason) counter_is_zero: - OYNK(1004) - popal - # ----- (the only) exit point from VG_(run_innerloop) ----- - # ----- unless of course vg_oursignalhandler longjmp()s - # ----- back through it, due to an unmanagable signal - ret + # %EIP is up to date here since dispatch_boring dominates + movl $VG_TRC_INNER_COUNTERZERO, %eax + jmp run_innerloop_exit + +run_innerloop_exit: + popl %ebp + popl %edi + popl %esi + popl %edx + popl %ecx + popl %ebx + ret -/* The normal way to get back to the translation loop is to put - the address of the next (original) address and return. - However, simulation of a RET insn requires a check as to whether - the next address is vg_signalreturn_bogusRA. If so, a signal - handler is returning, so we need to invoke our own mechanism to - deal with that, by calling vg_signal_returns(). This restores - the simulated machine state from the VgSigContext structure on - the stack, including the (simulated, of course) %eip saved when - the signal was delivered. We then arrange to jump to the - restored %eip. -*/ -dispatch_checked_maybe: - # Possibly a checked dispatch. Sanity check ... - cmpl $VG_EBP_DISPATCH_CHECKED, %ebp - jz dispatch_checked + +/* Other ways of getting out of the inner loop. Placed out-of-line to + make it look cleaner. +*/ +dispatch_exceptional: + # this is jumped to only, not fallen-through from above + cmpl $VG_TRC_EBP_JMP_SPECIAL, %ebp + jz dispatch_callret_maybe + cmpl $VG_TRC_EBP_JMP_SYSCALL, %ebp + jz dispatch_syscall + cmpl $VG_TRC_EBP_JMP_CLIENTREQ, %ebp + jz dispatch_clientreq + # ebp has an invalid value ... crap out. 
pushl $panic_msg_ebp call VG_(panic) # (never returns) -dispatch_checked: - OYNK(2000) - # first off, restore %ebp -- since it is currently wrong +dispatch_syscall: + # save %eax in %EIP and defer to sched + movl $VG_(baseBlock), %ebp + movl VGOFF_(m_eip), %esi + movl %eax, (%ebp, %esi, 4) + movl $VG_TRC_EBP_JMP_SYSCALL, %eax + jmp run_innerloop_exit + +dispatch_clientreq: + # save %eax in %EIP and defer to sched + movl $VG_(baseBlock), %ebp + movl VGOFF_(m_eip), %esi + movl %eax, (%ebp, %esi, 4) + movl $VG_TRC_EBP_JMP_CLIENTREQ, %eax + jmp run_innerloop_exit + +dispatch_callret_maybe: + # save %eax in %EIP movl $VG_(baseBlock), %ebp + movl VGOFF_(m_eip), %esi + movl %eax, (%ebp, %esi, 4) # see if we need to mess with stack blocks - pushl %ebp pushl %eax call VG_(delete_client_stack_blocks_following_ESP_change) popl %eax - popl %ebp + movl $VG_(baseBlock), %ebp - # is this a signal return? + # is this a call/return which we need to mess with cmpl $VG_(signalreturn_bogusRA), %eax - jz dispatch_to_signalreturn_bogusRA - # should we intercept this call? - cmpl $VG_(trap_here), %eax - jz dispatch_to_trap_here - # ok, its not interesting. Handle the normal way. - jmp dispatch_unchecked - -dispatch_to_signalreturn_bogusRA: - OYNK(2001) - pushal - call VG_(signal_returns) - popal - # %EIP will now point to the insn which should have followed - # the signal delivery. Jump to it. Since we no longer have any - # hint from the JITter about whether or not it is checkable, - # go via the conservative route. - movl VGOFF_(m_eip), %esi - movl (%ebp, %esi, 4), %eax - jmp dispatch_checked - - -/* Similarly, check CALL targets to see if it is the ultra-magical - vg_trap_here(), and, if so, act accordingly. See vg_clientmalloc.c. - Be careful not to get the real and simulated CPUs, - stacks and regs mixed up ... 
-*/ -dispatch_to_trap_here: - OYNK(111) - /* Considering the params to vg_trap_here(), we should have: - 12(%ESP) is what_to_do - 8(%ESP) is arg2 - 4(%ESP) is arg1 - 0(%ESP) is return address - */ - movl VGOFF_(m_esp), %esi - movl (%ebp, %esi, 4), %ebx - # %ebx now holds simulated %ESP - cmpl $0x4000, 12(%ebx) - jz handle_malloc - cmpl $0x4001, 12(%ebx) - jz handle_malloc - cmpl $0x4002, 12(%ebx) - jz handle_malloc - cmpl $0x5000, 12(%ebx) - jz handle_free - cmpl $0x5001, 12(%ebx) - jz handle_free - cmpl $0x5002, 12(%ebx) - jz handle_free - cmpl $6666, 12(%ebx) - jz handle_calloc - cmpl $7777, 12(%ebx) - jz handle_realloc - cmpl $8888, 12(%ebx) - jz handle_memalign - push $panic_msg_trap - call VG_(panic) - # vg_panic never returns - -handle_malloc: - # %ESP is in %ebx - pushl 12(%ebx) - pushl 8(%ebx) - call VG_(client_malloc) - addl $8, %esp - # returned value is in %eax - jmp save_eax_and_simulate_RET + jz dispatch_callret + cmpl $VG_(shutdown), %eax + jz dispatch_callret -handle_free: - # %ESP is in %ebx - pushl 12(%ebx) - pushl 8(%ebx) - call VG_(client_free) - addl $8, %esp - jmp simulate_RET - -handle_calloc: - # %ESP is in %ebx - pushl 8(%ebx) - pushl 4(%ebx) - call VG_(client_calloc) - addl $8, %esp - # returned value is in %eax - jmp save_eax_and_simulate_RET - -handle_realloc: - # %ESP is in %ebx - pushl 8(%ebx) - pushl 4(%ebx) - call VG_(client_realloc) - addl $8, %esp - # returned value is in %eax - jmp save_eax_and_simulate_RET + # ok, its not interesting. Handle the normal way. 
+ jmp dispatch_boring -handle_memalign: - # %ESP is in %ebx - pushl 8(%ebx) - pushl 4(%ebx) - call VG_(client_memalign) - addl $8, %esp - # returned value is in %eax - jmp save_eax_and_simulate_RET +dispatch_callret: + # %EIP is up to date here since dispatch_callret_maybe dominates + movl $VG_TRC_EBP_JMP_SPECIAL, %eax + jmp run_innerloop_exit -save_eax_and_simulate_RET: - movl VGOFF_(m_eax), %esi - movl %eax, (%ebp, %esi, 4) # %eax -> %EAX - # set %EAX bits to VALID - movl VGOFF_(sh_eax), %esi - movl $0x0 /* All 32 bits VALID */, (%ebp, %esi, 4) - # fall thru ... -simulate_RET: - # standard return - movl VGOFF_(m_esp), %esi - movl (%ebp, %esi, 4), %ebx # %ESP -> %ebx - movl 0(%ebx), %eax # RA -> %eax - addl $4, %ebx # %ESP += 4 - movl %ebx, (%ebp, %esi, 4) # %ebx -> %ESP - jmp dispatch_checked # jump to %eax .data -panic_msg_trap: -.ascii "dispatch_to_trap_here: unknown what_to_do" -.byte 0 panic_msg_ebp: .ascii "vg_dispatch: %ebp has invalid value!" .byte 0 .text - -/*------------------------------------------------------------*/ -/*--- A helper for delivering signals when the client is ---*/ -/*--- (presumably) blocked in a system call. ---*/ -/*------------------------------------------------------------*/ - -/* Returns, in %eax, the next orig_addr to run. - The caller needs to decide whether the returned orig_addr - requires special handling. - - extern Addr VG_(run_singleton_translation) ( Addr trans_addr ) -*/ - -/* should we take care to save the FPU state here? 
*/ - -.globl VG_(run_singleton_translation) -VG_(run_singleton_translation): - movl 4(%esp), %eax # eax = trans_addr - pushl %ebx - pushl %ecx - pushl %edx - pushl %esi - pushl %edi - pushl %ebp - - # set up ebp correctly for translations - movl $VG_(baseBlock), %ebp - - # run the translation - call *%eax - - # next orig_addr is correctly in %eax already - - popl %ebp - popl %edi - popl %esi - popl %edx - popl %ecx - popl %ebx - - ret ##--------------------------------------------------------------------## ##--- end vg_dispatch.S ---## diff --git a/vg_from_ucode.c b/vg_from_ucode.c index 5e320840ee..0514cf9e22 100644 --- a/vg_from_ucode.c +++ b/vg_from_ucode.c @@ -1069,44 +1069,48 @@ static void synth_call_baseBlock_method ( Bool ensure_shortform, } +static void load_ebp_from_JmpKind ( JmpKind jmpkind ) +{ + switch (jmpkind) { + case JmpBoring: + break; + case JmpCall: + case JmpRet: + emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_SPECIAL, R_EBP ); + break; + case JmpSyscall: + emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_SYSCALL, R_EBP ); + break; + case JmpClientReq: + emit_movv_lit_reg ( 4, VG_TRC_EBP_JMP_CLIENTREQ, R_EBP ); + break; + default: + VG_(panic)("load_ebp_from_JmpKind"); + } +} + /* Jump to the next translation, by loading its original addr into - %eax and returning to the scheduler. Or, if is a RET transfer, - don't return; instead jump to vg_dispatch_when_RET, which checks - whether this is a signal handler returning, and takes suitable - evasive action. + %eax and returning to the scheduler. Signal special requirements + by loading a special value into %ebp first. */ -static void synth_jmp_reg ( Int reg, - Bool is_ret_dispatch, - Bool is_call_dispatch ) +static void synth_jmp_reg ( Int reg, JmpKind jmpkind ) { + load_ebp_from_JmpKind ( jmpkind ); if (reg != R_EAX) emit_movv_reg_reg ( 4, reg, R_EAX ); - if (is_ret_dispatch || is_call_dispatch) { - /* The (hopefully) rare case. 
*/ - vg_assert(!(is_ret_dispatch && is_call_dispatch)); - emit_movv_lit_reg ( 4, VG_EBP_DISPATCH_CHECKED, R_EBP ); - } emit_ret(); } /* Same deal as synth_jmp_reg. */ -static void synth_jmp_lit ( Addr addr ) +static void synth_jmp_lit ( Addr addr, JmpKind jmpkind ) { + load_ebp_from_JmpKind ( jmpkind ); emit_movv_lit_reg ( 4, addr, R_EAX ); emit_ret(); } -/* Dispatch, but with a call-target check. */ -static void synth_jmp_lit_call_dispatch ( Addr addr ) -{ - emit_movv_lit_reg ( 4, addr, R_EAX ); - emit_movv_lit_reg ( 4, VG_EBP_DISPATCH_CHECKED, R_EBP ); - emit_ret(); -} - - static void synth_jcond_lit ( Condcode cond, Addr addr ) { /* Do the following: @@ -1124,7 +1128,7 @@ static void synth_jcond_lit ( Condcode cond, Addr addr ) */ emit_get_eflags(); emit_jcondshort_delta ( invertCondition(cond), 5+1 ); - synth_jmp_lit ( addr ); + synth_jmp_lit ( addr, JmpBoring ); } @@ -1138,7 +1142,7 @@ static void synth_jmp_ifzero_reg_lit ( Int reg, Addr addr ) */ emit_cmpl_zero_reg ( reg ); emit_jcondshort_delta ( CondNZ, 5+1 ); - synth_jmp_lit ( addr ); + synth_jmp_lit ( addr, JmpBoring ); } @@ -2472,25 +2476,29 @@ static void emitUInstr ( Int i, UInstr* u ) vg_assert(u->tag2 == NoValue); vg_assert(u->tag1 == RealReg || u->tag1 == Literal); if (u->cond == CondAlways) { - if (u->tag1 == RealReg) { - synth_jmp_reg ( u->val1, u->ret_dispatch, u->call_dispatch ); - } else { - vg_assert(!u->ret_dispatch); - if (u->call_dispatch) - synth_jmp_lit_call_dispatch ( - u->tag1==Literal ? u->lit32 : u->val1 ); - else - synth_jmp_lit ( - u->tag1==Literal ? 
u->lit32 : u->val1 ); + switch (u->tag1) { + case RealReg: + synth_jmp_reg ( u->val1, u->jmpkind ); + break; + case Literal: + synth_jmp_lit ( u->lit32, u->jmpkind ); + break; + default: + VG_(panic)("emitUInstr(JMP, unconditional, default)"); + break; } } else { - if (u->tag1 == RealReg) { - VG_(panic)("emitUInstr: conditional jump to reg"); - } else { - vg_assert(!u->ret_dispatch); - vg_assert(!u->call_dispatch); - synth_jcond_lit ( u->cond, - u->tag1==Literal ? u->lit32 : u->val1 ); + switch (u->tag1) { + case RealReg: + VG_(panic)("emitUInstr(JMP, conditional, RealReg)"); + break; + case Literal: + vg_assert(u->jmpkind == JmpBoring); + synth_jcond_lit ( u->cond, u->lit32 ); + break; + default: + VG_(panic)("emitUInstr(JMP, conditional, default)"); + break; } } break; diff --git a/vg_helpers.S b/vg_helpers.S index 3431111ee7..72de1347cb 100644 --- a/vg_helpers.S +++ b/vg_helpers.S @@ -48,45 +48,6 @@ */ -/* - On entry: - %ECX value - %EBX value - %EAX value -- also the result - RA <- %esp -- after pushal+pushfl is 36(%esp) -*/ -.global VG_(helper_do_client_request) -VG_(helper_do_client_request): - pushal - pushfl - - movl 48(%esp), %eax - pushl %eax - movl 48(%esp), %eax - pushl %eax - movl 48(%esp), %eax - pushl %eax - - call VG_(handle_client_request) - movl %eax, 52(%esp) - - addl $12, %esp - - popfl - popal - ret - - -.global VG_(helper_do_syscall) -VG_(helper_do_syscall): - pushal - call VG_(wrap_syscall) - popal -# movl $VG_(baseBlock), %ebp - ret - - - .global VG_(helper_value_check0_fail) VG_(helper_value_check0_fail): pushal @@ -116,21 +77,6 @@ VG_(helper_value_check4_fail): ret -/* Set things up so the dispatch loop exits normally. Used when it is - detected that the program wants to finish, ie it has called - vg_shutdown. 
-*/ -.global VG_(helper_request_normal_exit) -VG_(helper_request_normal_exit): - pushl %eax - movl VG_(dispatch_ctr), %eax - movl %eax, VG_(dispatch_ctr_SAVED) - movl $1, VG_(dispatch_ctr) - movl $VG_Y_EXIT, VG_(interrupt_reason) - popl %eax - ret - - /* Do a original-code-write check for the address in %ebp. */ .global VG_(helper_smc_check4) VG_(helper_smc_check4): diff --git a/vg_include.h b/vg_include.h index 3181fd8f8f..5d9825f12d 100644 --- a/vg_include.h +++ b/vg_include.h @@ -117,6 +117,27 @@ prime. */ #define VG_N_EC_LISTS /*997*/ 4999 +/* Defines the thread-scheduling timeslice, in terms of the number of + basic blocks we attempt to run each thread for. Smaller values + give finer interleaving but much increased scheduling overheads. */ +#define VG_SCHEDULING_QUANTUM 10000 + +/* The maximum number of pthreads that we support. This is + deliberately not very high since our implementation of some of the + scheduler algorithms is surely O(N^2) in the number of threads, + since that's simple, at least. And (in practice) we hope that most + programs do not need many threads. */ +#define VG_N_THREADS 20 + +/* Number of file descriptors that can simultaneously be waited on for + I/O to complete. Perhaps this should be the same as VG_N_THREADS + (surely a thread can't wait on more than one fd at once?. Who + knows.) */ +#define VG_N_WAITING_FDS 10 + +/* Maximum number of mutexes allowed. */ +#define VG_N_MUTEXES 10 + /* --------------------------------------------------------------------- Basic types @@ -353,30 +374,219 @@ extern Bool VG_(is_empty_arena) ( ArenaId aid ); /* --------------------------------------------------------------------- - Exports of vg_signals.c + Exports of vg_clientfuns.c ------------------------------------------------------------------ */ -/* The maximum number of basic blocks that we're prepared to run in a - signal handler which is called when the client is stuck in a - blocking system call. 
The purpose of this is to check that such a - signal handler doesn't merely do a longjmp() and keep going - forever; it should return instead. NOTE that this doesn't apply to - signals delivered under normal conditions, only when they are - delivered and the client is already blocked in a system call. */ -#define VG_MAX_BBS_IN_IMMEDIATE_SIGNAL 50000 +/* This doesn't export code or data that valgrind.so needs to link + against. However, the scheduler does need to know the following + request codes. A few, publically-visible, request codes are also + defined in valgrind.h. */ + +#define VG_USERREQ__MALLOC 0x2001 +#define VG_USERREQ__BUILTIN_NEW 0x2002 +#define VG_USERREQ__BUILTIN_VEC_NEW 0x2003 + +#define VG_USERREQ__FREE 0x2004 +#define VG_USERREQ__BUILTIN_DELETE 0x2005 +#define VG_USERREQ__BUILTIN_VEC_DELETE 0x2006 + +#define VG_USERREQ__CALLOC 0x2007 +#define VG_USERREQ__REALLOC 0x2008 +#define VG_USERREQ__MEMALIGN 0x2009 + + +#define VG_USERREQ__PTHREAD_CREATE 0x3001 +#define VG_USERREQ__PTHREAD_CREATE_BOGUSRA 0x3002 +#define VG_USERREQ__PTHREAD_JOIN 0x3003 +#define VG_USERREQ__PTHREAD_GET_THREADID 0x3004 +#define VG_USERREQ__PTHREAD_MUTEX_INIT 0x3005 +#define VG_USERREQ__PTHREAD_MUTEX_LOCK 0x3006 +#define VG_USERREQ__PTHREAD_MUTEX_UNLOCK 0x3007 +#define VG_USERREQ__PTHREAD_MUTEX_DESTROY 0x3008 +#define VG_USERREQ__PTHREAD_CANCEL 0x3009 + +/* --------------------------------------------------------------------- + Constants pertaining to the simulated CPU state, VG_(baseBlock), + which need to go here to avoid ugly circularities. + ------------------------------------------------------------------ */ + +/* How big is the saved FPU state? */ +#define VG_SIZE_OF_FPUSTATE 108 +/* ... and in words ... 
*/ +#define VG_SIZE_OF_FPUSTATE_W ((VG_SIZE_OF_FPUSTATE+3)/4) + + +/* --------------------------------------------------------------------- + Exports of vg_scheduler.c + ------------------------------------------------------------------ */ + +/* ThreadIds are simply indices into the vg_threads[] array. */ +typedef + UInt + ThreadId; + +/* MutexIds are simply indices into the vg_mutexes[] array. */ +typedef + UInt + MutexId; + + +#define VG_INVALID_THREADID ((ThreadId)(-1)) + +typedef + enum { + VgTs_Empty, /* this slot is not in use */ + VgTs_Runnable, /* waiting to be scheduled */ + VgTs_WaitJoiner, /* waiting for someone to do join on me */ + VgTs_WaitJoinee, /* waiting for the thread I did join on */ + VgTs_WaitFD, /* waiting for I/O completion on a fd */ + VgTs_WaitMX, /* waiting on a mutex */ + VgTs_Sleeping /* sleeping for a while */ + } + ThreadStatus; + +typedef + struct { + /* The thread identity is simply the index in vg_threads[]. + ThreadId == 0 is the root thread and has the special property + that we don't try and allocate or deallocate its stack. */ + + /* Current scheduling status. */ + ThreadStatus status; + + /* Identity of joiner (thread who called join on me), or + VG_INVALID_THREADID if no one asked to join yet. */ + ThreadId joiner; + + /* Identity of mutex we are waiting on, if .status == WaitMX. */ + MutexId waited_on_mid; + + /* If VgTs_Sleeping, this is when we should wake up. */ + ULong awaken_at; + + /* return value */ + void* retval; + + /* Stacks. When a thread slot is freed, we don't deallocate its + stack; we just leave it lying around for the next use of the + slot. If the next use of the slot requires a larger stack, + only then is the old one deallocated and a new one + allocated. + + For the main thread (threadid == 0), this mechanism doesn't + apply. We don't know the size of the stack since we didn't + allocate it, and furthermore we never reallocate it. 
*/ + + /* The allocated size of this thread's stack (permanently zero + if this is ThreadId == 0, since we didn't allocate its stack) */ + UInt stack_size; + + /* Address of the lowest word in this thread's stack. NULL means + not allocated yet. + */ + Addr stack_base; + + /* Saved machine context. */ + UInt m_eax; + UInt m_ebx; + UInt m_ecx; + UInt m_edx; + UInt m_esi; + UInt m_edi; + UInt m_ebp; + UInt m_esp; + UInt m_eflags; + UInt m_eip; + UInt m_fpu[VG_SIZE_OF_FPUSTATE_W]; + + UInt sh_eax; + UInt sh_ebx; + UInt sh_ecx; + UInt sh_edx; + UInt sh_esi; + UInt sh_edi; + UInt sh_ebp; + UInt sh_esp; + UInt sh_eflags; + } + ThreadState; + + +/* Copy the specified thread's state into VG_(baseBlock) in + preparation for running it. */ +extern void VG_(load_thread_state)( ThreadId ); + +/* Save the specified thread's state back in VG_(baseBlock), and fill + VG_(baseBlock) with junk, for sanity-check reasons. */ +extern void VG_(save_thread_state)( ThreadId ); + +/* Get the thread state block for the specified thread. */ +extern ThreadState* VG_(get_thread_state)( ThreadId ); + + +/* Create, and add to TT/TC, the translation of a client basic + block. */ +extern void VG_(create_translation_for) ( Addr orig_addr ); + +/* Return codes from the scheduler. */ +typedef + enum { VgSrc_Deadlock, VgSrc_Shutdown, VgSrc_BbsDone } + VgSchedReturnCode; + +/* The scheduler. */ +extern VgSchedReturnCode VG_(scheduler) ( void ); + +extern void VG_(scheduler_init) ( void ); + + +/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */ +extern jmp_buf VG_(scheduler_jmpbuf); +/* ... and if so, here's the signal which caused it to do so. */ +extern Int VG_(longjmpd_on_signal); + + +/* We check that the initial stack, which we can't move, is allocated + here. VG_(scheduler_init) checks this. +*/ +#define VG_STARTUP_STACK_MASK (Addr)0xBFFF8000 + + +/* The red-zone size which we put at the bottom (highest address) of + thread stacks, for paranoia reasons. 
This can be arbitrary, and + doesn't really need to be set at compile time. */ +#define VG_AR_CLIENT_STACKBASE_REDZONE_SZW 4 + +#define VG_AR_CLIENT_STACKBASE_REDZONE_SZB \ + (VG_AR_CLIENT_STACKBASE_REDZONE_SZW * VKI_BYTES_PER_WORD) + + + +/* --------------------------------------------------------------------- + Exports of vg_signals.c + ------------------------------------------------------------------ */ extern void VG_(sigstartup_actions) ( void ); -extern void VG_(deliver_signals) ( void ); +extern void VG_(deliver_signals) ( ThreadId ); extern void VG_(unblock_host_signal) ( Int sigNo ); /* Fake system calls for signal handling. */ -extern void VG_(do__NR_sigaction) ( void ); +extern void VG_(do__NR_sigaction) ( ThreadId tid ); extern void VG_(do__NR_sigprocmask) ( Int how, vki_ksigset_t* set ); +/* Bogus return address for signal handlers. Is never executed. */ +extern void VG_(signalreturn_bogusRA) ( void ); +/* Modify the current thread's state once we have detected it is + returning from a signal handler. */ +extern void VG_(signal_returns) ( ThreadId ); +/* Handy utilities to block/restore all host signals. */ +extern void VG_(block_all_host_signals) + ( /* OUT */ vki_ksigset_t* saved_mask ); +extern void VG_(restore_host_signals) + ( /* IN */ vki_ksigset_t* saved_mask ); /* --------------------------------------------------------------------- Exports of vg_mylibc.c @@ -420,6 +630,7 @@ extern Char* VG_(strdup) ( ArenaId aid, const Char* s); extern Char* VG_(getenv) ( Char* name ); extern Int VG_(getpid) ( void ); +extern ULong VG_(read_microsecond_timer)( void ); extern Char VG_(toupper) ( Char c ); @@ -444,19 +655,28 @@ extern void VG_(assert_fail) ( Char* expr, Char* file, Int line, Char* fn ) __attribute__ ((__noreturn__)); -/* Later ... extern void vg_restore_SIGABRT ( void ); */ - /* Reading files. 
*/ extern Int VG_(open_read) ( Char* pathname ); extern void VG_(close) ( Int fd ); extern Int VG_(read) ( Int fd, void* buf, Int count); extern Int VG_(write) ( Int fd, void* buf, Int count); +extern Int VG_(fcntl) ( Int fd, Int cmd, Int arg ); + +extern Int VG_(select)( Int n, + vki_fd_set* readfds, + vki_fd_set* writefds, + vki_fd_set* exceptfds, + struct vki_timeval * timeout ); +extern Int VG_(nanosleep)( const struct vki_timespec *req, + struct vki_timespec *rem ); + + /* mmap-ery ... */ extern void* VG_(mmap)( void* start, UInt length, UInt prot, UInt flags, UInt fd, UInt offset ); -extern Int VG_(munmap)( void* start, Int length ); +extern Int VG_(munmap)( void* start, Int length ); /* Print a (panic) message, and abort. */ @@ -594,6 +814,18 @@ typedef Condcode; +/* Descriptions of additional properties of *unconditional* jumps. */ +typedef + enum { + JmpBoring=0, /* boring unconditional jump */ + JmpCall=1, /* jump due to an x86 call insn */ + JmpRet=2, /* jump due to an x86 ret insn */ + JmpSyscall=3, /* do a system call, then jump */ + JmpClientReq=4 /* do a client request, then jump */ + } + JmpKind; + + /* Flags. User-level code can only read/write O(verflow), S(ign), Z(ero), A(ux-carry), C(arry), P(arity), and may also write D(irection). That's a total of 7 flags. A FlagSet is a bitset, @@ -662,8 +894,7 @@ typedef UChar cond; /* condition, for jumps */ Bool smc_check:1; /* do a smc test, if writes memory. */ Bool signed_widen:1; /* signed or unsigned WIDEN ? */ - Bool ret_dispatch:1; /* Is this jump as a result of RET ? */ - Bool call_dispatch:1; /* Is this jump as a result of CALL ? 
*/ + JmpKind jmpkind:3; /* additional properties of unconditional JMP */ } UInstr; @@ -845,7 +1076,7 @@ typedef extern Bool VG_(client_perm_maybe_describe)( Addr a, AddrInfo* ai ); -extern UInt VG_(handle_client_request) ( UInt code, Addr aa, UInt nn ); +extern UInt VG_(handle_client_request) ( UInt* arg_block ); extern void VG_(delete_client_stack_blocks_following_ESP_change) ( void ); @@ -886,13 +1117,10 @@ extern void VG_(symtab_notify_munmap) ( Addr start, UInt length ); Exports of vg_clientmalloc.c ------------------------------------------------------------------ */ -/* these numbers are not arbitary. if you change them, - adjust vg_dispatch.S as well */ - typedef enum { Vg_AllocMalloc = 0, - Vg_AllocNew = 1, + Vg_AllocNew = 1, Vg_AllocNewVec = 2 } VgAllocKind; @@ -912,20 +1140,19 @@ extern void VG_(clientmalloc_done) ( void ); extern void VG_(describe_addr) ( Addr a, AddrInfo* ai ); extern ShadowChunk** VG_(get_malloc_shadows) ( /*OUT*/ UInt* n_shadows ); -/* This should never be called; if it is, something's seriously - wrong. */ -extern UInt VG_(trap_here) ( UInt arg1, UInt arg2, UInt what_to_do ); +/* These are called from the scheduler, when it intercepts a user + request. */ +extern void* VG_(client_malloc) ( UInt size, VgAllocKind kind ); +extern void* VG_(client_memalign) ( UInt align, UInt size ); +extern void VG_(client_free) ( void* ptrV, VgAllocKind kind ); +extern void* VG_(client_calloc) ( UInt nmemb, UInt size1 ); +extern void* VG_(client_realloc) ( void* ptrV, UInt size_new ); /* --------------------------------------------------------------------- Exports of vg_main.c ------------------------------------------------------------------ */ -/* How big is the saved FPU state? */ -#define VG_SIZE_OF_FPUSTATE 108 -/* ... and in words ... */ -#define VG_SIZE_OF_FPUSTATE_W ((VG_SIZE_OF_FPUSTATE+3)/4) - /* A structure used as an intermediary when passing the simulated CPU's state to some assembly fragments, particularly system calls. 
Stuff is copied from baseBlock to here, the assembly magic runs, @@ -941,10 +1168,6 @@ extern UInt VG_(m_state_static) [8 /* int regs, in Intel order */ extern void VG_(copy_baseBlock_to_m_state_static) ( void ); extern void VG_(copy_m_state_static_to_baseBlock) ( void ); -/* Create, and add to TT/TC, the translation of a client basic - block. */ -extern void VG_(create_translation_for) ( Addr orig_addr ); - /* Called when some unhandleable client behaviour is detected. Prints a msg and aborts. */ extern void VG_(unimplemented) ( Char* msg ); @@ -960,12 +1183,6 @@ extern UInt VG_(stack)[10000]; vg_deliver_signal_immediately(). */ extern UInt VG_(sigstack)[10000]; - -/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. */ -extern jmp_buf VG_(toploop_jmpbuf); -/* ... and if so, here's the signal which caused it to do so. */ -extern Int VG_(longjmpd_on_signal); - /* Holds client's %esp at the point we gained control. From this the client's argc, argv and envp are deduced. */ extern Addr VG_(esp_at_startup); @@ -994,13 +1211,6 @@ extern ULong VG_(bbs_to_go); /* Counts downwards in vg_run_innerloop. */ extern UInt VG_(dispatch_ctr); -/* If vg_dispatch_ctr is set to 1 to force a stop, its - previous value is saved here. */ -extern UInt VG_(dispatch_ctr_SAVED); - -/* This is why vg_run_innerloop() exited. */ -extern UInt VG_(interrupt_reason); - /* Is the client running on the simulated CPU or the real one? */ extern Bool VG_(running_on_simd_CPU); /* Initially False */ @@ -1068,6 +1278,10 @@ extern UInt VG_(smc_fancy_passed); extern UInt VG_(sanity_fast_count); extern UInt VG_(sanity_slow_count); +/* Counts pertaining to the scheduler. */ +extern UInt VG_(num_scheduling_events_MINOR); +extern UInt VG_(num_scheduling_events_MAJOR); + /* --------------------------------------------------------------------- Exports of vg_memory.c @@ -1095,7 +1309,7 @@ extern Bool VGM_(check_readable_asciiz) ( Addr a, Addr* bad_addr ); /* Sanity checks which may be done at any time. 
Doing them at signal-delivery time turns out to be convenient. */ -extern void VG_(do_sanity_checks) ( Bool force_expensive ); +extern void VG_(do_sanity_checks) ( ThreadId tid, Bool force_expensive ); /* Very cheap ... */ extern Bool VG_(first_and_last_secondaries_look_plausible) ( void ); @@ -1134,22 +1348,21 @@ extern Bool VG_(is_plausible_stack_addr) ( Addr ); Exports of vg_syscall_mem.c ------------------------------------------------------------------ */ -/* Counts the depth of nested syscalls. Is used in - VG_(deliver_signals) do discover whether or not the client is in a - syscall (presumably _blocked_ in a syscall) when a signal is - delivered. If so, the signal delivery mechanism needs to behave - differently from normal. */ -extern Int VG_(syscall_depth); +extern void VG_(perform_assumed_nonblocking_syscall) ( ThreadId tid ); -extern void VG_(wrap_syscall) ( void ); +extern void VG_(check_known_blocking_syscall) ( ThreadId tid, + Int syscallno, + Int* /*IN*/ res ); extern Bool VG_(is_kerror) ( Int res ); -#define KERNEL_DO_SYSCALL(result_lvalue) \ +#define KERNEL_DO_SYSCALL(thread_id, result_lvalue) \ + VG_(load_thread_state)(thread_id); \ VG_(copy_baseBlock_to_m_state_static)(); \ VG_(do_syscall)(); \ VG_(copy_m_state_static_to_baseBlock)(); \ - result_lvalue = VG_(baseBlock)[VGOFF_(m_eax)]; + VG_(save_thread_state)(thread_id); \ + result_lvalue = VG_(get_thread_state)(thread_id)->m_eax; /* --------------------------------------------------------------------- @@ -1242,20 +1455,15 @@ extern void VG_(swizzle_esp_then_start_GDB) ( void ); Exports of vg_dispatch.S ------------------------------------------------------------------ */ -extern void VG_(dispatch); -extern void VG_(run_innerloop) ( void ); - -/* Returns the next orig_addr to run. */ -extern Addr VG_(run_singleton_translation) ( Addr trans_addr ); +/* Run a thread for a (very short) while, until some event happens + which means we need to defer to the scheduler. 
*/ +extern UInt VG_(run_innerloop) ( void ); /* --------------------------------------------------------------------- Exports of vg_helpers.S ------------------------------------------------------------------ */ -/* For doing exits ... */ -extern void VG_(helper_request_normal_exit); - /* SMC fast checks. */ extern void VG_(helper_smc_check4); @@ -1304,9 +1512,6 @@ extern void VG_(helper_value_check2_fail); extern void VG_(helper_value_check1_fail); extern void VG_(helper_value_check0_fail); -extern void VG_(helper_do_syscall); -extern void VG_(helper_do_client_request); - /* --------------------------------------------------------------------- The state of the simulated CPU. @@ -1434,9 +1639,6 @@ extern Int VGOFF_(helper_value_check2_fail); extern Int VGOFF_(helper_value_check1_fail); extern Int VGOFF_(helper_value_check0_fail); -extern Int VGOFF_(helper_do_syscall); -extern Int VGOFF_(helper_do_client_request); - extern Int VGOFF_(helperc_STOREV4); /* :: UInt -> Addr -> void */ extern Int VGOFF_(helperc_STOREV2); /* :: UInt -> Addr -> void */ extern Int VGOFF_(helperc_STOREV1); /* :: UInt -> Addr -> void */ @@ -1449,8 +1651,6 @@ extern Int VGOFF_(handle_esp_assignment); /* :: Addr -> void */ extern Int VGOFF_(fpu_write_check); /* :: Addr -> Int -> void */ extern Int VGOFF_(fpu_read_check); /* :: Addr -> Int -> void */ -extern Int VGOFF_(helper_request_normal_exit); - #endif /* ndef __VG_INCLUDE_H */ diff --git a/vg_kerneliface.h b/vg_kerneliface.h index 15ce80d352..9ec236acb5 100644 --- a/vg_kerneliface.h +++ b/vg_kerneliface.h @@ -135,6 +135,10 @@ typedef /* Copied from /usr/src/linux-2.4.9-13/include/asm/errno.h */ #define VKI_EINVAL 22 /* Invalid argument */ +#define VKI_ENOMEM 12 /* Out of memory */ + +#define VKI_EWOULDBLOCK VKI_EAGAIN /* Operation would block */ +#define VKI_EAGAIN 11 /* Try again */ /* Gawd ... hack ... 
*/ @@ -166,6 +170,108 @@ typedef struct vki__user_cap_data_struct { #define VKI_SIZEOF_STRUCT_TERMIO 17 +/* File descriptor sets, for doing select(). Copied from + /usr/src/linux-2.4.9-31/include/linux/posix_types.h +*/ +/* + * This allows for 1024 file descriptors: if NR_OPEN is ever grown + * beyond that you'll have to change this too. But 1024 fd's seem to be + * enough even for such "real" unices like OSF/1, so hopefully this is + * one limit that doesn't have to be changed [again]. + * + * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in + * (and thus ) - but this is a more logical + * place for them. Solved by having dummy defines in . + */ + +/* + * Those macros may have been defined in . But we always + * use the ones here. + */ +#undef VKI_NFDBITS +#define VKI_NFDBITS (8 * sizeof(unsigned long)) + +#undef VKI_FD_SETSIZE +#define VKI_FD_SETSIZE 1024 + +#undef VKI_FDSET_LONGS +#define VKI_FDSET_LONGS (VKI_FD_SETSIZE/VKI_NFDBITS) + +#undef VKI_FDELT +#define VKI_FDELT(d) ((d) / VKI_NFDBITS) + +#undef VKI_FDMASK +#define VKI_FDMASK(d) (1UL << ((d) % VKI_NFDBITS)) + +typedef struct { + unsigned long vki_fds_bits [VKI_FDSET_LONGS]; +} vki_fd_set; + + +/* Gawd ... 
+ Copied from /usr/src/linux-2.4.9-31/./include/asm-i386/posix_types.h +*/ +#undef VKI_FD_SET +#define VKI_FD_SET(fd,fdsetp) \ + __asm__ __volatile__("btsl %1,%0": \ + "=m" (*(vki_fd_set *) (fdsetp)):"r" ((int) (fd))) + +#undef VKI_FD_CLR +#define VKI_FD_CLR(fd,fdsetp) \ + __asm__ __volatile__("btrl %1,%0": \ + "=m" (*(vki_fd_set *) (fdsetp)):"r" ((int) (fd))) + +#undef VKI_FD_ISSET +#define VKI_FD_ISSET(fd,fdsetp) (__extension__ ({ \ + unsigned char __result; \ + __asm__ __volatile__("btl %1,%2 ; setb %0" \ + :"=q" (__result) :"r" ((int) (fd)), \ + "m" (*(vki_fd_set *) (fdsetp))); \ + __result; })) + +#undef VKI_FD_ZERO +#define VKI_FD_ZERO(fdsetp) \ +do { \ + int __d0, __d1; \ + __asm__ __volatile__("cld ; rep ; stosl" \ + :"=m" (*(vki_fd_set *) (fdsetp)), \ + "=&c" (__d0), "=&D" (__d1) \ + :"a" (0), "1" (VKI_FDSET_LONGS), \ + "2" ((vki_fd_set *) (fdsetp)) : "memory"); \ +} while (0) + + + +/* +./include/asm-i386/posix_types.h:typedef long __kernel_suseconds_t; +./include/linux/types.h:typedef __kernel_suseconds_t suseconds_t; + +./include/asm-i386/posix_types.h:typedef long __kernel_time_t; +./include/linux/types.h:typedef __kernel_time_t time_t; +*/ + +struct vki_timeval { + /* time_t */ long tv_sec; /* seconds */ + /* suseconds_t */ long tv_usec; /* microseconds */ +}; + + + +/* For fcntl on fds .. + from ./include/asm-i386/fcntl.h */ +#define VKI_F_GETFL 3 /* get file->f_flags */ +#define VKI_F_SETFL 4 /* set file->f_flags */ + +#define VKI_O_NONBLOCK 04000 + +/* For nanosleep ... 
+ from ./include/linux/time.h */ +struct vki_timespec { + /* time_t */ long tv_sec; /* seconds */ + long tv_nsec; /* nanoseconds */ +}; + + #endif /* ndef __VG_KERNELIFACE_H */ /*--------------------------------------------------------------------*/ diff --git a/vg_main.c b/vg_main.c index 2fb92cf204..6e5c01ab03 100644 --- a/vg_main.c +++ b/vg_main.c @@ -99,8 +99,6 @@ Int VGOFF_(helper_value_check4_fail) = INVALID_OFFSET; Int VGOFF_(helper_value_check2_fail) = INVALID_OFFSET; Int VGOFF_(helper_value_check1_fail) = INVALID_OFFSET; Int VGOFF_(helper_value_check0_fail) = INVALID_OFFSET; -Int VGOFF_(helper_do_syscall) = INVALID_OFFSET; -Int VGOFF_(helper_do_client_request) = INVALID_OFFSET; Int VGOFF_(helperc_LOADV4) = INVALID_OFFSET; Int VGOFF_(helperc_LOADV2) = INVALID_OFFSET; Int VGOFF_(helperc_LOADV1) = INVALID_OFFSET; @@ -110,7 +108,6 @@ Int VGOFF_(helperc_STOREV1) = INVALID_OFFSET; Int VGOFF_(handle_esp_assignment) = INVALID_OFFSET; Int VGOFF_(fpu_write_check) = INVALID_OFFSET; Int VGOFF_(fpu_read_check) = INVALID_OFFSET; -Int VGOFF_(helper_request_normal_exit) = INVALID_OFFSET; /* This is the actual defn of baseblock. */ @@ -305,14 +302,6 @@ static void vg_init_baseBlock ( void ) = alloc_BaB_1_set( (Addr) & VG_(helper_DAS) ); VGOFF_(helper_DAA) = alloc_BaB_1_set( (Addr) & VG_(helper_DAA) ); - - VGOFF_(helper_request_normal_exit) - = alloc_BaB_1_set( (Addr) & VG_(helper_request_normal_exit) ); - - VGOFF_(helper_do_syscall) - = alloc_BaB_1_set( (Addr) & VG_(helper_do_syscall) ); - VGOFF_(helper_do_client_request) - = alloc_BaB_1_set( (Addr) & VG_(helper_do_client_request) ); } @@ -336,17 +325,6 @@ Addr VG_(esp_saved_over_syscall_d2); /* Counts downwards in vg_run_innerloop. */ UInt VG_(dispatch_ctr); -/* If vg_dispatch_ctr is set to 1 to force a stop, its - previous value is saved here. */ -UInt VG_(dispatch_ctr_SAVED); - -/* This is why vg_run_innerloop() exited. */ -UInt VG_(interrupt_reason); - -/* vg_oursignalhandler() might longjmp(). Here's the jmp_buf. 
*/ -jmp_buf VG_(toploop_jmpbuf); -/* ... and if so, here's the signal which caused it to do so. */ -Int VG_(longjmpd_on_signal); /* 64-bit counter for the number of basic blocks done. */ ULong VG_(bbs_done); @@ -423,10 +401,12 @@ UInt VG_(smc_discard_count) = 0; /* Counts pertaining to internal sanity checking. */ - UInt VG_(sanity_fast_count) = 0; UInt VG_(sanity_slow_count) = 0; +/* Counts pertaining to the scheduler. */ +UInt VG_(num_scheduling_events_MINOR) = 0; +UInt VG_(num_scheduling_events_MAJOR) = 0; /* --------------------------------------------------------------------- @@ -481,176 +461,6 @@ Char** VG_(client_envp); static Char vg_cmdline_copy[M_VG_CMDLINE_STRLEN]; -/* --------------------------------------------------------------------- - Top level simulation loop. - ------------------------------------------------------------------ */ - -/* Create a translation of the client basic block beginning at - orig_addr, and add it to the translation cache & translation table. - This probably doesn't really belong here, but, hey ... */ -void VG_(create_translation_for) ( Addr orig_addr ) -{ - Addr trans_addr; - TTEntry tte; - Int orig_size, trans_size; - /* Ensure there is space to hold a translation. */ - VG_(maybe_do_lru_pass)(); - VG_(translate)( orig_addr, &orig_size, &trans_addr, &trans_size ); - /* Copy data at trans_addr into the translation cache. - Returned pointer is to the code, not to the 4-byte - header. */ - /* Since the .orig_size and .trans_size fields are - UShort, be paranoid. */ - vg_assert(orig_size > 0 && orig_size < 65536); - vg_assert(trans_size > 0 && trans_size < 65536); - tte.orig_size = orig_size; - tte.orig_addr = orig_addr; - tte.trans_size = trans_size; - tte.trans_addr = VG_(copy_to_transcache) - ( trans_addr, trans_size ); - tte.mru_epoch = VG_(current_epoch); - /* Free the intermediary -- was allocated by VG_(emit_code). */ - VG_(jitfree)( (void*)trans_addr ); - /* Add to trans tab and set back pointer. 
*/ - VG_(add_to_trans_tab) ( &tte ); - /* Update stats. */ - VG_(this_epoch_in_count) ++; - VG_(this_epoch_in_osize) += orig_size; - VG_(this_epoch_in_tsize) += trans_size; - VG_(overall_in_count) ++; - VG_(overall_in_osize) += orig_size; - VG_(overall_in_tsize) += trans_size; - /* Record translated area for SMC detection. */ - VG_(smc_mark_original) ( - VG_(baseBlock)[VGOFF_(m_eip)], orig_size ); -} - - -/* Runs the client program from %EIP (baseBlock[off_eip]) until it - asks to exit, or until vg_bbs_to_go jumps have happened (the latter - case is for debugging). */ - -void VG_(toploop) ( void ) -{ - volatile UInt dispatch_ctr_SAVED; - volatile Int done_this_time; - - /* For the LRU structures, records when the epoch began. */ - volatile ULong epoch_started_at = 0; - - while (True) { - next_outer_loop: - - /* Age the LRU structures if an epoch has been completed. */ - if (VG_(bbs_done) - epoch_started_at >= VG_BBS_PER_EPOCH) { - VG_(current_epoch)++; - epoch_started_at = VG_(bbs_done); - if (VG_(clo_verbosity) > 2) { - UInt tt_used, tc_used; - VG_(get_tt_tc_used) ( &tt_used, &tc_used ); - VG_(message)(Vg_UserMsg, - "%lu bbs, in: %d (%d -> %d), out %d (%d -> %d), TT %d, TC %d", - VG_(bbs_done), - VG_(this_epoch_in_count), - VG_(this_epoch_in_osize), - VG_(this_epoch_in_tsize), - VG_(this_epoch_out_count), - VG_(this_epoch_out_osize), - VG_(this_epoch_out_tsize), - tt_used, tc_used - ); - } - VG_(this_epoch_in_count) = 0; - VG_(this_epoch_in_osize) = 0; - VG_(this_epoch_in_tsize) = 0; - VG_(this_epoch_out_count) = 0; - VG_(this_epoch_out_osize) = 0; - VG_(this_epoch_out_tsize) = 0; - } - - /* Figure out how many bbs to ask vg_run_innerloop to do. */ - if (VG_(bbs_to_go) >= VG_SIGCHECK_INTERVAL) - VG_(dispatch_ctr) = 1 + VG_SIGCHECK_INTERVAL; - else - VG_(dispatch_ctr) = 1 + (UInt)VG_(bbs_to_go); - - /* ... and remember what we asked for. */ - dispatch_ctr_SAVED = VG_(dispatch_ctr); - - /* Now have a go at doing them. 
*/ - VG_(interrupt_reason) = VG_Y_SIGCHECK; - if (__builtin_setjmp(VG_(toploop_jmpbuf)) == 0) { - /* try this ... */ - VG_(run_innerloop)(); - /* We get here if the client didn't take a fault. */ - switch (VG_(interrupt_reason)) { - case VG_Y_SIGCHECK: - /* The counter fell to zero and no other situation has - been detected. */ - vg_assert(VG_(dispatch_ctr) == 0); - done_this_time = dispatch_ctr_SAVED - 1; - VG_(bbs_to_go) -= (ULong)done_this_time; - VG_(bbs_done) += (ULong)done_this_time; - /* Exit if the debug run has ended. */ - if (VG_(bbs_to_go) == 0) goto debug_stop; - VG_(deliver_signals)(); - VG_(do_sanity_checks)(False); - goto next_outer_loop; - case VG_Y_EXIT: - /* The target program tried to exit. */ - done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr_SAVED); - done_this_time --; - VG_(bbs_to_go) -= (ULong)done_this_time; - VG_(bbs_done) += (ULong)done_this_time; - return; - case VG_Y_SMC: - /* A write to original code was detected. */ - done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr_SAVED); - VG_(bbs_to_go) -= (ULong)done_this_time; - VG_(bbs_done) += (ULong)done_this_time; - VG_(flush_transtab)(); - goto next_outer_loop; - case VG_Y_TRANSLATE: { - /* Need to provide a translation of code at vg_m_eip. */ - done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr); - vg_assert(done_this_time > 0); - done_this_time --; - VG_(bbs_to_go) -= (ULong)done_this_time; - VG_(bbs_done) += (ULong)done_this_time; - VG_(create_translation_for)(VG_(baseBlock)[VGOFF_(m_eip)]); - goto next_outer_loop; - } - default: - VG_(panic)("vg_toploop: invalid interrupt reason"); - } - } else { - /* We get here if the client took a fault, which caused our - signal handler to longjmp. 
*/ - done_this_time = dispatch_ctr_SAVED - VG_(dispatch_ctr); - VG_(bbs_to_go) -= (ULong)done_this_time; - VG_(bbs_done) += (ULong)done_this_time; - if (VG_(interrupt_reason) == VG_Y_EXIT) return; - VG_(deliver_signals)(); - VG_(do_sanity_checks)(False); - VG_(unblock_host_signal)(VG_(longjmpd_on_signal)); - } - } - - /* NOTREACHED */ - - debug_stop: - /* If we exited because of a debug stop, print the translation - of the last block executed -- by translating it again, and - throwing away the result. */ - VG_(printf)( - "======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n"); - VG_(translate)( VG_(baseBlock)[VGOFF_(m_eip)], NULL, NULL, NULL ); - VG_(printf)("\n"); - VG_(printf)( - "======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n"); -} - - /* --------------------------------------------------------------------- Processing of command-line options. ------------------------------------------------------------------ */ @@ -705,7 +515,7 @@ static void process_cmd_line_options ( void ) VG_(clo_optimise) = True; VG_(clo_instrument) = True; VG_(clo_cleanup) = True; - VG_(clo_client_perms) = False; + VG_(clo_client_perms) = True; VG_(clo_smc_check) = /* VG_CLO_SMC_SOME */ VG_CLO_SMC_NONE; VG_(clo_trace_syscalls) = False; VG_(clo_trace_signals) = False; @@ -1014,6 +824,7 @@ static void process_cmd_line_options ( void ) bad_option("--gdb-attach=yes and --trace-children=yes"); } +#if 0 if (VG_(clo_client_perms) && !VG_(clo_instrument)) { VG_(message)(Vg_UserMsg, ""); VG_(message)(Vg_UserMsg, @@ -1023,6 +834,7 @@ static void process_cmd_line_options ( void ) if (VG_(clo_client_perms)) vg_assert(VG_(clo_instrument)); +#endif VG_(clo_logfile_fd) = eventually_logfile_fd; @@ -1106,8 +918,9 @@ void VG_(copy_m_state_static_to_baseBlock) ( void ) static void vg_show_counts ( void ) { VG_(message)(Vg_DebugMsg, - " dispatch: %lu basic blocks, %d tt_fast misses.", - VG_(bbs_done), VG_(tt_fast_misses)); + " lru: %d epochs, %d clearings.", + VG_(current_epoch), + 
VG_(number_of_lrus) ); VG_(message)(Vg_DebugMsg, "translate: new %d (%d -> %d), discard %d (%d -> %d).", VG_(overall_in_count), @@ -1117,9 +930,10 @@ static void vg_show_counts ( void ) VG_(overall_out_osize), VG_(overall_out_tsize) ); VG_(message)(Vg_DebugMsg, - " lru: %d epochs, %d clearings.", - VG_(current_epoch), - VG_(number_of_lrus) ); + " dispatch: %lu basic blocks, %d/%d sched events, %d tt_fast misses.", + VG_(bbs_done), VG_(num_scheduling_events_MAJOR), + VG_(num_scheduling_events_MINOR), + VG_(tt_fast_misses)); VG_(message)(Vg_DebugMsg, "reg-alloc: %d t-req-spill, " "%d+%d orig+spill uis, %d total-reg-r.", @@ -1150,7 +964,8 @@ static void vg_show_counts ( void ) void VG_(main) ( void ) { - Int i; + Int i; + VgSchedReturnCode src; /* Set up our stack sanity-check words. */ for (i = 0; i < 10; i++) { @@ -1211,11 +1026,18 @@ void VG_(main) ( void ) VG_(message)(Vg_UserMsg, ""); VG_(bbs_to_go) = VG_(clo_stop_after); - VG_(toploop)(); + + VG_(scheduler_init)(); + src = VG_(scheduler)(); if (VG_(clo_verbosity) > 0) VG_(message)(Vg_UserMsg, ""); + if (src == VgSrc_Deadlock) { + VG_(message)(Vg_UserMsg, + "Warning: pthread scheduler exited due to deadlock"); + } + if (VG_(clo_instrument)) { VG_(show_all_errors)(); VG_(clientmalloc_done)(); @@ -1226,8 +1048,9 @@ void VG_(main) ( void ) if (VG_(clo_leak_check)) VG_(detect_memory_leaks)(); } VG_(running_on_simd_CPU) = False; - - VG_(do_sanity_checks)(True /*include expensive checks*/ ); + + VG_(do_sanity_checks)( 0 /* root thread */, + True /*include expensive checks*/ ); if (VG_(clo_verbosity) > 1) vg_show_counts(); @@ -1262,6 +1085,7 @@ void VG_(main) ( void ) } /* Prepare to restore state to the real CPU. 
*/ + VG_(load_thread_state)(0); VG_(copy_baseBlock_to_m_state_static)(); /* This pushes a return address on the simulator's stack, which @@ -1349,116 +1173,6 @@ extern void VG_(unimplemented) ( Char* msg ) } -/*-------------------------------------------------------------*/ -/*--- Replace some C lib things with equivs which don't get ---*/ -/*--- spurious value warnings. THEY RUN ON SIMD CPU! ---*/ -/*-------------------------------------------------------------*/ - -char* strrchr ( const char* s, int c ) -{ - UChar ch = (UChar)((UInt)c); - UChar* p = (UChar*)s; - UChar* last = NULL; - while (True) { - if (*p == ch) last = p; - if (*p == 0) return last; - p++; - } -} - -char* strchr ( const char* s, int c ) -{ - UChar ch = (UChar)((UInt)c); - UChar* p = (UChar*)s; - while (True) { - if (*p == ch) return p; - if (*p == 0) return NULL; - p++; - } -} - -char* strcat ( char* dest, const char* src ) -{ - Char* dest_orig = dest; - while (*dest) dest++; - while (*src) *dest++ = *src++; - *dest = 0; - return dest_orig; -} - -unsigned int strlen ( const char* str ) -{ - UInt i = 0; - while (str[i] != 0) i++; - return i; -} - -char* strcpy ( char* dest, const char* src ) -{ - Char* dest_orig = dest; - while (*src) *dest++ = *src++; - *dest = 0; - return dest_orig; -} - -int strncmp ( const char* s1, const char* s2, unsigned int nmax ) -{ - unsigned int n = 0; - while (True) { - if (n >= nmax) return 0; - if (*s1 == 0 && *s2 == 0) return 0; - if (*s1 == 0) return -1; - if (*s2 == 0) return 1; - - if (*(UChar*)s1 < *(UChar*)s2) return -1; - if (*(UChar*)s1 > *(UChar*)s2) return 1; - - s1++; s2++; n++; - } -} - -int strcmp ( const char* s1, const char* s2 ) -{ - while (True) { - if (*s1 == 0 && *s2 == 0) return 0; - if (*s1 == 0) return -1; - if (*s2 == 0) return 1; - - if (*(char*)s1 < *(char*)s2) return -1; - if (*(char*)s1 > *(char*)s2) return 1; - - s1++; s2++; - } -} - -void* memchr(const void *s, int c, unsigned int n) -{ - unsigned int i; - UChar c0 = (UChar)c; - UChar* 
p = (UChar*)s; - for (i = 0; i < n; i++) - if (p[i] == c0) return (void*)(&p[i]); - return NULL; -} - -void* memcpy( void *dst, const void *src, unsigned int len ) -{ - register char *d; - register char *s; - if ( dst > src ) { - d = (char *)dst + len - 1; - s = (char *)src + len - 1; - while ( len-- ) - *d-- = *s--; - } else if ( dst < src ) { - d = (char *)dst; - s = (char *)src; - while ( len-- ) - *d++ = *s++; - } - return dst; -} - /*--------------------------------------------------------------------*/ /*--- end vg_main.c ---*/ /*--------------------------------------------------------------------*/ diff --git a/vg_memory.c b/vg_memory.c index eea86acd61..b219a3a07f 100644 --- a/vg_memory.c +++ b/vg_memory.c @@ -2122,10 +2122,11 @@ Bool VG_(first_and_last_secondaries_look_plausible) ( void ) /* A fast sanity check -- suitable for calling circa once per millisecond. */ -void VG_(do_sanity_checks) ( Bool force_expensive ) +void VG_(do_sanity_checks) ( ThreadId tid, Bool force_expensive ) { - Int i; - Bool do_expensive_checks; + Int i; + Bool do_expensive_checks; + ThreadState* tst; if (VG_(sanity_level) < 1) return; @@ -2133,6 +2134,9 @@ void VG_(do_sanity_checks) ( Bool force_expensive ) VG_(sanity_fast_count)++; + tst = VG_(get_thread_state)(tid); + vg_assert(tst != NULL && tst->status != VgTs_Empty); + /* Check that we haven't overrun our private stack. */ for (i = 0; i < 10; i++) { vg_assert(VG_(stack)[i] @@ -2146,7 +2150,7 @@ void VG_(do_sanity_checks) ( Bool force_expensive ) if (VG_(clo_instrument)) { /* Check that the eflags tag is as expected. 
*/ - UInt vv = VG_(baseBlock)[VGOFF_(sh_eflags)]; + UInt vv = tst->sh_eflags; vg_assert(vv == VGM_EFLAGS_VALID || VGM_EFLAGS_INVALID); /* Check that nobody has spuriously claimed that the first or @@ -2154,12 +2158,6 @@ void VG_(do_sanity_checks) ( Bool force_expensive ) vg_assert(VG_(first_and_last_secondaries_look_plausible)); } -# if 0 - if ( (VG_(baseBlock)[VGOFF_(sh_eflags)] & 1) == 1) - VG_(printf)("UNDEF\n") ; else - VG_(printf)("def\n") ; -# endif - /* --- Now some more expensive checks. ---*/ /* Once every 25 times, check some more expensive stuff. */ @@ -2233,6 +2231,9 @@ static void uint_to_bits ( UInt x, Char* str ) vg_assert(w == 36); } +/* Caution! Not vthread-safe; looks in VG_(baseBlock), not the thread + state table. */ + void VG_(show_reg_tags) ( void ) { Char buf1[36]; diff --git a/vg_mylibc.c b/vg_mylibc.c index 2ba0753d32..31f2b18928 100644 --- a/vg_mylibc.c +++ b/vg_mylibc.c @@ -232,7 +232,7 @@ Int VG_(ksignal)(Int signum, void (*sighandler)(Int)) /* --------------------------------------------------------------------- - mmap/munmap, exit + mmap/munmap, exit, fcntl ------------------------------------------------------------------ */ /* Returns -1 on failure. */ @@ -266,6 +266,43 @@ void VG_(exit)( Int status ) vg_assert(2+2 == 5); } +/* Returns -1 on error. */ +Int VG_(fcntl) ( Int fd, Int cmd, Int arg ) +{ + Int res = vg_do_syscall3(__NR_fcntl, fd, cmd, arg); + return VG_(is_kerror)(res) ? -1 : res; +} + +/* Returns -1 on error. */ +Int VG_(select)( Int n, + vki_fd_set* readfds, + vki_fd_set* writefds, + vki_fd_set* exceptfds, + struct vki_timeval * timeout ) +{ + Int res; + UInt args[5]; + args[0] = n; + args[1] = (UInt)readfds; + args[2] = (UInt)writefds; + args[3] = (UInt)exceptfds; + args[4] = (UInt)timeout; + res = vg_do_syscall1(__NR_select, (UInt)(&(args[0])) ); + return VG_(is_kerror)(res) ? -1 : res; + return res; +} + +/* Returns -1 on error, but 0 if ok or interrupted. 
*/ +Int VG_(nanosleep)( const struct vki_timespec *req, + struct vki_timespec *rem ) +{ + Int res; + res = vg_do_syscall2(__NR_nanosleep, (UInt)req, (UInt)rem); + if (res == -VKI_EINVAL) return -1; + return 0; +} + + /* --------------------------------------------------------------------- printf implementation. The key function, vg_vprintf(), emits chars into a caller-supplied function. Distantly derived from: @@ -809,7 +846,6 @@ void VG_(assert_fail) ( Char* expr, Char* file, Int line, Char* fn ) "valgrind", file, line, fn, expr ); VG_(printf)("Please report this bug to me at: %s\n\n", EMAIL_ADDR); VG_(shutdown_logging)(); - /* vg_restore_SIGABRT(); */ VG_(exit)(1); } @@ -819,7 +855,6 @@ void VG_(panic) ( Char* str ) VG_(printf)("Basic block ctr is approximately %llu\n", VG_(bbs_done) ); VG_(printf)("Please report this bug to me at: %s\n\n", EMAIL_ADDR); VG_(shutdown_logging)(); - /* vg_restore_SIGABRT(); */ VG_(exit)(1); } @@ -900,6 +935,16 @@ Int VG_(getpid) ( void ) return res; } +/* Read a notional elapsed (wallclock-time) timer, giving a 64-bit + microseconds count. */ +ULong VG_(read_microsecond_timer)( void ) +{ + Int res; + struct vki_timeval tv; + res = vg_do_syscall2(__NR_gettimeofday, (UInt)&tv, (UInt)NULL); + vg_assert(!VG_(is_kerror)(res)); + return (1000000ULL * (ULong)(tv.tv_sec)) + (ULong)(tv.tv_usec); +} /* --------------------------------------------------------------------- Primitive support for bagging memory via mmap. diff --git a/vg_signals.c b/vg_signals.c index 2372fc4738..ea2826bebb 100644 --- a/vg_signals.c +++ b/vg_signals.c @@ -36,13 +36,6 @@ #include "vg_unsafe.h" -/* --------------------------------------------------------------------- - An implementation of signal sets and other grunge, identical to - that in the target kernels (Linux 2.2.X and 2.4.X). - ------------------------------------------------------------------ */ - - - /* --------------------------------------------------------------------- Signal state for this process. 
------------------------------------------------------------------ */ @@ -64,8 +57,29 @@ void* VG_(sighandler)[VKI_KNSIG]; void* VG_(sigpending)[VKI_KNSIG]; -/* See decl in vg_include.h for explanation. */ -Int VG_(syscall_depth) = 0; + +/* --------------------------------------------------------------------- + Handy utilities to block/restore all host signals. + ------------------------------------------------------------------ */ + +/* Block all host signals, dumping the old mask in *saved_mask. */ +void VG_(block_all_host_signals) ( /* OUT */ vki_ksigset_t* saved_mask ) +{ + Int ret; + vki_ksigset_t block_procmask; + VG_(ksigfillset)(&block_procmask); + ret = VG_(ksigprocmask) + (VKI_SIG_SETMASK, &block_procmask, saved_mask); + vg_assert(ret == 0); +} + +/* Restore the blocking mask using the supplied saved one. */ +void VG_(restore_host_signals) ( /* IN */ vki_ksigset_t* saved_mask ) +{ + Int ret; + ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL); + vg_assert(ret == 0); +} /* --------------------------------------------------------------------- @@ -78,9 +92,14 @@ Int VG_(syscall_depth) = 0; typedef struct { - UInt retaddr; /* Sig handler's (bogus) return address */ - Int sigNo; /* The arg to the sig handler. */ + /* These are parameters to the signal handler. */ + UInt retaddr; /* Sig handler's (bogus) return address */ + Int sigNo; /* The arg to the sig handler. */ + Addr psigInfo; /* ptr to siginfo_t; NULL for now. */ + Addr puContext; /* ptr to ucontext; NULL for now. */ + /* Sanity check word. */ UInt magicPI; + /* Saved processor state. */ UInt fpustate[VG_SIZE_OF_FPUSTATE_W]; UInt eax; UInt ecx; @@ -92,9 +111,14 @@ typedef UInt edi; Addr eip; UInt eflags; + /* Scheduler-private stuff: what was the thread's status prior to + delivering this signal? */ + ThreadStatus status; + /* Sanity check word. 
Is the highest-addressed word; do not + move!*/ UInt magicE; } - VgSigContext; + VgSigFrame; @@ -113,35 +137,52 @@ void VG_(signalreturn_bogusRA) ( void ) handler. This includes the signal number and a bogus return address. */ static -void vg_push_signal_frame ( int sigNo ) +void vg_push_signal_frame ( ThreadId tid, int sigNo ) { Int i; - UInt esp; - VgSigContext sigctx; + Addr esp; + VgSigFrame* frame; + ThreadState* tst; + + tst = VG_(get_thread_state)(tid); + esp = tst->m_esp; + + esp -= sizeof(VgSigFrame); + frame = (VgSigFrame*)esp; + /* Assert that the frame is placed correctly. */ + vg_assert( (sizeof(VgSigFrame) & 0x3) == 0 ); + vg_assert( ((Char*)(&frame->magicE)) + sizeof(UInt) + == ((Char*)(tst->m_esp)) ); + + frame->retaddr = (UInt)(&VG_(signalreturn_bogusRA)); + frame->sigNo = sigNo; + frame->psigInfo = (Addr)NULL; + frame->puContext = (Addr)NULL; + frame->magicPI = 0x31415927; + for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++) - sigctx.fpustate[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i]; - - sigctx.magicPI = 0x31415927; - sigctx.magicE = 0x27182818; - sigctx.eax = VG_(baseBlock)[VGOFF_(m_eax)]; - sigctx.ecx = VG_(baseBlock)[VGOFF_(m_ecx)]; - sigctx.edx = VG_(baseBlock)[VGOFF_(m_edx)]; - sigctx.ebx = VG_(baseBlock)[VGOFF_(m_ebx)]; - sigctx.ebp = VG_(baseBlock)[VGOFF_(m_ebp)]; - sigctx.esp = VG_(baseBlock)[VGOFF_(m_esp)]; - sigctx.esi = VG_(baseBlock)[VGOFF_(m_esi)]; - sigctx.edi = VG_(baseBlock)[VGOFF_(m_edi)]; - sigctx.eflags = VG_(baseBlock)[VGOFF_(m_eflags)]; - sigctx.eip = VG_(baseBlock)[VGOFF_(m_eip)]; - sigctx.retaddr = (UInt)(&VG_(signalreturn_bogusRA)); - sigctx.sigNo = sigNo; - - esp = VG_(baseBlock)[VGOFF_(m_esp)]; - vg_assert((sizeof(VgSigContext) & 0x3) == 0); - - esp -= sizeof(VgSigContext); - for (i = 0; i < sizeof(VgSigContext)/4; i++) - ((UInt*)esp)[i] = ((UInt*)(&sigctx))[i]; + frame->fpustate[i] = tst->m_fpu[i]; + + frame->eax = tst->m_eax; + frame->ecx = tst->m_ecx; + frame->edx = tst->m_edx; + frame->ebx = tst->m_ebx; + frame->ebp = 
tst->m_ebp; + frame->esp = tst->m_esp; + frame->esi = tst->m_esi; + frame->edi = tst->m_edi; + frame->eip = tst->m_eip; + frame->eflags = tst->m_eflags; + + frame->status = tst->status; + + frame->magicE = 0x27182818; + + /* Set the thread so it will next run the handler. */ + tst->m_esp = esp; + tst->m_eip = (Addr)VG_(sigpending)[sigNo]; + /* This thread needs to be marked runnable, but we leave that the + caller to do. */ /* Make sigNo and retaddr fields readable -- at 0(%ESP) and 4(%ESP) */ if (VG_(clo_instrument)) { @@ -149,11 +190,9 @@ void vg_push_signal_frame ( int sigNo ) VGM_(make_readable) ( ((Addr)esp)+4 ,4 ); } - VG_(baseBlock)[VGOFF_(m_esp)] = esp; - VG_(baseBlock)[VGOFF_(m_eip)] = (Addr)VG_(sigpending)[sigNo]; /* VG_(printf)("pushed signal frame; %%ESP now = %p, next %%EBP = %p\n", - esp, VG_(baseBlock)[VGOFF_(m_eip)]); + esp, tst->m_eip); */ } @@ -162,43 +201,56 @@ void vg_push_signal_frame ( int sigNo ) simulated machine state, and return the signal number that the frame was for. */ static -Int vg_pop_signal_frame ( void ) +Int vg_pop_signal_frame ( ThreadId tid ) { - UInt esp; + Addr esp; Int sigNo, i; - VgSigContext* sigctx; - /* esp is now pointing at the magicPI word on the stack, viz, - eight bytes above the bottom of the vg_sigcontext. - */ - esp = VG_(baseBlock)[VGOFF_(m_esp)]; - sigctx = (VgSigContext*)(esp-4); + VgSigFrame* frame; + ThreadState* tst; + + tst = VG_(get_thread_state)(tid); - vg_assert(sigctx->magicPI == 0x31415927); - vg_assert(sigctx->magicE == 0x27182818); + /* esp is now pointing at the sigNo field in the signal frame. 
*/ + esp = tst->m_esp; + frame = (VgSigFrame*)(esp-4); + + vg_assert(frame->magicPI == 0x31415927); + vg_assert(frame->magicE == 0x27182818); if (VG_(clo_trace_signals)) VG_(message)(Vg_DebugMsg, "vg_pop_signal_frame: valid magic"); /* restore machine state */ for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++) - VG_(baseBlock)[VGOFF_(m_fpustate) + i] = sigctx->fpustate[i]; + tst->m_fpu[i] = frame->fpustate[i]; - /* Mark the sigctx structure as nonaccessible. Has to happen - _before_ vg_m_state.m_esp is given a new value.*/ - if (VG_(clo_instrument)) - VGM_(handle_esp_assignment) ( sigctx->esp ); + /* Mark the frame structure as nonaccessible. Has to happen + _before_ vg_m_state.m_esp is given a new value. + handle_esp_assignment reads %ESP from baseBlock, so we park it + there first. Re-place the junk there afterwards. */ + if (VG_(clo_instrument)) { + vg_assert(VG_(baseBlock)[VGOFF_(m_esp)] == 0xDEADBEEF); + VG_(baseBlock)[VGOFF_(m_esp)] = tst->m_esp; + VGM_(handle_esp_assignment) ( frame->esp ); + VG_(baseBlock)[VGOFF_(m_esp)] = 0xDEADBEEF; + } /* Restore machine state from the saved context. */ - VG_(baseBlock)[VGOFF_(m_eax)] = sigctx->eax; - VG_(baseBlock)[VGOFF_(m_ecx)] = sigctx->ecx; - VG_(baseBlock)[VGOFF_(m_edx)] = sigctx->edx; - VG_(baseBlock)[VGOFF_(m_ebx)] = sigctx->ebx; - VG_(baseBlock)[VGOFF_(m_ebp)] = sigctx->ebp; - VG_(baseBlock)[VGOFF_(m_esp)] = sigctx->esp; - VG_(baseBlock)[VGOFF_(m_esi)] = sigctx->esi; - VG_(baseBlock)[VGOFF_(m_edi)] = sigctx->edi; - VG_(baseBlock)[VGOFF_(m_eflags)] = sigctx->eflags; - VG_(baseBlock)[VGOFF_(m_eip)] = sigctx->eip; - sigNo = sigctx->sigNo; + tst->m_eax = frame->eax; + tst->m_ecx = frame->ecx; + tst->m_edx = frame->edx; + tst->m_ebx = frame->ebx; + tst->m_ebp = frame->ebp; + tst->m_esp = frame->esp; + tst->m_esi = frame->esi; + tst->m_edi = frame->edi; + tst->m_eflags = frame->eflags; + tst->m_eip = frame->eip; + sigNo = frame->sigNo; + + /* And restore the thread's status to what it was before the signal + was delivered. 
*/ + tst->status = frame->status; + return sigNo; } @@ -207,18 +259,17 @@ Int vg_pop_signal_frame ( void ) VgSigContext and continue with whatever was going on before the handler ran. */ -void VG_(signal_returns) ( void ) +void VG_(signal_returns) ( ThreadId tid ) { - Int sigNo, ret; - vki_ksigset_t block_procmask; + Int sigNo; vki_ksigset_t saved_procmask; /* Block host signals ... */ - VG_(ksigfillset)(&block_procmask); - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask); - vg_assert(ret == 0); + VG_(block_all_host_signals)( &saved_procmask ); - sigNo = vg_pop_signal_frame(); + /* Pop the signal frame and restore tid's status to what it was + before the signal was delivered. */ + sigNo = vg_pop_signal_frame(tid); /* You would have thought that the following assertion made sense here: @@ -242,40 +293,18 @@ void VG_(signal_returns) ( void ) VG_(sigpending)[sigNo] = VG_SIGIDLE; /* Unlock and return. */ - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL); - vg_assert(ret == 0); + VG_(restore_host_signals)( &saved_procmask ); - /* The main dispatch loop now continues at vg_m_eip. */ -} - - -/* Restore the default host behaviour of SIGABRT, and unblock it, - so we can exit the simulator cleanly by doing exit/abort/assert fail. -*/ -void VG_(restore_SIGABRT) ( void ) -{ - vki_ksigset_t set; - vki_ksigaction act; - act.ksa_flags = VKI_SA_RESTART; - act.ksa_handler = VKI_SIG_DFL; - VG_(ksigemptyset)(&act.ksa_mask); - - VG_(ksigemptyset)(&set); - VG_(ksigaddset)(&set,VKI_SIGABRT); - - /* If this doesn't work, tough. Don't check return code. */ - VG_(ksigaction)(VKI_SIGABRT, &act, NULL); - VG_(ksigprocmask)(VKI_SIG_UNBLOCK, &set, NULL); + /* Scheduler now can resume this thread, or perhaps some other. */ } /* Deliver all pending signals, by building stack frames for their handlers. 
*/ -void VG_(deliver_signals) ( void ) +void VG_(deliver_signals) ( ThreadId tid ) { - vki_ksigset_t block_procmask; vki_ksigset_t saved_procmask; - Int ret, sigNo; + Int sigNo; Bool found; /* A cheap check. We don't need to have exclusive access @@ -295,10 +324,9 @@ void VG_(deliver_signals) ( void ) blocking all the host's signals. That means vg_oursignalhandler can't run whilst we are messing with stuff. */ - VG_(ksigfillset)(&block_procmask); - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask); - vg_assert(ret == 0); + VG_(block_all_host_signals)( &saved_procmask ); + /* Look for signals to deliver ... */ for (sigNo = 1; sigNo < VKI_KNSIG; sigNo++) { if (VG_(sigpending)[sigNo] == VG_SIGIDLE || VG_(sigpending)[sigNo] == VG_SIGRUNNING) continue; @@ -310,94 +338,19 @@ void VG_(deliver_signals) ( void ) %EIP so that when execution continues, we will enter the signal handler with the frame on top of the client's stack, as it expects. */ - vg_push_signal_frame ( sigNo ); - + vg_push_signal_frame ( tid, sigNo ); + VG_(get_thread_state)(tid)->status = VgTs_Runnable; + /* Signify that the signal has been delivered. */ VG_(sigpending)[sigNo] = VG_SIGRUNNING; } /* Unlock and return. */ - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL); - vg_assert(ret == 0); + VG_(restore_host_signals)( &saved_procmask ); return; } -/* ----------- HACK ALERT ----------- */ -/* Note carefully that this runs with all host signals disabled! 
*/ -static -void vg_deliver_signal_immediately ( Int sigNo ) -{ - Int n_bbs_done; - Int sigNo2; - Addr next_orig_addr; - Addr next_trans_addr; - - if (VG_(clo_verbosity) > 0 - && (True || VG_(clo_trace_signals))) - VG_(message)(Vg_DebugExtraMsg, - "deliver signal %d immediately: BEGIN", sigNo ); - /* VG_(printf)("resumption addr is %p\n", - VG_(baseBlock)[VGOFF_(m_eip)]); */ - - vg_push_signal_frame ( sigNo ); - n_bbs_done = 0; - - /* Single-step the client (ie, run the handler) until it jumps to - VG_(signalreturn_bogusRA) */ - - while (True) { - - if (n_bbs_done >= VG_MAX_BBS_IN_IMMEDIATE_SIGNAL) - VG_(unimplemented)( - "handling signal whilst client blocked in syscall: " - "handler runs too long" - ); - - next_orig_addr = VG_(baseBlock)[VGOFF_(m_eip)]; - - if (next_orig_addr == (Addr)(&VG_(trap_here))) - VG_(unimplemented)( - "handling signal whilst client blocked in syscall: " - "handler calls malloc (et al)" - ); - - /* VG_(printf)("next orig addr = %p\n", next_orig_addr); */ - if (next_orig_addr == (Addr)(&VG_(signalreturn_bogusRA))) - break; - - next_trans_addr = VG_(search_transtab) ( next_orig_addr ); - if (next_trans_addr == (Addr)NULL) { - VG_(create_translation_for) ( next_orig_addr ); - next_trans_addr = VG_(search_transtab) ( next_orig_addr ); - } - - vg_assert(next_trans_addr != (Addr)NULL); - next_orig_addr = VG_(run_singleton_translation)(next_trans_addr); - VG_(baseBlock)[VGOFF_(m_eip)] = next_orig_addr; - n_bbs_done++; - } - - sigNo2 = vg_pop_signal_frame(); - vg_assert(sigNo2 == sigNo); - - if (VG_(clo_verbosity) > 0 - && (True || VG_(clo_trace_signals))) - VG_(message)(Vg_DebugExtraMsg, - "deliver signal %d immediately: END, %d bbs done", - sigNo, n_bbs_done ); - - /* Invalidate the tt_fast cache. We've been (potentially) adding - translations and even possibly doing LRUs without keeping it up - to date, so we'd better nuke it before going any further, to - avoid inconsistencies with the main TT/TC structure. 
*/ - VG_(invalidate_tt_fast)(); -} - - -/* ----------- end of HACK ALERT ----------- */ - - /* Receive a signal from the host, and either discard it or park it in the queue of pending signals. All other signals will be blocked when this handler runs. Runs with all host signals blocked, so as @@ -405,8 +358,7 @@ void vg_deliver_signal_immediately ( Int sigNo ) static void VG_(oursignalhandler) ( Int sigNo ) { - Int ret; - vki_ksigset_t block_procmask; + Int dummy_local; vki_ksigset_t saved_procmask; if (VG_(clo_trace_signals)) { @@ -418,20 +370,24 @@ static void VG_(oursignalhandler) ( Int sigNo ) /* Sanity check. Ensure we're really running on the signal stack we asked for. */ if ( !( - ((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&ret)) + ((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&dummy_local)) && - ((Char*)(&ret) < (Char*)(&(VG_(sigstack)[10000]))) + ((Char*)(&dummy_local) < (Char*)(&(VG_(sigstack)[10000]))) ) ) { - VG_(message)(Vg_DebugMsg, "FATAL: signal delivered on the wrong stack?!"); - VG_(message)(Vg_DebugMsg, "A possible workaround follows. Please tell me"); - VG_(message)(Vg_DebugMsg, "(jseward@acm.org) if the suggested workaround doesn't help."); + VG_(message)(Vg_DebugMsg, + "FATAL: signal delivered on the wrong stack?!"); + VG_(message)(Vg_DebugMsg, + "A possible workaround follows. 
Please tell me"); + VG_(message)(Vg_DebugMsg, + "(jseward@acm.org) if the suggested workaround doesn't help."); VG_(unimplemented) - ("support for progs compiled with -p/-pg; rebuild your prog without -p/-pg"); + ("support for progs compiled with -p/-pg; " + "rebuild your prog without -p/-pg"); } - vg_assert((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&ret)); - vg_assert((Char*)(&ret) < (Char*)(&(VG_(sigstack)[10000]))); + vg_assert((Char*)(&(VG_(sigstack)[0])) <= (Char*)(&dummy_local)); + vg_assert((Char*)(&dummy_local) < (Char*)(&(VG_(sigstack)[10000]))); if (sigNo == VKI_SIGABRT && VG_(sighandler)[sigNo] == NULL) { /* We get here if SIGABRT is delivered and the client hasn't @@ -442,21 +398,19 @@ static void VG_(oursignalhandler) ( Int sigNo ) VG_(end_msg)(); } VG_(ksignal)(VKI_SIGABRT, VKI_SIG_DFL); - VG_(interrupt_reason) = VG_Y_EXIT; VG_(longjmpd_on_signal) = VKI_SIGABRT; - __builtin_longjmp(VG_(toploop_jmpbuf),1); + __builtin_longjmp(VG_(scheduler_jmpbuf),1); } - /* Block all host signals. */ - VG_(ksigfillset)(&block_procmask); - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask); - vg_assert(ret == 0); + VG_(block_all_host_signals)( &saved_procmask ); if (VG_(sighandler)[sigNo] == NULL) { if (VG_(clo_trace_signals)) { VG_(add_to_msg)("unexpected!"); VG_(end_msg)(); } + /* Note: we panic with all signals blocked here. Don't think + that matters. */ VG_(panic)("vg_oursignalhandler: unexpected signal"); } @@ -478,47 +432,26 @@ static void VG_(oursignalhandler) ( Int sigNo ) } } else { - /* Ok, we'd better deliver it to the client, one way or another. */ + /* Ok, we'd better deliver it to the client. */ vg_assert(VG_(sigpending)[sigNo] == VG_SIGIDLE); - - if (VG_(syscall_depth) == 0) { - /* The usual case; delivering a signal to the client, and the - client is not currently in a syscall. Queue it up for - delivery at some point in the future. 
*/ - VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo]; - if (VG_(clo_trace_signals)) { - VG_(add_to_msg)("queued" ); - VG_(end_msg)(); - } - } else { - /* The nasty case, which was causing kmail to freeze up: the - client is (presumably blocked) in a syscall. We have to - deliver the signal right now, because it may be that - running the sighandler is the only way that the syscall - will be able to return. In which case, if we don't do - that, the client will deadlock. */ - if (VG_(clo_trace_signals)) { - VG_(add_to_msg)("delivering immediately" ); - VG_(end_msg)(); - } - /* Note that this runs with all host signals blocked. */ - VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo]; - vg_deliver_signal_immediately(sigNo); - VG_(sigpending)[sigNo] = VG_SIGIDLE; - /* VG_(printf)("resuming at %p\n", VG_(baseBlock)[VGOFF_(m_eip)]); */ + /* Queue it up for delivery at some point in the future. */ + VG_(sigpending)[sigNo] = VG_(sighandler)[sigNo]; + if (VG_(clo_trace_signals)) { + VG_(add_to_msg)("queued" ); + VG_(end_msg)(); } } - /* We've finished messing with the queue, so re-enable host signals. */ - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL); + /* We've finished messing with the queue, so re-enable host + signals. */ + VG_(restore_host_signals)( &saved_procmask ); - vg_assert(ret == 0); if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS || sigNo == VKI_SIGFPE || sigNo == VKI_SIGILL) { - /* Can't continue; must longjmp and thus enter the sighandler - immediately. */ + /* Can't continue; must longjmp back to the scheduler and thus + enter the sighandler immediately. 
*/ VG_(longjmpd_on_signal) = sigNo; - __builtin_longjmp(VG_(toploop_jmpbuf),1); + __builtin_longjmp(VG_(scheduler_jmpbuf),1); } } @@ -559,17 +492,14 @@ void VG_(sigstartup_actions) ( void ) { Int i, ret; - vki_ksigset_t block_procmask; vki_ksigset_t saved_procmask; vki_kstack_t altstack_info; vki_ksigaction sa; - /* VG_(printf)("SIGSTARTUP\n"); */ + /* VG_(printf)("SIGSTARTUP\n"); */ /* Block all signals. saved_procmask remembers the previous mask. */ - VG_(ksigfillset)(&block_procmask); - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask); - vg_assert(ret == 0); + VG_(block_all_host_signals)( &saved_procmask ); /* Register an alternative stack for our own signal handler to run on. */ @@ -615,8 +545,7 @@ void VG_(sigstartup_actions) ( void ) VG_(ksignal)(VKI_SIGABRT, &VG_(oursignalhandler)); /* Finally, restore the blocking mask. */ - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL); - vg_assert(ret == 0); + VG_(restore_host_signals)( &saved_procmask ); } @@ -635,14 +564,10 @@ void VG_(sigshutdown_actions) ( void ) { Int i, ret; - vki_ksigset_t block_procmask; vki_ksigset_t saved_procmask; vki_ksigaction sa; - /* Block all signals. */ - VG_(ksigfillset)(&block_procmask); - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &block_procmask, &saved_procmask); - vg_assert(ret == 0); + VG_(block_all_host_signals)( &saved_procmask ); /* copy the sim signal actions to the real ones. */ for (i = 1; i < VKI_KNSIG; i++) { @@ -654,9 +579,7 @@ void VG_(sigshutdown_actions) ( void ) ret = VG_(ksigaction)(i, &sa, NULL); } - /* Finally, copy the simulated process mask to the real one. */ - ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, &saved_procmask, NULL); - vg_assert(ret == 0); + VG_(restore_host_signals)( &saved_procmask ); } @@ -665,18 +588,16 @@ void VG_(sigshutdown_actions) ( void ) ------------------------------------------------------------------ */ /* Do more error checking? 
*/ -void VG_(do__NR_sigaction) ( void ) +void VG_(do__NR_sigaction) ( ThreadId tid ) { UInt res; void* our_old_handler; vki_ksigaction* new_action; vki_ksigaction* old_action; - UInt param1 - = VG_(baseBlock)[VGOFF_(m_ebx)]; /* int sigNo */ - UInt param2 - = VG_(baseBlock)[VGOFF_(m_ecx)]; /* k_sigaction* new_action */ - UInt param3 - = VG_(baseBlock)[VGOFF_(m_edx)]; /* k_sigaction* old_action */ + ThreadState* tst = VG_(get_thread_state)( tid ); + UInt param1 = tst->m_ebx; /* int sigNo */ + UInt param2 = tst->m_ecx; /* k_sigaction* new_action */ + UInt param3 = tst->m_edx; /* k_sigaction* old_action */ new_action = (vki_ksigaction*)param2; old_action = (vki_ksigaction*)param3; @@ -722,7 +643,7 @@ void VG_(do__NR_sigaction) ( void ) } } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); /* VG_(printf)("RES = %d\n", res); */ /* If the client asks for the old handler, maintain our fiction @@ -750,7 +671,7 @@ void VG_(do__NR_sigaction) ( void ) goto good; good: - VG_(baseBlock)[VGOFF_(m_eax)] = (UInt)0; + tst->m_eax = (UInt)0; return; bad_signo: diff --git a/vg_syscall.S b/vg_syscall.S index 210328a690..c07d95567c 100644 --- a/vg_syscall.S +++ b/vg_syscall.S @@ -41,10 +41,6 @@ # m_state_static, and back afterwards. VG_(do_syscall): - cmpl $2, VG_(syscall_depth) - jz do_syscall_DEPTH_2 - - # depth 1 copy follows ... # Save all the int registers of the real machines state on the # simulators stack. pushal @@ -104,76 +100,6 @@ VG_(do_syscall): ret - - - - - - - -do_syscall_DEPTH_2: - - # depth 2 copy follows ... - # Save all the int registers of the real machines state on the - # simulators stack. 
- pushal - - # and save the real FPU state too - fwait - fnsave VG_(real_fpu_state_saved_over_syscall_d2) - frstor VG_(real_fpu_state_saved_over_syscall_d2) - - # remember what the simulators stack pointer is - movl %esp, VG_(esp_saved_over_syscall_d2) - - # Now copy the simulated machines state into the real one - # esp still refers to the simulators stack - frstor VG_(m_state_static)+40 - movl VG_(m_state_static)+32, %eax - pushl %eax - popfl - movl VG_(m_state_static)+0, %eax - movl VG_(m_state_static)+4, %ecx - movl VG_(m_state_static)+8, %edx - movl VG_(m_state_static)+12, %ebx - movl VG_(m_state_static)+16, %esp - movl VG_(m_state_static)+20, %ebp - movl VG_(m_state_static)+24, %esi - movl VG_(m_state_static)+28, %edi - - # esp now refers to the simulatees stack - # Do the actual system call - int $0x80 - - # restore stack as soon as possible - # esp refers to simulatees stack - movl %esp, VG_(m_state_static)+16 - movl VG_(esp_saved_over_syscall_d2), %esp - # esp refers to simulators stack - - # ... and undo everything else. - # Copy real state back to simulated state. 
- movl %eax, VG_(m_state_static)+0 - movl %ecx, VG_(m_state_static)+4 - movl %edx, VG_(m_state_static)+8 - movl %ebx, VG_(m_state_static)+12 - movl %ebp, VG_(m_state_static)+20 - movl %esi, VG_(m_state_static)+24 - movl %edi, VG_(m_state_static)+28 - pushfl - popl %eax - movl %eax, VG_(m_state_static)+32 - fwait - fnsave VG_(m_state_static)+40 - frstor VG_(m_state_static)+40 - - # Restore the state of the simulator - frstor VG_(real_fpu_state_saved_over_syscall_d2) - popal - - ret - - ##--------------------------------------------------------------------## ##--- end vg_syscall.S ---## ##--------------------------------------------------------------------## diff --git a/vg_syscall_mem.c b/vg_syscall_mem.c index d78141f1b7..7578a7bbf0 100644 --- a/vg_syscall_mem.c +++ b/vg_syscall_mem.c @@ -272,31 +272,24 @@ void msghdr_foreachfield ( struct msghdr *msg, Addr VGM_(curr_dataseg_end); + /* The Main Entertainment ... */ -void VG_(wrap_syscall) ( void ) +void VG_(perform_assumed_nonblocking_syscall) ( ThreadId tid ) { - Bool sane_before_call = True; - Bool sane_after_call = True; - - UInt syscallno = VG_(baseBlock)[VGOFF_(m_eax)]; - UInt arg1 = VG_(baseBlock)[VGOFF_(m_ebx)]; - UInt arg2 = VG_(baseBlock)[VGOFF_(m_ecx)]; - UInt arg3 = VG_(baseBlock)[VGOFF_(m_edx)]; - UInt arg4 = VG_(baseBlock)[VGOFF_(m_esi)]; - UInt arg5 = VG_(baseBlock)[VGOFF_(m_edi)]; + Bool sane_before_call = True; + Bool sane_after_call = True; + ThreadState* tst = VG_(get_thread_state)( tid ); + UInt syscallno = tst->m_eax; + UInt arg1 = tst->m_ebx; + UInt arg2 = tst->m_ecx; + UInt arg3 = tst->m_edx; + UInt arg4 = tst->m_esi; + UInt arg5 = tst->m_edi; /* Do not make this unsigned! */ Int res; - /* Keep track of nested syscalls, and do some sanity checks. 
*/ - Int syscall_depth_saved = VG_(syscall_depth); - if (VG_(syscall_depth) > 1) - VG_(unimplemented) - ("recursion between blocked syscalls and signal handlers"); - vg_assert( VG_(syscall_depth) == 0 || VG_(syscall_depth) == 1 ); - VG_(syscall_depth) ++; - VGP_PUSHCC(VgpSyscall); /* Since buggy syscall wrappers sometimes break this, we may as well @@ -322,8 +315,8 @@ void VG_(wrap_syscall) ( void ) */ if (VG_(clo_trace_syscalls)) - VG_(printf)("SYSCALL[%d, %d](%3d): ", - VG_(syscall_depth), VG_(getpid)(), syscallno); + VG_(printf)("SYSCALL[%d,%d](%3d): ", + VG_(getpid)(), tid, syscallno); switch (syscallno) { @@ -350,7 +343,7 @@ void VG_(wrap_syscall) ( void ) /* int nice(int inc); */ if (VG_(clo_trace_syscalls)) VG_(printf)("nice ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; /* !!!!!!!!!! New, untested syscalls, 14 Mar 02 !!!!!!!!!! */ @@ -360,7 +353,7 @@ void VG_(wrap_syscall) ( void ) /* int setresgid(gid_t rgid, gid_t egid, gid_t sgid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setresgid32 ( %d, %d, %d )\n", arg1, arg2, arg3); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -369,7 +362,7 @@ void VG_(wrap_syscall) ( void ) /* int setfsuid(uid_t fsuid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setfsuid ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -380,7 +373,7 @@ void VG_(wrap_syscall) ( void ) VG_(printf)("_sysctl ( %p )\n", arg1 ); must_be_writable ( "_sysctl(args)", arg1, sizeof(struct __sysctl_args) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) make_readable ( arg1, sizeof(struct __sysctl_args) ); break; @@ -391,7 +384,7 @@ void VG_(wrap_syscall) ( void ) /* int sched_getscheduler(pid_t pid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("sched_getscheduler ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -404,7 +397,7 @@ void VG_(wrap_syscall) ( void ) if (arg3 != 
(UInt)NULL) must_be_readable( "sched_setscheduler(struct sched_param *p)", arg3, sizeof(struct sched_param)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -413,7 +406,7 @@ void VG_(wrap_syscall) ( void ) /* int mlockall(int flags); */ if (VG_(clo_trace_syscalls)) VG_(printf)("mlockall ( %x )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -422,7 +415,7 @@ void VG_(wrap_syscall) ( void ) /* int munlockall(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("munlockall ( )\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -431,7 +424,7 @@ void VG_(wrap_syscall) ( void ) /* int sched_get_priority_max(int policy); */ if (VG_(clo_trace_syscalls)) VG_(printf)("sched_get_priority_max ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -440,7 +433,7 @@ void VG_(wrap_syscall) ( void ) /* int setfsgid(gid_t gid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setfsgid ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -449,7 +442,7 @@ void VG_(wrap_syscall) ( void ) /* int setregid(gid_t rgid, gid_t egid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setregid ( %d, %d )\n", arg1, arg2); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -458,7 +451,7 @@ void VG_(wrap_syscall) ( void ) /* int setresuid(uid_t ruid, uid_t euid, uid_t suid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setresuid ( %d, %d, %d )\n", arg1, arg2, arg3); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -467,7 +460,7 @@ void VG_(wrap_syscall) ( void ) /* int setfsuid(uid_t uid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setfsuid ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -480,7 +473,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("sendfile ( %d, %d, %p, %d )\n",arg1,arg2,arg3,arg4); must_be_writable( 
"sendfile(offset)", arg3, sizeof(off_t) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) { make_readable( arg3, sizeof( off_t ) ); } @@ -496,7 +489,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("pwrite ( %d, %p, %d, %d )\n", arg1, arg2, arg3, arg4); must_be_readable( "pwrite(buf)", arg2, arg3 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -506,7 +499,7 @@ void VG_(wrap_syscall) ( void ) /* int sync(); */ if (VG_(clo_trace_syscalls)) VG_(printf)("sync ( )\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_fstatfs: /* syscall 100 */ @@ -514,7 +507,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("fstatfs ( %d, %p )\n",arg1,arg2); must_be_writable( "stat(buf)", arg2, sizeof(struct statfs) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) make_readable( arg2, sizeof(struct statfs) ); break; @@ -525,14 +518,14 @@ void VG_(wrap_syscall) ( void ) /* int pause(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("pause ( )\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_getsid: /* syscall 147 */ /* pid_t getsid(pid_t pid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("getsid ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_pread) @@ -541,10 +534,10 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("pread ( %d, %p, %d, %d ) ...\n",arg1,arg2,arg3,arg4); must_be_writable( "pread(buf)", arg2, arg3 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (VG_(clo_trace_syscalls)) - VG_(printf)("SYSCALL[%d, %d] pread ( %d, %p, %d, %d ) --> %d\n", - VG_(syscall_depth), VG_(getpid)(), + VG_(printf)("SYSCALL[%d] pread ( %d, %p, %d, %d ) --> %d\n", + VG_(getpid)(), arg1, arg2, arg3, arg4, res); if (!VG_(is_kerror)(res) && res > 0) { make_readable( arg2, res ); @@ -559,14 +552,14 @@ void 
VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("mknod ( %p, 0x%x, 0x%x )\n", arg1, arg2, arg3 ); must_be_readable_asciiz( "mknod(pathname)", arg1 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_flock: /* syscall 143 */ /* int flock(int fd, int operation); */ if (VG_(clo_trace_syscalls)) VG_(printf)("flock ( %d, %d )\n", arg1, arg2 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_rt_sigsuspend) @@ -581,7 +574,7 @@ void VG_(wrap_syscall) ( void ) must_be_readable( "sigsuspend(mask)", arg1, sizeof(vki_ksigset_t) ); } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -592,14 +585,14 @@ void VG_(wrap_syscall) ( void ) must_be_readable_asciiz( "init_module(name)", arg1 ); must_be_readable( "init_module(image)", arg2, sizeof(struct module) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_ioperm: /* syscall 101 */ /* int ioperm(unsigned long from, unsigned long num, int turn_on); */ if (VG_(clo_trace_syscalls)) VG_(printf)("ioperm ( %d, %d, %d )\n", arg1, arg2, arg3 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_capget: /* syscall 184 */ @@ -610,7 +603,7 @@ void VG_(wrap_syscall) ( void ) sizeof(vki_cap_user_header_t) ); must_be_writable( "capget(data)", arg2, sizeof( vki_cap_user_data_t) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && arg2 != (Addr)NULL) make_readable ( arg2, sizeof( vki_cap_user_data_t) ); break; @@ -635,7 +628,7 @@ void VG_(wrap_syscall) ( void ) } } } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); /* Should we still be alive here? Don't think so. */ /* Actually, above comment is wrong. execve can fail, just like any other syscall -- typically the file to exec does @@ -651,7 +644,7 @@ void VG_(wrap_syscall) ( void ) "Warning: client exiting by calling exit(%d). 
Bye!", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); /* Definitely should not be alive here :) */ break; @@ -662,14 +655,14 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("access ( %p, %d )\n", arg1,arg2); must_be_readable_asciiz( "access(pathname)", arg1 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_alarm: /* syscall 27 */ /* unsigned int alarm(unsigned int seconds); */ if (VG_(clo_trace_syscalls)) VG_(printf)("alarm ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_brk: /* syscall 45 */ @@ -677,7 +670,7 @@ void VG_(wrap_syscall) ( void ) /* int brk(void *end_data_segment); */ if (VG_(clo_trace_syscalls)) VG_(printf)("brk ( %p ) --> ",arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (VG_(clo_trace_syscalls)) VG_(printf)("0x%x\n", res); @@ -710,7 +703,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("chdir ( %p )\n", arg1); must_be_readable_asciiz( "chdir(path)", arg1 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_chmod: /* syscall 15 */ @@ -718,7 +711,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("chmod ( %p, %d )\n", arg1,arg2); must_be_readable_asciiz( "chmod(path)", arg1 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_chown32) @@ -732,7 +725,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("chown ( %p, 0x%x, 0x%x )\n", arg1,arg2,arg3); must_be_readable_asciiz( "chown(path)", arg1 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_close: /* syscall 6 */ @@ -750,7 +743,7 @@ void VG_(wrap_syscall) ( void ) " Use --logfile-fd= to select an " "alternative logfile fd." 
); } else { - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); } break; @@ -758,7 +751,7 @@ void VG_(wrap_syscall) ( void ) /* int dup(int oldfd); */ if (VG_(clo_trace_syscalls)) VG_(printf)("dup ( %d ) --> ", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (VG_(clo_trace_syscalls)) VG_(printf)("%d\n", res); break; @@ -767,25 +760,25 @@ void VG_(wrap_syscall) ( void ) /* int dup2(int oldfd, int newfd); */ if (VG_(clo_trace_syscalls)) VG_(printf)("dup2 ( %d, %d ) ...\n", arg1,arg2); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (VG_(clo_trace_syscalls)) - VG_(printf)("SYSCALL[%d, %d] dup2 ( %d, %d ) = %d\n", - VG_(syscall_depth), VG_(getpid)(), + VG_(printf)("SYSCALL[%d] dup2 ( %d, %d ) = %d\n", + VG_(getpid)(), arg1, arg2, res); break; case __NR_fcntl: /* syscall 55 */ - /* int fcntl(int fd, int cmd); */ + /* int fcntl(int fd, int cmd, int arg); */ if (VG_(clo_trace_syscalls)) - VG_(printf)("fcntl ( %d, %d )\n",arg1,arg2); - KERNEL_DO_SYSCALL(res); + VG_(printf)("fcntl ( %d, %d, %d )\n",arg1,arg2,arg3); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_fchdir: /* syscall 133 */ /* int fchdir(int fd); */ if (VG_(clo_trace_syscalls)) VG_(printf)("fchdir ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_fchown32) @@ -795,14 +788,14 @@ void VG_(wrap_syscall) ( void ) /* int fchown(int filedes, uid_t owner, gid_t group); */ if (VG_(clo_trace_syscalls)) VG_(printf)("fchown ( %d, %d, %d )\n", arg1,arg2,arg3); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_fchmod: /* syscall 94 */ /* int fchmod(int fildes, mode_t mode); */ if (VG_(clo_trace_syscalls)) VG_(printf)("fchmod ( %d, %d )\n", arg1,arg2); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_fcntl64) @@ -811,7 +804,7 @@ void VG_(wrap_syscall) ( void ) /* ??? int fcntl(int fd, int cmd); */ if (VG_(clo_trace_syscalls)) VG_(printf)("fcntl64 (?!) 
( %d, %d )\n", arg1,arg2); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -820,7 +813,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("fstat ( %d, %p )\n",arg1,arg2); must_be_writable( "fstat", arg2, sizeof(struct stat) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) make_readable( arg2, sizeof(struct stat) ); break; @@ -832,27 +825,27 @@ void VG_(wrap_syscall) ( void ) /* KLUDGE: we prefer to do a fork rather than vfork. vfork gives a SIGSEGV, and the stated semantics looks pretty much impossible for us. */ - VG_(baseBlock)[VGOFF_(m_eax)] = __NR_fork; + tst->m_eax = __NR_fork; /* fall through ... */ case __NR_fork: /* syscall 2 */ /* pid_t fork(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("fork ()\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_fsync: /* syscall 118 */ /* int fsync(int fd); */ if (VG_(clo_trace_syscalls)) VG_(printf)("fsync ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_ftruncate: /* syscall 93 */ /* int ftruncate(int fd, size_t length); */ if (VG_(clo_trace_syscalls)) VG_(printf)("ftruncate ( %d, %d )\n", arg1,arg2); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_ftruncate64) @@ -861,7 +854,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("ftruncate64 ( %d, %lld )\n", arg1,arg2|((long long) arg3 << 32)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -871,7 +864,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("getdents ( %d, %p, %d )\n",arg1,arg2,arg3); must_be_writable( "getdents(dirp)", arg2, arg3 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res > 0) make_readable( arg2, res ); break; @@ -883,7 +876,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("getdents64 ( %d, %p, %d 
)\n",arg1,arg2,arg3); must_be_writable( "getdents64(dirp)", arg2, arg3 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res > 0) make_readable( arg2, res ); break; @@ -899,7 +892,7 @@ void VG_(wrap_syscall) ( void ) if (arg1 > 0) must_be_writable ( "getgroups(list)", arg2, arg1 * sizeof(gid_t) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (arg1 > 0 && !VG_(is_kerror)(res) && res > 0) make_readable ( arg2, res * sizeof(gid_t) ); break; @@ -909,7 +902,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("getcwd ( %p, %d )\n",arg1,arg2); must_be_writable( "getcwd(buf)", arg1, arg2 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res != (Addr)NULL) make_readable ( arg1, arg2 ); /* Not really right -- really we should have the asciiz @@ -921,7 +914,7 @@ void VG_(wrap_syscall) ( void ) /* uid_t geteuid(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("geteuid ( )\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_geteuid32) @@ -929,7 +922,7 @@ void VG_(wrap_syscall) ( void ) /* ?? uid_t geteuid32(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("geteuid32(?) 
( )\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -937,7 +930,7 @@ void VG_(wrap_syscall) ( void ) /* gid_t getegid(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("getegid ()\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_getegid32) @@ -945,7 +938,7 @@ void VG_(wrap_syscall) ( void ) /* gid_t getegid32(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("getegid32 ()\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -953,7 +946,7 @@ void VG_(wrap_syscall) ( void ) /* gid_t getgid(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("getgid ()\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_getgid32) @@ -961,7 +954,7 @@ void VG_(wrap_syscall) ( void ) /* gid_t getgid32(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("getgid32 ()\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -969,28 +962,28 @@ void VG_(wrap_syscall) ( void ) /* pid_t getpid(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("getpid ()\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_getpgid: /* syscall 132 */ /* pid_t getpgid(pid_t pid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("getpgid ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_getpgrp: /* syscall 65 */ /* pid_t getpprp(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("getpgrp ()\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_getppid: /* syscall 64 */ /* pid_t getppid(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("getppid ()\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_getresgid: /* syscall 171 */ @@ -1000,7 +993,7 @@ void VG_(wrap_syscall) ( void ) must_be_writable ( "getresgid(rgid)", arg1, sizeof(gid_t) ); must_be_writable ( "getresgid(egid)", arg2, sizeof(gid_t) ); must_be_writable ( "getresgid(sgid)", arg3, sizeof(gid_t) ); 
- KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) { make_readable ( arg1, sizeof(gid_t) ); make_readable ( arg2, sizeof(gid_t) ); @@ -1016,7 +1009,7 @@ void VG_(wrap_syscall) ( void ) must_be_writable ( "getresgid32(rgid)", arg1, sizeof(gid_t) ); must_be_writable ( "getresgid32(egid)", arg2, sizeof(gid_t) ); must_be_writable ( "getresgid32(sgid)", arg3, sizeof(gid_t) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) { make_readable ( arg1, sizeof(gid_t) ); make_readable ( arg2, sizeof(gid_t) ); @@ -1032,7 +1025,7 @@ void VG_(wrap_syscall) ( void ) must_be_writable ( "getresuid(ruid)", arg1, sizeof(uid_t) ); must_be_writable ( "getresuid(euid)", arg2, sizeof(uid_t) ); must_be_writable ( "getresuid(suid)", arg3, sizeof(uid_t) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) { make_readable ( arg1, sizeof(uid_t) ); make_readable ( arg2, sizeof(uid_t) ); @@ -1048,7 +1041,7 @@ void VG_(wrap_syscall) ( void ) must_be_writable ( "getresuid32(ruid)", arg1, sizeof(uid_t) ); must_be_writable ( "getresuid32(euid)", arg2, sizeof(uid_t) ); must_be_writable ( "getresuid32(suid)", arg3, sizeof(uid_t) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) { make_readable ( arg1, sizeof(uid_t) ); make_readable ( arg2, sizeof(uid_t) ); @@ -1065,7 +1058,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("getrlimit ( %d, %p )\n", arg1,arg2); must_be_writable( "getrlimit(rlim)", arg2, sizeof(struct rlimit) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable( arg2, sizeof(struct rlimit) ); break; @@ -1075,7 +1068,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("getrusage ( %d, %p )\n", arg1,arg2); must_be_writable( "getrusage(usage)", arg2, sizeof(struct rusage) ); - KERNEL_DO_SYSCALL(res); + 
KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable(arg2, sizeof(struct rusage) ); break; @@ -1088,7 +1081,7 @@ void VG_(wrap_syscall) ( void ) if (arg2 != 0) must_be_writable( "gettimeofday(tz)", arg2, sizeof(struct timezone) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) { make_readable( arg1, sizeof(struct timeval) ); if (arg2 != 0) @@ -1100,7 +1093,7 @@ void VG_(wrap_syscall) ( void ) /* uid_t getuid(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("getuid ( )\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_getuid32) @@ -1108,7 +1101,7 @@ void VG_(wrap_syscall) ( void ) /* ???uid_t getuid32(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("getuid32 ( )\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -1116,7 +1109,7 @@ void VG_(wrap_syscall) ( void ) /* int ipc ( unsigned int call, int first, int second, int third, void *ptr, long fifth); */ { - UInt arg6 = VG_(baseBlock)[VGOFF_(m_ebp)]; + UInt arg6 = tst->m_ebp; if (VG_(clo_trace_syscalls)) VG_(printf)("ipc ( %d, %d, %d, %d, %p, %d )\n", @@ -1125,11 +1118,11 @@ void VG_(wrap_syscall) ( void ) case 1: /* IPCOP_semop */ must_be_readable ( "semop(sops)", arg5, arg3 * sizeof(struct sembuf) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case 2: /* IPCOP_semget */ case 3: /* IPCOP_semctl */ - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case 11: /* IPCOP_msgsnd */ { @@ -1141,7 +1134,7 @@ void VG_(wrap_syscall) ( void ) must_be_readable ( "msgsnd(msgp->mtext)", (UInt)msgp->mtext, msgsz ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; } case 12: /* IPCOP_msgrcv */ @@ -1154,7 +1147,7 @@ void VG_(wrap_syscall) ( void ) must_be_writable ( "msgsnd(msgp->mtext)", (UInt)msgp->mtext, msgsz ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if ( !VG_(is_kerror)(res) && res > 0 ) { make_readable ( (UInt)&msgp->mtype, 
sizeof(msgp->mtype) ); @@ -1163,7 +1156,7 @@ void VG_(wrap_syscall) ( void ) break; } case 13: /* IPCOP_msgget */ - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case 14: /* IPCOP_msgctl */ { @@ -1171,7 +1164,7 @@ void VG_(wrap_syscall) ( void ) case IPC_STAT: must_be_writable ( "msgctl(buf)", arg5, sizeof(struct msqid_ds) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if ( !VG_(is_kerror)(res) && res > 0 ) { make_readable ( arg5, sizeof(struct msqid_ds) ); } @@ -1179,12 +1172,12 @@ void VG_(wrap_syscall) ( void ) case IPC_SET: must_be_readable ( "msgctl(buf)", arg5, sizeof(struct msqid_ds) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case IPC_STAT|IPC_64: must_be_writable ( "msgctl(buf)", arg5, sizeof(struct msqid64_ds) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if ( !VG_(is_kerror)(res) && res > 0 ) { make_readable ( arg5, sizeof(struct msqid64_ds) ); } @@ -1192,10 +1185,10 @@ void VG_(wrap_syscall) ( void ) case IPC_SET|IPC_64: must_be_readable ( "msgctl(buf)", arg5, sizeof(struct msqid64_ds) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; default: - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; } break; @@ -1206,7 +1199,7 @@ void VG_(wrap_syscall) ( void ) Int shmflag = arg3; UInt addr; - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if ( VG_(is_kerror) ( res ) ) break; @@ -1229,7 +1222,7 @@ void VG_(wrap_syscall) ( void ) break; } case 22: /* IPCOP_shmdt */ - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); /* ### FIXME: this should call make_noaccess on the * area passed to shmdt. But there's no way to * figure out the size of the shared memory segment @@ -1237,7 +1230,7 @@ void VG_(wrap_syscall) ( void ) * copy of the exiting mappings inside valgrind? 
*/ break; case 23: /* IPCOP_shmget */ - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case 24: /* IPCOP_shmctl */ { @@ -1250,7 +1243,7 @@ void VG_(wrap_syscall) ( void ) sizeof( struct shmid_ds ) ); } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; } default: @@ -1282,24 +1275,24 @@ void VG_(wrap_syscall) ( void ) case TCSETSF: must_be_readable( "ioctl(TCSETSW)", arg3, VKI_SIZEOF_STRUCT_TERMIOS ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case TCGETS: must_be_writable( "ioctl(TCGETS)", arg3, VKI_SIZEOF_STRUCT_TERMIOS ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable ( arg3, VKI_SIZEOF_STRUCT_TERMIOS ); break; case TCSETA: must_be_readable( "ioctl(TCSETA)", arg3, VKI_SIZEOF_STRUCT_TERMIO ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case TCGETA: must_be_writable( "ioctl(TCGETA)", arg3, VKI_SIZEOF_STRUCT_TERMIO ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable ( arg3, VKI_SIZEOF_STRUCT_TERMIO ); break; @@ -1307,52 +1300,52 @@ void VG_(wrap_syscall) ( void ) case TCSBRKP: case TCFLSH: /* These just take an int by value */ - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case TIOCGWINSZ: must_be_writable( "ioctl(TIOCGWINSZ)", arg3, sizeof(struct winsize) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable ( arg3, sizeof(struct winsize) ); break; case TIOCSWINSZ: must_be_readable( "ioctl(TIOCSWINSZ)", arg3, sizeof(struct winsize) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case TIOCGPGRP: /* Get process group ID for foreground processing group. 
*/ must_be_writable( "ioctl(TIOCGPGRP)", arg3, sizeof(pid_t) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable ( arg3, sizeof(pid_t) ); case TIOCGPTN: /* Get Pty Number (of pty-mux device) */ must_be_writable("ioctl(TIOCGPTN)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable ( arg3, sizeof(int)); break; case TIOCSCTTY: /* Just takes an int value. */ - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case TIOCSPTLCK: /* Lock/unlock Pty */ must_be_readable( "ioctl(TIOCSPTLCK)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case FIONBIO: must_be_readable( "ioctl(FIONBIO)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case FIOASYNC: must_be_readable( "ioctl(FIOASYNC)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case FIONREAD: must_be_writable( "ioctl(FIONREAD)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable( arg3, sizeof(int) ); break; @@ -1363,13 +1356,13 @@ void VG_(wrap_syscall) ( void ) # if 1 case SG_SET_COMMAND_Q: must_be_readable( "ioctl(SG_SET_COMMAND_Q)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(SG_IO) case SG_IO: must_be_writable( "ioctl(SG_IO)", arg3, sizeof(struct sg_io_hdr) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable (arg3, sizeof(struct sg_io_hdr)); break; @@ -1378,36 +1371,36 @@ void VG_(wrap_syscall) ( void ) /* Note: sometimes sg_scsi_id is called sg_scsi_id_t */ must_be_writable( "ioctl(SG_GET_SCSI_ID)", arg3, sizeof(struct sg_scsi_id) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable (arg3, sizeof(struct sg_scsi_id)); 
break; case SG_SET_RESERVED_SIZE: must_be_readable( "ioctl(SG_SET_RESERVED_SIZE)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case SG_SET_TIMEOUT: must_be_readable( "ioctl(SG_SET_TIMEOUT)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case SG_GET_RESERVED_SIZE: must_be_writable( "ioctl(SG_GET_RESERVED_SIZE)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable (arg3, sizeof(int)); break; case SG_GET_TIMEOUT: must_be_writable( "ioctl(SG_GET_TIMEOUT)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable (arg3, sizeof(int)); break; case SG_GET_VERSION_NUM: must_be_readable( "ioctl(SG_GET_VERSION_NUM)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -1420,7 +1413,7 @@ void VG_(wrap_syscall) ( void ) must_be_writable( "ioctl(IIOCGETCPS)", arg3, ISDN_MAX_CHANNELS * 2 * sizeof(unsigned long) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable ( arg3, ISDN_MAX_CHANNELS * 2 * sizeof(unsigned long) ); @@ -1431,7 +1424,7 @@ void VG_(wrap_syscall) ( void ) sizeof(((isdn_net_ioctl_phone *)arg3)->name) ); must_be_writable( "ioctl(IIOCNETGPN)", arg3, sizeof(isdn_net_ioctl_phone) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable ( arg3, sizeof(isdn_net_ioctl_phone) ); break; @@ -1451,7 +1444,7 @@ void VG_(wrap_syscall) ( void ) case SIOCGIFNAME: /* get iface name */ must_be_writable("ioctl(SIOCGIFINDEX)", arg3, sizeof(struct ifreq)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable (arg3, sizeof(struct ifreq)); break; @@ -1459,7 +1452,7 @@ void VG_(wrap_syscall) ( void ) /* WAS: must_be_writable("ioctl(SIOCGIFCONF)", arg3, 
sizeof(struct ifconf)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable (arg3, sizeof(struct ifconf)); */ @@ -1472,7 +1465,7 @@ void VG_(wrap_syscall) ( void ) must_be_writable("ioctl(SIOCGIFCONF).ifc_buf", (Addr)(ifc->ifc_buf), (UInt)(ifc->ifc_len) ); } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0 && arg3 ) { struct ifconf *ifc = (struct ifconf *) arg3; make_readable ( (Addr)(ifc->ifc_buf), (UInt)(ifc->ifc_len) ); @@ -1481,7 +1474,7 @@ void VG_(wrap_syscall) ( void ) case SIOCGSTAMP: must_be_writable("ioctl(SIOCGSTAMP)", arg3, sizeof(struct timeval)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable (arg3, sizeof(struct timeval)); break; @@ -1489,7 +1482,7 @@ void VG_(wrap_syscall) ( void ) case SIOCGARP: /* get ARP table entry */ must_be_writable("ioctl(SIOCGARP)", arg3, sizeof(struct arpreq)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable (arg3, sizeof(struct arpreq)); break; @@ -1506,14 +1499,14 @@ void VG_(wrap_syscall) ( void ) case SIOCSIFHWADDR: /* set hardware address */ must_be_readable("ioctl(SIOCSIFFLAGS)", arg3, sizeof(struct ifreq)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; /* Routing table calls. */ case SIOCADDRT: /* add routing table entry */ case SIOCDELRT: /* delete routing table entry */ must_be_readable("ioctl(SIOCADDRT/DELRT)", arg3, sizeof(struct rtentry)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; /* RARP cache control calls. 
*/ @@ -1524,12 +1517,12 @@ void VG_(wrap_syscall) ( void ) case SIOCDARP: /* delete ARP table entry */ must_be_readable("ioctl(SIOCSIFFLAGS)", arg3, sizeof(struct ifreq)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case SIOCSPGRP: must_be_readable( "ioctl(SIOCSPGRP)", arg3, sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; /* linux/soundcard interface (OSS) */ @@ -1555,7 +1548,7 @@ void VG_(wrap_syscall) ( void ) case SOUND_PCM_READ_FILTER: must_be_writable("ioctl(SNDCTL_XXX|SOUND_XXX (SIOR, int))", arg3, sizeof(int)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable (arg3, sizeof(int)); break; @@ -1582,21 +1575,21 @@ void VG_(wrap_syscall) ( void ) arg3, sizeof(int)); must_be_writable("ioctl(SNDCTL_XXX|SOUND_XXX (SIOWR, int))", arg3, sizeof(int)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case SNDCTL_DSP_GETOSPACE: case SNDCTL_DSP_GETISPACE: must_be_writable("ioctl(SNDCTL_XXX|SOUND_XXX " "(SIOR, audio_buf_info))", arg3, sizeof(audio_buf_info)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable (arg3, sizeof(audio_buf_info)); break; case SNDCTL_DSP_SETTRIGGER: must_be_readable("ioctl(SNDCTL_XXX|SOUND_XXX (SIOW, int))", arg3, sizeof(int)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; /* We don't have any specific information on it, so @@ -1623,7 +1616,7 @@ void VG_(wrap_syscall) ( void ) if ((dir & _IOC_WRITE) && size > 0) must_be_writable("ioctl(generic)", arg3, size); } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (size > 0 && (dir & _IOC_WRITE) && !VG_(is_kerror)(res) && res == 0) make_readable (arg3, size); @@ -1636,7 +1629,7 @@ void VG_(wrap_syscall) ( void ) /* int kill(pid_t pid, int sig); */ if (VG_(clo_trace_syscalls)) VG_(printf)("kill ( %d, %d )\n", arg1,arg2); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_link: /* 
syscall 9 */ @@ -1645,14 +1638,14 @@ void VG_(wrap_syscall) ( void ) VG_(printf)("link ( %p, %p)\n", arg1, arg2); must_be_readable_asciiz( "link(oldpath)", arg1); must_be_readable_asciiz( "link(newpath)", arg2); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_lseek: /* syscall 19 */ /* off_t lseek(int fildes, off_t offset, int whence); */ if (VG_(clo_trace_syscalls)) VG_(printf)("lseek ( %d, %d, %d )\n",arg1,arg2,arg3); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR__llseek: /* syscall 140 */ @@ -1663,7 +1656,7 @@ void VG_(wrap_syscall) ( void ) VG_(printf)("llseek ( %d, 0x%x, 0x%x, %p, %d )\n", arg1,arg2,arg3,arg4,arg5); must_be_writable( "llseek(result)", arg4, sizeof(loff_t)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) make_readable( arg4, sizeof(loff_t) ); break; @@ -1674,7 +1667,7 @@ void VG_(wrap_syscall) ( void ) VG_(printf)("lstat ( %p, %p )\n",arg1,arg2); must_be_readable_asciiz( "lstat(file_name)", arg1 ); must_be_writable( "lstat(buf)", arg2, sizeof(struct stat) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) { make_readable( arg2, sizeof(struct stat) ); } @@ -1687,7 +1680,7 @@ void VG_(wrap_syscall) ( void ) VG_(printf)("lstat64 ( %p, %p )\n",arg1,arg2); must_be_readable_asciiz( "lstat64(file_name)", arg1 ); must_be_writable( "lstat64(buf)", arg2, sizeof(struct stat64) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0) { make_readable( arg2, sizeof(struct stat64) ); } @@ -1699,7 +1692,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("mkdir ( %p, %d )\n", arg1,arg2); must_be_readable_asciiz( "mkdir(pathname)", arg1 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_mmap2) @@ -1711,11 +1704,11 @@ void VG_(wrap_syscall) ( void ) int flags, int fd, off_t offset); */ { - UInt arg6 = 
VG_(baseBlock)[VGOFF_(m_ebp)]; + UInt arg6 = tst->m_ebp; if (VG_(clo_trace_syscalls)) VG_(printf)("mmap2 ( %p, %d, %d, %d, %d, %d )\n", arg1, arg2, arg3, arg4, arg5, arg6 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); /* !!! shouldn't we also be doing the symtab loading stuff as in __NR_mmap ? */ if (!VG_(is_kerror)(res)) @@ -1747,7 +1740,7 @@ void VG_(wrap_syscall) ( void ) VG_(printf)("mmap ( %p, %d, %d, %d, %d, %d )\n", arg1, arg2, arg3, arg4, arg5, arg6 ); } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (arg_block_readable && !VG_(is_kerror)(res)) approximate_mmap_permissions( (Addr)res, arg2, arg3 ); if (arg_block_readable && !VG_(is_kerror)(res) @@ -1768,7 +1761,7 @@ void VG_(wrap_syscall) ( void ) /* should addr .. addr+len-1 be checked before the call? */ if (VG_(clo_trace_syscalls)) VG_(printf)("mprotect ( %p, %d, %d )\n", arg1,arg2,arg3); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) approximate_mmap_permissions ( arg1, arg2, arg3 ); break; @@ -1778,7 +1771,7 @@ void VG_(wrap_syscall) ( void ) /* should start .. start+length-1 be checked before the call? */ if (VG_(clo_trace_syscalls)) VG_(printf)("munmap ( %p, %d )\n", arg1,arg2); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) { /* Mash around start and length so that the area passed to make_noaccess() exactly covers an integral number of @@ -1811,7 +1804,7 @@ void VG_(wrap_syscall) ( void ) if (arg2 != (UInt)NULL) must_be_writable ( "nanosleep(rem)", arg2, sizeof(struct timespec) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); /* Somewhat bogus ... is only written by the kernel if res == -1 && errno == EINTR. 
*/ if (!VG_(is_kerror)(res) && arg2 != (UInt)NULL) @@ -1838,7 +1831,7 @@ void VG_(wrap_syscall) ( void ) if (arg5 != 0) must_be_readable( "newselect(timeout)", arg5, sizeof(struct timeval) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_open: /* syscall 5 */ @@ -1846,7 +1839,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("open ( %p(%s), %d ) --> ",arg1,arg1,arg2); must_be_readable_asciiz( "open(pathname)", arg1 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (VG_(clo_trace_syscalls)) VG_(printf)("%d\n",res); break; @@ -1856,12 +1849,12 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("pipe ( %p ) ...\n", arg1); must_be_writable( "pipe(filedes)", arg1, 2*sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) make_readable ( arg1, 2*sizeof(int) ); if (VG_(clo_trace_syscalls) && !VG_(is_kerror)(res)) - VG_(printf)("SYSCALL[%d, %d] pipe --> (rd %d, wr %d)\n", - VG_(syscall_depth), VG_(getpid)(), + VG_(printf)("SYSCALL[%d] pipe --> (rd %d, wr %d)\n", + VG_(getpid)(), ((UInt*)arg1)[0], ((UInt*)arg1)[1] ); break; @@ -1879,7 +1872,7 @@ void VG_(wrap_syscall) ( void ) /* In fact some parts of this struct should be readable too. This should be fixed properly. 
*/ must_be_writable( "poll(ufds)", arg1, arg2 * sizeof(struct pollfd) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res > 0) { Int i; struct pollfd * arr = (struct pollfd *)arg1; @@ -1888,28 +1881,13 @@ void VG_(wrap_syscall) ( void ) } break; - case __NR_read: /* syscall 3 */ - /* size_t read(int fd, void *buf, size_t count); */ - if (VG_(clo_trace_syscalls)) - VG_(printf)("read ( %d, %p, %d ) ...\n",arg1,arg2,arg3); - must_be_writable( "read(buf)", arg2, arg3 ); - KERNEL_DO_SYSCALL(res); - if (VG_(clo_trace_syscalls)) - VG_(printf)("SYSCALL[%d, %d] read ( %d, %p, %d ) --> %d\n", - VG_(syscall_depth), VG_(getpid)(), - arg1, arg2, arg3, res); - if (!VG_(is_kerror)(res) && res > 0) { - make_readable( arg2, res ); - } - break; - case __NR_readlink: /* syscall 85 */ /* int readlink(const char *path, char *buf, size_t bufsiz); */ if (VG_(clo_trace_syscalls)) VG_(printf)("readlink ( %p, %p, %d )\n", arg1,arg2,arg3); must_be_readable_asciiz( "readlink(path)", arg1 ); must_be_writable ( "readlink(buf)", arg2,arg3 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res > 0) { make_readable ( arg2, res ); } @@ -1928,7 +1906,7 @@ void VG_(wrap_syscall) ( void ) for (i = 0; i < arg3; i++) must_be_writable( "readv(vector[...])", (UInt)vec[i].iov_base,vec[i].iov_len ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res > 0) { /* res holds the number of bytes read. 
*/ for (i = 0; i < arg3; i++) { @@ -1948,7 +1926,7 @@ void VG_(wrap_syscall) ( void ) VG_(printf)("rename ( %p, %p )\n", arg1, arg2 ); must_be_readable_asciiz( "rename(oldpath)", arg1 ); must_be_readable_asciiz( "rename(newpath)", arg2 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_rmdir: /* syscall 40 */ @@ -1956,7 +1934,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("rmdir ( %p )\n", arg1); must_be_readable_asciiz( "rmdir(pathname)", arg1 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_sched_setparam: @@ -1971,7 +1949,7 @@ void VG_(wrap_syscall) ( void ) " This could cause spurious value errors" " to appear."); } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_select: /* syscall 82 */ @@ -2013,7 +1991,7 @@ void VG_(wrap_syscall) ( void ) sizeof(struct timeval) ); } } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_setitimer: /* syscall 104 */ @@ -2026,7 +2004,7 @@ void VG_(wrap_syscall) ( void ) if (arg3 != (Addr)NULL) must_be_writable("setitimer(ovalue)", arg3, sizeof(struct itimerval)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && arg3 != (Addr)NULL) { make_readable(arg3, sizeof(struct itimerval)); } @@ -2037,7 +2015,7 @@ void VG_(wrap_syscall) ( void ) /* int setfsgid(uid_t fsgid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setfsgid ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -2048,14 +2026,14 @@ void VG_(wrap_syscall) ( void ) /* int setgid(gid_t gid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setgid ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_setsid: /* syscall 66 */ /* pid_t setsid(void); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setsid ()\n"); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_setgroups32) @@ -2068,14 +2046,14 @@ void VG_(wrap_syscall) 
( void ) if (arg1 > 0) must_be_readable ( "setgroups(list)", arg2, arg1 * sizeof(gid_t) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_setpgid: /* syscall 57 */ /* int setpgid(pid_t pid, pid_t pgid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setpgid ( %d, %d )\n", arg1, arg2); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_setregid32) @@ -2083,7 +2061,7 @@ void VG_(wrap_syscall) ( void ) /* int setregid(gid_t rgid, gid_t egid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setregid32(?) ( %d, %d )\n", arg1, arg2); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -2092,7 +2070,7 @@ void VG_(wrap_syscall) ( void ) /* int setresuid(uid_t ruid, uid_t euid, uid_t suid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setresuid32(?) ( %d, %d, %d )\n", arg1, arg2, arg3); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # endif @@ -2103,7 +2081,7 @@ void VG_(wrap_syscall) ( void ) /* int setreuid(uid_t ruid, uid_t euid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setreuid ( 0x%x, 0x%x )\n", arg1, arg2); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_setrlimit: /* syscall 75 */ @@ -2111,7 +2089,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("setrlimit ( %d, %p )\n", arg1,arg2); must_be_readable( "setrlimit(rlim)", arg2, sizeof(struct rlimit) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_setuid32) @@ -2121,7 +2099,7 @@ void VG_(wrap_syscall) ( void ) /* int setuid(uid_t uid); */ if (VG_(clo_trace_syscalls)) VG_(printf)("setuid ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_socketcall: /* syscall 102 */ @@ -2136,7 +2114,7 @@ void VG_(wrap_syscall) ( void ) arg2, 4*sizeof(Addr) ); must_be_writable( "socketcall.socketpair(sv)", ((UInt*)arg2)[3], 2*sizeof(int) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if 
(!VG_(is_kerror)(res)) make_readable ( ((UInt*)arg2)[3], 2*sizeof(int) ); break; @@ -2145,7 +2123,7 @@ void VG_(wrap_syscall) ( void ) /* int socket(int domain, int type, int protocol); */ must_be_readable( "socketcall.socket(args)", arg2, 3*sizeof(Addr) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case SYS_BIND: @@ -2155,14 +2133,14 @@ void VG_(wrap_syscall) ( void ) arg2, 3*sizeof(Addr) ); must_be_readable( "socketcall.bind(my_addr)", ((UInt*)arg2)[1], ((UInt*)arg2)[2] ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case SYS_LISTEN: /* int listen(int s, int backlog); */ must_be_readable( "socketcall.listen(args)", arg2, 2*sizeof(Addr) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case SYS_ACCEPT: { @@ -2181,7 +2159,7 @@ void VG_(wrap_syscall) ( void ) must_be_writable ( "socketcall.accept(addr)", addr, addrlen_in ); } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res >= 0 && p_addrlen != (Addr)NULL) { addrlen_out = safe_dereference( p_addrlen, 0 ); if (addrlen_out > 0) @@ -2202,7 +2180,7 @@ void VG_(wrap_syscall) ( void ) must_be_readable( "socketcall.sendto(to)", ((UInt*)arg2)[4], /* to */ ((UInt*)arg2)[5] /* tolen */ ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case SYS_SEND: @@ -2212,7 +2190,7 @@ void VG_(wrap_syscall) ( void ) must_be_readable( "socketcall.send(msg)", ((UInt*)arg2)[1], /* msg */ ((UInt*)arg2)[2] /* len */ ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case SYS_RECVFROM: @@ -2232,7 +2210,7 @@ void VG_(wrap_syscall) ( void ) must_be_writable( "socketcall.recvfrom(buf)", ((UInt*)arg2)[1], /* buf */ ((UInt*)arg2)[2] /* len */ ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res >= 0) { make_readable( ((UInt*)arg2)[1], /* buf */ ((UInt*)arg2)[2] /* len */ ); @@ -2257,7 +2235,7 @@ void VG_(wrap_syscall) ( void ) must_be_writable( "socketcall.recv(buf)", ((UInt*)arg2)[1], /* 
buf */ ((UInt*)arg2)[2] /* len */ ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res >= 0 && ((UInt*)arg2)[1] != (UInt)NULL) { make_readable( ((UInt*)arg2)[1], /* buf */ @@ -2286,7 +2264,7 @@ void VG_(wrap_syscall) ( void ) must_be_readable( "socketcall.connect(serv_addr)", ((UInt*)arg2)[1], /* serv_addr */ ((UInt*)arg2)[2] /* addrlen */ ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; } @@ -2298,7 +2276,7 @@ void VG_(wrap_syscall) ( void ) must_be_readable( "socketcall.setsockopt(optval)", ((UInt*)arg2)[3], /* optval */ ((UInt*)arg2)[4] /* optlen */ ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case SYS_GETSOCKOPT: @@ -2315,7 +2293,7 @@ void VG_(wrap_syscall) ( void ) if (optlen > 0) must_be_writable( "socketcall.getsockopt(optval)", optval_p, optlen ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); optlen_after = safe_dereference ( optlen_p, 0 ); if (!VG_(is_kerror)(res) && optlen > 0 && optlen_after > 0) make_readable( optval_p, optlen_after ); @@ -2332,7 +2310,7 @@ void VG_(wrap_syscall) ( void ) if (namelen > 0) must_be_writable( "socketcall.getsockname(name)", ((UInt*)arg2)[1], namelen ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) { namelen = safe_dereference( (Addr) ((UInt*)arg2)[2], 0); if (namelen > 0 @@ -2352,7 +2330,7 @@ void VG_(wrap_syscall) ( void ) if (namelen > 0) must_be_writable( "socketcall.getpeername(name)", ((UInt*)arg2)[1], namelen ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) { namelen = safe_dereference( (Addr) ((UInt*)arg2)[2], 0); if (namelen > 0 @@ -2366,7 +2344,7 @@ void VG_(wrap_syscall) ( void ) /* int shutdown(int s, int how); */ must_be_readable( "socketcall.shutdown(args)", arg2, 2*sizeof(Addr) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case SYS_SENDMSG: @@ -2382,7 +2360,7 @@ void VG_(wrap_syscall) ( void ) struct msghdr *msg = (struct msghdr 
*)((UInt *)arg2)[ 1 ]; msghdr_foreachfield ( msg, must_be_readable_sendmsg ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; } @@ -2399,7 +2377,7 @@ void VG_(wrap_syscall) ( void ) struct msghdr *msg = (struct msghdr *)((UInt *)arg2)[ 1 ]; msghdr_foreachfield ( msg, must_be_writable_recvmsg ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if ( !VG_(is_kerror)( res ) ) msghdr_foreachfield( msg, make_readable_recvmsg ); @@ -2420,7 +2398,7 @@ void VG_(wrap_syscall) ( void ) VG_(printf)("stat ( %p, %p )\n",arg1,arg2); must_be_readable_asciiz( "stat(file_name)", arg1 ); must_be_writable( "stat(buf)", arg2, sizeof(struct stat) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) make_readable( arg2, sizeof(struct stat) ); break; @@ -2431,7 +2409,7 @@ void VG_(wrap_syscall) ( void ) VG_(printf)("statfs ( %p, %p )\n",arg1,arg2); must_be_readable_asciiz( "statfs(path)", arg1 ); must_be_writable( "stat(buf)", arg2, sizeof(struct statfs) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) make_readable( arg2, sizeof(struct statfs) ); break; @@ -2442,7 +2420,7 @@ void VG_(wrap_syscall) ( void ) VG_(printf)("symlink ( %p, %p )\n",arg1,arg2); must_be_readable_asciiz( "symlink(oldpath)", arg1 ); must_be_readable_asciiz( "symlink(newpath)", arg2 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; # if defined(__NR_stat64) @@ -2452,7 +2430,7 @@ void VG_(wrap_syscall) ( void ) VG_(printf)("stat64 ( %p, %p )\n",arg1,arg2); must_be_readable_asciiz( "stat64(file_name)", arg1 ); must_be_writable( "stat64(buf)", arg2, sizeof(struct stat64) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) make_readable( arg2, sizeof(struct stat64) ); break; @@ -2464,7 +2442,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("fstat64 ( %d, %p )\n",arg1,arg2); must_be_writable( "fstat64(buf)", arg2, sizeof(struct stat64) ); - 
KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) make_readable( arg2, sizeof(struct stat64) ); break; @@ -2475,7 +2453,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("sysinfo ( %p )\n",arg1); must_be_writable( "sysinfo(info)", arg1, sizeof(struct sysinfo) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) make_readable( arg1, sizeof(struct sysinfo) ); break; @@ -2487,7 +2465,7 @@ void VG_(wrap_syscall) ( void ) if (arg1 != (UInt)NULL) { must_be_writable( "time", arg1, sizeof(time_t) ); } - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && arg1 != (UInt)NULL) { make_readable( arg1, sizeof(time_t) ); } @@ -2498,7 +2476,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("times ( %p )\n",arg1); must_be_writable( "times(buf)", arg1, sizeof(struct tms) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && arg1 != (UInt)NULL) { make_readable( arg1, sizeof(struct tms) ); } @@ -2509,14 +2487,14 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("truncate ( %p, %d )\n", arg1,arg2); must_be_readable_asciiz( "truncate(path)", arg1 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_umask: /* syscall 60 */ /* mode_t umask(mode_t mask); */ if (VG_(clo_trace_syscalls)) VG_(printf)("umask ( %d )\n", arg1); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_unlink: /* syscall 10 */ @@ -2524,7 +2502,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("ulink ( %p )\n",arg1); must_be_readable_asciiz( "unlink(pathname)", arg1 ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_uname: /* syscall 122 */ @@ -2532,7 +2510,7 @@ void VG_(wrap_syscall) ( void ) if (VG_(clo_trace_syscalls)) VG_(printf)("uname ( %p )\n",arg1); must_be_writable( "uname(buf)", arg1, sizeof(struct utsname) ); - 
KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && arg1 != (UInt)NULL) { make_readable( arg1, sizeof(struct utsname) ); } @@ -2546,7 +2524,7 @@ void VG_(wrap_syscall) ( void ) if (arg2 != (UInt)NULL) must_be_readable( "utime(buf)", arg2, sizeof(struct utimbuf) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; case __NR_wait4: /* syscall 114 */ @@ -2559,7 +2537,7 @@ void VG_(wrap_syscall) ( void ) must_be_writable( "wait4(status)", arg2, sizeof(int) ); if (arg4 != (Addr)NULL) must_be_writable( "wait4(rusage)", arg4, sizeof(struct rusage) ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res)) { if (arg2 != (Addr)NULL) make_readable( arg2, sizeof(int) ); @@ -2568,14 +2546,6 @@ void VG_(wrap_syscall) ( void ) } break; - case __NR_write: /* syscall 4 */ - /* size_t write(int fd, const void *buf, size_t count); */ - if (VG_(clo_trace_syscalls)) - VG_(printf)("write ( %d, %p, %d )\n",arg1,arg2,arg3); - must_be_readable( "write(buf)", arg2, arg3 ); - KERNEL_DO_SYSCALL(res); - break; - case __NR_writev: { /* syscall 146 */ /* int writev(int fd, const struct iovec * vector, size_t count); */ UInt i; @@ -2589,7 +2559,7 @@ void VG_(wrap_syscall) ( void ) for (i = 0; i < arg3; i++) must_be_readable( "writev(vector[...])", (UInt)vec[i].iov_base,vec[i].iov_len ); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); break; } @@ -2614,11 +2584,11 @@ void VG_(wrap_syscall) ( void ) arg3, sizeof(vki_ksigaction)); /* We do this one ourselves! */ # if SIGNAL_SIMULATION - VG_(do__NR_sigaction)(); - res = VG_(baseBlock)[VGOFF_(m_eax)]; + VG_(do__NR_sigaction)(tid); + res = tst->m_eax; # else /* debugging signals; when we don't handle them. 
*/ - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); # endif if (!VG_(is_kerror)(res) && res == 0 && arg3 != (UInt)NULL) make_readable( arg3, sizeof(vki_ksigaction)); @@ -2636,7 +2606,7 @@ void VG_(wrap_syscall) ( void ) if (arg3 != (UInt)NULL) must_be_writable( "sigprocmask(oldset)", arg3, sizeof(vki_ksigset_t)); - KERNEL_DO_SYSCALL(res); + KERNEL_DO_SYSCALL(tid,res); if (!VG_(is_kerror)(res) && res == 0 && arg3 != (UInt)NULL) make_readable( arg3, sizeof(vki_ksigset_t)); # if SIGNAL_SIMULATION @@ -2659,20 +2629,13 @@ void VG_(wrap_syscall) ( void ) break; /*NOTREACHED*/ } - /* Tell the signal handler machinery that we've finished the - syscall. */ - VG_(syscall_depth) --; - /* { void zzzmemscan(void); zzzmemscan(); } */ - /* Finish off with some sanity checks. */ - vg_assert( VG_(syscall_depth) == syscall_depth_saved ); - if (! VG_(first_and_last_secondaries_look_plausible)) sane_before_call = False; if (sane_before_call && (!sane_after_call)) { - VG_(message)(Vg_DebugMsg, "valgrind syscall handler: "); + VG_(message)(Vg_DebugMsg, "perform_assumed_nonblocking_syscall: "); VG_(message)(Vg_DebugMsg, "probable sanity check failure for syscall number %d\n", syscallno ); @@ -2683,6 +2646,111 @@ void VG_(wrap_syscall) ( void ) } + +/* Perform pre- and post- actions for a blocking syscall, but do not + do the syscall itself. If res is NULL, the pre-syscall actions are + to be performed. If res is non-NULL, the post-syscall actions are + to be performed, and *res is assumed to hold the result of the + syscall. This slightly strange scheme makes it impossible to + mistakenly use the value of *res in the pre-syscall actions. + + This doesn't actually do the syscall itself, it is important to + observe. + + Because %eax is used both for the syscall number before the call + and the result value afterwards, we can't reliably use it to get + the syscall number. So the caller has to pass it explicitly. 
+*/ +void VG_(check_known_blocking_syscall) ( ThreadId tid, + Int syscallno, + Int* /*IN*/ res ) +{ + Bool sane_before_post = True; + Bool sane_after_post = True; + ThreadState* tst = VG_(get_thread_state)( tid ); + UInt arg1 = tst->m_ebx; + UInt arg2 = tst->m_ecx; + UInt arg3 = tst->m_edx; + /* + UInt arg4 = tst->m_esi; + UInt arg5 = tst->m_edi; + */ + VGP_PUSHCC(VgpSyscall); + + if (res != NULL + && ! VG_(first_and_last_secondaries_look_plausible)) + sane_before_post = False; + + switch (syscallno) { + + case __NR_read: /* syscall 3 */ + /* size_t read(int fd, void *buf, size_t count); */ + if (res == NULL) { + /* PRE */ + if (VG_(clo_trace_syscalls)) + VG_(printf)( + "SYSCALL--PRE[%d,%d] read ( %d, %p, %d )\n", + VG_(getpid)(), tid, + arg1, arg2, arg3); + must_be_writable( "read(buf)", arg2, arg3 ); + } else { + /* POST */ + if (VG_(clo_trace_syscalls)) + VG_(printf)( + "SYSCALL-POST[%d,%d] read ( %d, %p, %d ) --> %d\n", + VG_(getpid)(), tid, + arg1, arg2, arg3, *res); + if (!VG_(is_kerror)(*res) && *res > 0) { + make_readable( arg2, *res ); + } + } + break; + + case __NR_write: /* syscall 4 */ + /* size_t write(int fd, const void *buf, size_t count); */ + if (res == NULL) { + /* PRE */ + if (VG_(clo_trace_syscalls)) + VG_(printf)( + "SYSCALL--PRE[%d,%d] write ( %d, %p, %d )\n", + VG_(getpid)(), tid, + arg1, arg2, arg3); + must_be_readable( "write(buf)", arg2, arg3 ); + } else { + /* POST */ + if (VG_(clo_trace_syscalls)) + VG_(printf)( + "SYSCALL-POST[%d,%d] write ( %d, %p, %d ) --> %d\n", + VG_(getpid)(), tid, + arg1, arg2, arg3, *res); + } + break; + + default: + VG_(printf)("check_known_blocking_syscall: unexpected %d\n", + syscallno); + VG_(panic)("check_known_blocking_syscall"); + /*NOTREACHED*/ + break; + } + + if (res != NULL) { /* only check after syscall */ + if (! 
VG_(first_and_last_secondaries_look_plausible)) + sane_after_post = False; + + if (sane_before_post && (!sane_after_post)) { + VG_(message)(Vg_DebugMsg, "perform_known_blocking_syscall: "); + VG_(message)(Vg_DebugMsg, + "probable sanity check failure for syscall number %d\n", + syscallno ); + VG_(panic)("aborting due to the above ... bye!"); + } + } + + VGP_POPCC; +} + + /*--------------------------------------------------------------------*/ /*--- end vg_syscall_mem.c ---*/ /*--------------------------------------------------------------------*/ diff --git a/vg_to_ucode.c b/vg_to_ucode.c index b3bd3c367c..c45ad81f8e 100644 --- a/vg_to_ucode.c +++ b/vg_to_ucode.c @@ -1607,7 +1607,7 @@ Addr dis_Grp5 ( UCodeBlock* cb, Int sz, Addr eip, Bool* isEnd ) SMC_IF_ALL(cb); uInstr1(cb, JMP, 0, TempReg, t1); uCond(cb, CondAlways); - LAST_UINSTR(cb).call_dispatch = True; + LAST_UINSTR(cb).jmpkind = JmpCall; *isEnd = True; break; case 4: /* jmp Ev */ @@ -1654,7 +1654,7 @@ Addr dis_Grp5 ( UCodeBlock* cb, Int sz, Addr eip, Bool* isEnd ) SMC_IF_ALL(cb); uInstr1(cb, JMP, 0, TempReg, t1); uCond(cb, CondAlways); - LAST_UINSTR(cb).call_dispatch = True; + LAST_UINSTR(cb).jmpkind = JmpCall; *isEnd = True; break; case 4: /* JMP Ev */ @@ -2859,32 +2859,6 @@ Addr dis_xadd_G_E ( UCodeBlock* cb, } -/* Push %ECX, %EBX and %EAX, call helper_do_client_request, and put - the resulting %EAX value back. 
*/ -static -void dis_ClientRequest ( UCodeBlock* cb ) -{ - Int tmpc = newTemp(cb); - Int tmpb = newTemp(cb); - Int tmpa = newTemp(cb); - uInstr2(cb, GET, 4, ArchReg, R_ECX, TempReg, tmpc); - uInstr2(cb, GET, 4, ArchReg, R_EBX, TempReg, tmpb); - uInstr2(cb, GET, 4, ArchReg, R_EAX, TempReg, tmpa); - uInstr0(cb, CALLM_S, 0); - uInstr1(cb, PUSH, 4, TempReg, tmpc); - uInstr1(cb, PUSH, 4, TempReg, tmpb); - uInstr1(cb, PUSH, 4, TempReg, tmpa); - uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_do_client_request)); - uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty); - uInstr1(cb, POP, 4, TempReg, tmpa); - uInstr1(cb, CLEAR, 0, Lit16, 8); - uInstr0(cb, CALLM_E, 0); - uInstr2(cb, PUT, 4, TempReg, tmpa, ArchReg, R_EAX); - if (dis) - VG_(printf)("%%eax = client_request ( %%eax, %%ebx, %%ecx )\n"); -} - - /*------------------------------------------------------------*/ /*--- Disassembling entire basic blocks ---*/ /*------------------------------------------------------------*/ @@ -2909,21 +2883,31 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd ) if (dis) VG_(printf)("\t0x%x: ", eip); /* Spot the client-request magic sequence, if required. 
*/ - if (VG_(clo_client_perms)) { + if (1 /*VG_(clo_client_perms)*/) { UChar* myeip = (UChar*)eip; /* Spot this: C1C01D roll $29, %eax C1C003 roll $3, %eax - C1C01B roll $27, %eax - C1C005 roll $5, %eax + C1C81B rorl $27, %eax + C1C805 rorl $5, %eax + C1C00D roll $13, %eax + C1C013 roll $19, %eax */ - if (myeip[0] == 0xC1 && myeip[1] == 0xC0 && myeip[2] == 0x1D && - myeip[3] == 0xC1 && myeip[4] == 0xC0 && myeip[5] == 0x03 && - myeip[6] == 0xC1 && myeip[7] == 0xC0 && myeip[8] == 0x1B && - myeip[9] == 0xC1 && myeip[10] == 0xC0 && myeip[11] == 0x05) { - vg_assert(VG_(clo_instrument)); - dis_ClientRequest(cb); - eip += 12; + if (myeip[ 0] == 0xC1 && myeip[ 1] == 0xC0 && myeip[ 2] == 0x1D && + myeip[ 3] == 0xC1 && myeip[ 4] == 0xC0 && myeip[ 5] == 0x03 && + myeip[ 6] == 0xC1 && myeip[ 7] == 0xC8 && myeip[ 8] == 0x1B && + myeip[ 9] == 0xC1 && myeip[10] == 0xC8 && myeip[11] == 0x05 && + myeip[12] == 0xC1 && myeip[13] == 0xC0 && myeip[14] == 0x0D && + myeip[15] == 0xC1 && myeip[16] == 0xC0 && myeip[17] == 0x13 + ) { + eip += 18; + uInstr1(cb, JMP, 0, Literal, 0); + uLiteral(cb, eip); + uCond(cb, CondAlways); + LAST_UINSTR(cb).jmpkind = JmpClientReq; + *isEnd = True; + if (dis) + VG_(printf)("%%edx = client_request ( %%eax )\n"); return eip; } } @@ -2978,9 +2962,7 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd ) uInstr2(cb, PUT, 4, TempReg, t1, ArchReg, R_ESP); uInstr1(cb, JMP, 0, TempReg, t2); uCond(cb, CondAlways); - - if (d32 == 0) - LAST_UINSTR(cb).ret_dispatch = True; + LAST_UINSTR(cb).jmpkind = JmpRet; *isEnd = True; if (dis) { @@ -2992,22 +2974,6 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd ) case 0xE8: /* CALL J4 */ d32 = getUDisp32(eip); eip += 4; d32 += eip; /* eip now holds return-to addr, d32 is call-to addr */ - if (d32 == (Addr)&VG_(shutdown)) { - /* Set vg_dispatch_ctr to 1, vg_interrupt_reason to VG_Y_EXIT, - and get back to the dispatch loop. 
We ask for a jump to this - CALL insn because vg_dispatch will ultimately transfer control - to the real CPU, and we want this call to be the first insn - it does. */ - uInstr0(cb, CALLM_S, 0); - uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_request_normal_exit)); - uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty); - uInstr0(cb, CALLM_E, 0); - uInstr1(cb, JMP, 0, Literal, 0); - uLiteral(cb, eip-5); - uCond(cb, CondAlways); - *isEnd = True; - if (dis) VG_(printf)("call 0x%x\n",d32); - } else if (d32 == eip && getUChar(eip) >= 0x58 && getUChar(eip) <= 0x5F) { /* Specially treat the position-independent-code idiom @@ -3040,7 +3006,7 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd ) uInstr1(cb, JMP, 0, Literal, 0); uLiteral(cb, d32); uCond(cb, CondAlways); - LAST_UINSTR(cb).call_dispatch = True; + LAST_UINSTR(cb).jmpkind = JmpCall; *isEnd = True; if (dis) VG_(printf)("call 0x%x\n",d32); } @@ -3179,14 +3145,10 @@ static Addr disInstr ( UCodeBlock* cb, Addr eip, Bool* isEnd ) /* It's important that all ArchRegs carry their up-to-date value at this point. So we declare an end-of-block here, which forces any TempRegs caching ArchRegs to be flushed. 
*/ - t1 = newTemp(cb); - uInstr0(cb, CALLM_S, 0); - uInstr1(cb, CALLM, 0, Lit16, VGOFF_(helper_do_syscall) ); - uFlagsRWU(cb, FlagsEmpty, FlagsEmpty, FlagsEmpty); - uInstr0(cb, CALLM_E, 0); uInstr1(cb, JMP, 0, Literal, 0); uLiteral(cb, eip); uCond(cb, CondAlways); + LAST_UINSTR(cb).jmpkind = JmpSyscall; *isEnd = True; if (dis) VG_(printf)("int $0x80\n"); break; diff --git a/vg_translate.c b/vg_translate.c index 1423b8d3da..d2a8571cda 100644 --- a/vg_translate.c +++ b/vg_translate.c @@ -153,8 +153,8 @@ void VG_(emptyUInstr) ( UInstr* u ) u->val1 = u->val2 = u->val3 = 0; u->tag1 = u->tag2 = u->tag3 = NoValue; u->flags_r = u->flags_w = FlagsEmpty; - u->call_dispatch = False; - u->smc_check = u->signed_widen = u->ret_dispatch = False; + u->jmpkind = JmpBoring; + u->smc_check = u->signed_widen = False; u->lit32 = 0; u->opcode = 0; u->size = 0; @@ -259,8 +259,7 @@ void copyAuxInfoFromTo ( UInstr* src, UInstr* dst ) dst->extra4b = src->extra4b; dst->smc_check = src->smc_check; dst->signed_widen = src->signed_widen; - dst->ret_dispatch = src->ret_dispatch; - dst->call_dispatch = src->call_dispatch; + dst->jmpkind = src->jmpkind; dst->flags_r = src->flags_r; dst->flags_w = src->flags_w; } @@ -917,10 +916,15 @@ void VG_(ppUInstr) ( Int instrNo, UInstr* u ) case JMP: case CC2VAL: case PUSH: case POP: case CLEAR: case CALLM: - if (u->opcode == JMP && u->ret_dispatch) - VG_(printf)("-r"); - if (u->opcode == JMP && u->call_dispatch) - VG_(printf)("-c"); + if (u->opcode == JMP) { + switch (u->jmpkind) { + case JmpCall: VG_(printf)("-c"); break; + case JmpRet: VG_(printf)("-r"); break; + case JmpSyscall: VG_(printf)("-sys"); break; + case JmpClientReq: VG_(printf)("-cli"); break; + default: break; + } + } VG_(printf)("\t"); ppUOperand(u, 1, u->size, False); break; diff --git a/vg_transtab.c b/vg_transtab.c index 1580f03928..34c35cc4e1 100644 --- a/vg_transtab.c +++ b/vg_transtab.c @@ -533,9 +533,9 @@ void VG_(smc_check4) ( Addr a ) /* Force an exit before the next basic block, so 
the translation cache can be flushed appropriately. */ - VG_(dispatch_ctr_SAVED) = VG_(dispatch_ctr); - VG_(dispatch_ctr) = 1; - VG_(interrupt_reason) = VG_Y_SMC; + // VG_(dispatch_ctr_SAVED) = VG_(dispatch_ctr); + //VG_(dispatch_ctr) = 1; + //VG_(interrupt_reason) = VG_Y_SMC; } -- 2.47.3