unmodified. Haaaack!
Also mash the how value so that the SIG_ constants from glibc
- do not have to be included into vg_scheduler.c. */
+ are mapped to VKI_ constants, so that the former do not have to
+ be included into vg_scheduler.c. */
ensure_valgrind("pthread_sigmask");
switch (how) {
- case SIG_SETMASK: how = 1; break;
- case SIG_BLOCK: how = 2; break;
- case SIG_UNBLOCK: how = 3; break;
+ case SIG_SETMASK: how = VKI_SIG_SETMASK; break;
+ case SIG_BLOCK: how = VKI_SIG_BLOCK; break;
+ case SIG_UNBLOCK: how = VKI_SIG_UNBLOCK; break;
default: return EINVAL;
}
}
+int pthread_kill(pthread_t thread, int signo)
+{
+ int res;
+ ensure_valgrind("pthread_kill");
+ VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
+ VG_USERREQ__PTHREAD_KILL,
+ thread, signo, 0, 0);
+ return res;
+}
+
+
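/* Illustrative client-side usage (not part of the patch): the program
   below exercises all three request types added in this series --
   pthread_sigmask (VG_USERREQ__PTHREAD_SIGMASK), sigwait
   (VG_USERREQ__SIGWAIT) and the new pthread_kill
   (VG_USERREQ__PTHREAD_KILL).  Blocking SIGUSR1 before creating the
   worker avoids the usual delivery race. */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void* worker ( void* arg )
{
   sigset_t set;
   int      sig = 0;
   (void)arg;
   sigemptyset(&set);
   sigaddset(&set, SIGUSR1);
   sigwait(&set, &sig);          /* parks the thread in VgTs_WaitSIG */
   printf("worker: got signal %d\n", sig);
   return NULL;
}

int main ( void )
{
   pthread_t th;
   sigset_t  set;
   sigemptyset(&set);
   sigaddset(&set, SIGUSR1);
   pthread_sigmask(SIG_BLOCK, &set, NULL);  /* inherited by the child */
   pthread_create(&th, NULL, worker, NULL);
   pthread_kill(th, SIGUSR1);    /* routed through the wrapper above */
   pthread_join(th, NULL);
   return 0;
}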
/* ---------------------------------------------------
THREAD-SPECIFICs
------------------------------------------------ */
//void pthread_join ( void ) { unimp("pthread_join"); }
//void pthread_key_create ( void ) { unimp("pthread_key_create"); }
//void pthread_key_delete ( void ) { unimp("pthread_key_delete"); }
-void pthread_kill ( void ) { unimp("pthread_kill"); }
+//void pthread_kill ( void ) { unimp("pthread_kill"); }
//void pthread_mutex_destroy ( void ) { unimp("pthread_mutex_destroy"); }
//void pthread_mutex_init ( void ) { unimp("pthread_mutex_init"); }
//void pthread_mutex_lock ( void ) { unimp("pthread_mutex_lock"); }
#define VG_USERREQ__READ_MILLISECOND_TIMER 0x3011
#define VG_USERREQ__PTHREAD_SIGMASK 0x3012
#define VG_USERREQ__SIGWAIT 0x3013
+#define VG_USERREQ__PTHREAD_KILL 0x3014
/* Cosmetic ... */
#define VG_USERREQ__GET_PTHREAD_TRACE_LEVEL 0x3101
ThreadState;
-/* Trivial range check on tid. */
+/* The thread table. */
+extern ThreadState VG_(threads)[VG_N_THREADS];
+
+/* Check that tid is in range and denotes a non-Empty thread. */
extern Bool VG_(is_valid_tid) ( ThreadId tid );
+/* Check that tid is in range. */
+extern Bool VG_(is_valid_or_empty_tid) ( ThreadId tid );
+
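/* (Aside: the split matters because vg_alloc_ThreadState() hands back a
   slot that is still VgTs_Empty, so its callers must assert with
   VG_(is_valid_or_empty_tid), not VG_(is_valid_tid) -- see the
   do_pthread_create change below.) */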
/* Copy the specified thread's state into VG_(baseBlock) in
preparation for running it. */
extern void VG_(load_thread_state)( ThreadId );
VG_(baseBlock) with junk, for sanity-check reasons. */
extern void VG_(save_thread_state)( ThreadId );
-/* Get the thread state block for the specified thread. */
-extern ThreadState* VG_(get_thread_state)( ThreadId );
-extern ThreadState* VG_(get_thread_state_UNCHECKED)( ThreadId );
-
/* And for the currently running one, if valid. */
extern ThreadState* VG_(get_current_thread_state) ( void );
(VG_AR_CLIENT_STACKBASE_REDZONE_SZW * VKI_BYTES_PER_WORD)
+/* Write a value to the client's %EDX (request return value register)
+ and set the shadow to indicate it is defined. */
+#define SET_EDX(zztid, zzval) \
+ do { VG_(threads)[zztid].m_edx = (zzval); \
+ VG_(threads)[zztid].sh_edx = VGM_WORD_VALID; \
+ } while (0)
+
+#define SET_EAX(zztid, zzval) \
+ do { VG_(threads)[zztid].m_eax = (zzval); \
+ VG_(threads)[zztid].sh_eax = VGM_WORD_VALID; \
+ } while (0)
+
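/* Why the shadow write: without setting sh_edx, the memory-checking
   instrumentation would treat the request's return value as
   uninitialised.  A standalone sketch of the do { ... } while (0) idiom
   used above -- all names here are stand-ins, not the real Valgrind
   types or values: */
#include <stdio.h>

typedef struct { unsigned int m_edx, sh_edx; } FakeThread;
#define FAKE_WORD_VALID 0x0u          /* stand-in for VGM_WORD_VALID */

#define SET_EDX_DEMO(t, val)                 \
   do { (t).m_edx  = (val);                  \
        (t).sh_edx = FAKE_WORD_VALID;        \
   } while (0)

int main ( void )
{
   FakeThread th = { 0, 0xFFFFFFFFu };  /* shadow starts "undefined" */
   if (1)
      SET_EDX_DEMO(th, 42);   /* do{}while(0) keeps both writes one stmt */
   else
      SET_EDX_DEMO(th, 0);
   printf("edx=%u shadow=%#x\n", th.m_edx, th.sh_edx);
   return 0;
}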
/* ---------------------------------------------------------------------
Exports of vg_signals.c
extern Bool VG_(deliver_signals) ( void );
extern void VG_(unblock_host_signal) ( Int sigNo );
-extern void VG_(notify_signal_machinery_of_thread_exit) ( ThreadId tid );
-extern void VG_(update_sigstate_following_WaitSIG_change) ( void );
+extern void VG_(handle_SCSS_change) ( Bool force_update );
+
/* Fake system calls for signal handling. */
extern void VG_(do__NR_sigaction) ( ThreadId tid );
-extern void VG_(do__NR_sigprocmask) ( Int how, vki_ksigset_t* set );
+extern void VG_(do__NR_sigprocmask) ( ThreadId tid,
+ Int how,
+ vki_ksigset_t* set,
+ vki_ksigset_t* oldset );
+extern void VG_(do_pthread_sigmask_SCSS_upd) ( ThreadId tid,
+ Int how,
+ vki_ksigset_t* set,
+ vki_ksigset_t* oldset );
+extern void VG_(send_signal_to_thread) ( ThreadId thread,
+ Int signo );
/* Modify the current thread's state once we have detected it is
returning from a signal handler. */
/* Handy utilities to block/restore all host signals. */
extern void VG_(block_all_host_signals)
( /* OUT */ vki_ksigset_t* saved_mask );
-extern void VG_(restore_host_signals)
+extern void VG_(restore_all_host_signals)
( /* IN */ vki_ksigset_t* saved_mask );
/* ---------------------------------------------------------------------
definitions, which are different in places from those that glibc
defines. Since we're operating right at the kernel interface,
glibc's view of the world is entirely irrelevant. */
+
+/* --- Signal set ops --- */
extern Int VG_(ksigfillset)( vki_ksigset_t* set );
extern Int VG_(ksigemptyset)( vki_ksigset_t* set );
+
+extern Bool VG_(kisfullsigset)( vki_ksigset_t* set );
extern Bool VG_(kisemptysigset)( vki_ksigset_t* set );
+
extern Int VG_(ksigaddset)( vki_ksigset_t* set, Int signum );
+extern Int VG_(ksigdelset)( vki_ksigset_t* set, Int signum );
+extern Int VG_(ksigismember) ( vki_ksigset_t* set, Int signum );
-extern Int VG_(ksigprocmask)( Int how, const vki_ksigset_t* set,
- vki_ksigset_t* oldset );
-extern Int VG_(ksigaction) ( Int signum,
- const vki_ksigaction* act,
- vki_ksigaction* oldact );
-extern Int VG_(ksigismember) ( vki_ksigset_t* set, Int signum );
extern void VG_(ksigaddset_from_set)( vki_ksigset_t* dst,
vki_ksigset_t* src );
extern void VG_(ksigdelset_from_set)( vki_ksigset_t* dst,
vki_ksigset_t* src );
+/* --- Mess with the kernel's sig state --- */
+extern Int VG_(ksigprocmask)( Int how, const vki_ksigset_t* set,
+ vki_ksigset_t* oldset );
+extern Int VG_(ksigaction) ( Int signum,
+ const vki_ksigaction* act,
+ vki_ksigaction* oldact );
extern Int VG_(ksignal)(Int signum, void (*sighandler)(Int));
extern Int VG_(ksigaltstack)( const vki_kstack_t* ss, vki_kstack_t* oss );
+extern Int VG_(kill)( Int pid, Int signo );
/* ---------------------------------------------------------------------
extern Bool VG_(is_kerror) ( Int res );
-#define KERNEL_DO_SYSCALL(thread_id, result_lvalue) \
- VG_(load_thread_state)(thread_id); \
- VG_(copy_baseBlock_to_m_state_static)(); \
- VG_(do_syscall)(); \
- VG_(copy_m_state_static_to_baseBlock)(); \
- VG_(save_thread_state)(thread_id); \
- result_lvalue = VG_(get_thread_state)(thread_id)->m_eax;
+#define KERNEL_DO_SYSCALL(thread_id, result_lvalue) \
+ VG_(load_thread_state)(thread_id); \
+ VG_(copy_baseBlock_to_m_state_static)(); \
+ VG_(do_syscall)(); \
+ VG_(copy_m_state_static_to_baseBlock)(); \
+ VG_(save_thread_state)(thread_id); \
+ VG_(threads)[thread_id].sh_eax = VGM_WORD_VALID; \
+ result_lvalue = VG_(threads)[thread_id].m_eax;
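/* Typical call pattern, as used elsewhere in this patch:

      Int res;
      KERNEL_DO_SYSCALL(tid, res);
      if (VG_(is_kerror)(res)) ...

   The newly added sh_eax line means the syscall result also reads as
   initialised to the instrumentation, mirroring SET_EDX/SET_EAX. */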
/* ---------------------------------------------------------------------
#define VKI_SA_ONSTACK 0x08000000
#define VKI_SA_RESTART 0x10000000
-#if 0
#define VKI_SA_NOCLDSTOP 0x00000001
+#define VKI_SA_RESETHAND 0x80000000
+#define VKI_SA_ONESHOT VKI_SA_RESETHAND
+#define VKI_SA_NODEFER 0x40000000
+#define VKI_SA_NOMASK VKI_SA_NODEFER
+#if 0
#define VKI_SA_NOCLDWAIT 0x00000002 /* not supported yet */
#define VKI_SA_SIGINFO 0x00000004
-#define VKI_SA_NODEFER 0x40000000
-#define VKI_SA_RESETHAND 0x80000000
-#define VKI_SA_NOMASK SA_NODEFER
-#define VKI_SA_ONESHOT SA_RESETHAND
#define VKI_SA_INTERRUPT 0x20000000 /* dummy -- ignored */
#define VKI_SA_RESTORER 0x04000000
#endif
#define VKI_EINTR 4 /* Interrupted system call */
#define VKI_EINVAL 22 /* Invalid argument */
#define VKI_ENOMEM 12 /* Out of memory */
+#define VKI_EFAULT 14 /* Bad address */
+#define VKI_ESRCH 3 /* No such process */
#define VKI_EWOULDBLOCK VKI_EAGAIN /* Operation would block */
#define VKI_EAGAIN 11 /* Try again */
/* Process Valgrind's command-line opts (from env var VG_OPTS). */
process_cmd_line_options();
- /* Initialise the signal handling subsystem. */
+ /* Initialise the scheduler, and copy the client's state from
+ baseBlock into VG_(threads)[1]. This has to come before signal
+ initialisations. */
+ VG_(scheduler_init)();
+
+ /* Initialise the signal handling subsystem, temporarily parking
+ the saved blocking-mask in saved_sigmask. */
VG_(sigstartup_actions)();
+ /* Perhaps we're profiling Valgrind? */
# ifdef VG_PROFILE
VGP_(init_profiling)();
# endif
VG_(bbs_to_go) = VG_(clo_stop_after);
+ /* Run! */
VGP_PUSHCC(VgpSched);
- VG_(scheduler_init)();
src = VG_(scheduler)();
VGP_POPCC;
case VgSrc_ExitSyscall: /* the normal way out */
vg_assert(VG_(last_run_tid) > 0
&& VG_(last_run_tid) < VG_N_THREADS);
- tst = VG_(get_thread_state)(VG_(last_run_tid));
+ tst = & VG_(threads)[VG_(last_run_tid)];
vg_assert(tst->status == VgTs_Runnable);
/* The thread's %EBX will hold the arg to exit(), so we just
do exit with that arg. */
return True;
}
+Bool VG_(kisfullsigset)( vki_ksigset_t* set )
+{
+ Int i;
+ vg_assert(set != NULL);
+ for (i = 0; i < VKI_KNSIG_WORDS; i++)
+ if (set->ws[i] != ~0x0) return False;
+ return True;
+}
+
+
Int VG_(ksigaddset)( vki_ksigset_t* set, Int signum )
{
if (set == NULL)
return 0;
}
+Int VG_(ksigdelset)( vki_ksigset_t* set, Int signum )
+{
+ if (set == NULL)
+ return -1;
+ if (signum < 1 || signum > VKI_KNSIG)
+ return -1;
+ signum--;
+ set->ws[signum / VKI_KNSIG_BPW] &= ~(1 << (signum % VKI_KNSIG_BPW));
+ return 0;
+}
+
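/* Worked example (illustrative, standalone) of the word/bit mapping
   used by ksigaddset/ksigdelset above, assuming 32-bit set words,
   i.e. VKI_KNSIG_BPW == 32: */
#include <stdio.h>

int main ( void )
{
   const int BPW    = 32;              /* stand-in for VKI_KNSIG_BPW */
   int       sigs[] = { 1, 31, 32, 33, 64 };
   int       i, s;
   for (i = 0; i < 5; i++) {
      s = sigs[i] - 1;                 /* the same signum-- shift */
      printf("signal %2d -> ws[%d], bit %d\n", sigs[i], s / BPW, s % BPW);
   }
   return 0;
}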
Int VG_(ksigismember) ( vki_ksigset_t* set, Int signum )
{
if (set == NULL)
= vg_do_syscall4(__NR_rt_sigaction,
signum, (UInt)act, (UInt)oldact,
VKI_KNSIG_WORDS * VKI_BYTES_PER_WORD);
+ /* VG_(printf)("res = %d\n",res); */
return VG_(is_kerror)(res) ? -1 : 0;
}
}
+Int VG_(kill)( Int pid, Int signo )
+{
+ Int res = vg_do_syscall2(__NR_kill, pid, signo);
+ return VG_(is_kerror)(res) ? -1 : 0;
+}
+
+
/* ---------------------------------------------------------------------
mmap/munmap, exit, fcntl
------------------------------------------------------------------ */
void VG_(assert_fail) ( Char* expr, Char* file, Int line, Char* fn )
{
+ static Bool entered = False;
+ if (entered)
+ VG_(exit)(2);
+ entered = True;
VG_(printf)("\n%s: %s:%d (%s): Assertion `%s' failed.\n",
"valgrind", file, line, fn, expr );
VG_(pp_sched_status)();
/* struct ThreadState is defined in vg_include.h. */
-/* Private globals. A statically allocated array of threads. NOTE:
- [0] is never used, to simplify the simulation of initialisers for
+/* Globals. A statically allocated array of threads. NOTE: [0] is
+ never used, to simplify the simulation of initialisers for
LinuxThreads. */
-static ThreadState vg_threads[VG_N_THREADS];
+ThreadState VG_(threads)[VG_N_THREADS];
/* The tid of the thread currently in VG_(baseBlock). */
static Int vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
__inline__
Bool VG_(is_valid_tid) ( ThreadId tid )
+{
+ /* tid is unsigned, hence no < 0 test. */
+ if (tid == 0) return False;
+ if (tid >= VG_N_THREADS) return False;
+ if (VG_(threads)[tid].status == VgTs_Empty) return False;
+ return True;
+}
+
+
+__inline__
+Bool VG_(is_valid_or_empty_tid) ( ThreadId tid )
{
/* tid is unsigned, hence no < 0 test. */
if (tid == 0) return False;
if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
tid = vg_tid_currently_in_baseBlock;
if (VG_(baseBlock)[VGOFF_(m_esp)] <= a
- && a <= vg_threads[tid].stack_highest_word)
+ && a <= VG_(threads)[tid].stack_highest_word)
return tid;
else
tid_to_skip = tid;
}
for (tid = 1; tid < VG_N_THREADS; tid++) {
- if (vg_threads[tid].status == VgTs_Empty) continue;
+ if (VG_(threads)[tid].status == VgTs_Empty) continue;
if (tid == tid_to_skip) continue;
- if (vg_threads[tid].m_esp <= a
- && a <= vg_threads[tid].stack_highest_word)
+ if (VG_(threads)[tid].m_esp <= a
+ && a <= VG_(threads)[tid].stack_highest_word)
return tid;
}
return VG_INVALID_THREADID;
Int i;
VG_(printf)("\nsched status:\n");
for (i = 1; i < VG_N_THREADS; i++) {
- if (vg_threads[i].status == VgTs_Empty) continue;
+ if (VG_(threads)[i].status == VgTs_Empty) continue;
VG_(printf)("\nThread %d: status = ", i);
- switch (vg_threads[i].status) {
+ switch (VG_(threads)[i].status) {
case VgTs_Runnable: VG_(printf)("Runnable"); break;
case VgTs_WaitFD: VG_(printf)("WaitFD"); break;
case VgTs_WaitJoiner: VG_(printf)("WaitJoiner(%d)",
- vg_threads[i].joiner); break;
+ VG_(threads)[i].joiner); break;
case VgTs_WaitJoinee: VG_(printf)("WaitJoinee"); break;
case VgTs_Sleeping: VG_(printf)("Sleeping"); break;
case VgTs_WaitMX: VG_(printf)("WaitMX"); break;
default: VG_(printf)("???"); break;
}
VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
- vg_threads[i].associated_mx,
- vg_threads[i].associated_cv );
+ VG_(threads)[i].associated_mx,
+ VG_(threads)[i].associated_cv );
VG_(pp_ExeContext)(
- VG_(get_ExeContext)( False, vg_threads[i].m_eip,
- vg_threads[i].m_ebp ));
+ VG_(get_ExeContext)( False, VG_(threads)[i].m_eip,
+ VG_(threads)[i].m_ebp ));
}
VG_(printf)("\n");
}
Int orig_size, trans_size;
/* Ensure there is space to hold a translation. */
VG_(maybe_do_lru_pass)();
- VG_(translate)( &vg_threads[tid],
+ VG_(translate)( &VG_(threads)[tid],
orig_addr, &orig_size, &trans_addr, &trans_size );
/* Copy data at trans_addr into the translation cache.
Returned pointer is to the code, not to the 4-byte
{
Int i;
for (i = 1; i < VG_N_THREADS; i++) {
- if (vg_threads[i].status == VgTs_Empty)
+ if (VG_(threads)[i].status == VgTs_Empty)
return i;
}
VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
}
-ThreadState* VG_(get_thread_state_UNCHECKED) ( ThreadId tid )
-{
- vg_assert(VG_(is_valid_tid)(tid));
- return & vg_threads[tid];
-}
-
-
-ThreadState* VG_(get_thread_state) ( ThreadId tid )
-{
- vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status != VgTs_Empty);
- return & vg_threads[tid];
-}
-
-
ThreadState* VG_(get_current_thread_state) ( void )
{
- vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);
- return VG_(get_thread_state) ( vg_tid_currently_in_baseBlock );
+ vg_assert(VG_(is_valid_tid)(vg_tid_currently_in_baseBlock));
+ return & VG_(threads)[vg_tid_currently_in_baseBlock];
}
ThreadId VG_(get_current_tid) ( void )
{
- vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);
+ vg_assert(VG_(is_valid_tid)(vg_tid_currently_in_baseBlock));
return vg_tid_currently_in_baseBlock;
}
Int i;
vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
- VG_(baseBlock)[VGOFF_(m_eax)] = vg_threads[tid].m_eax;
- VG_(baseBlock)[VGOFF_(m_ebx)] = vg_threads[tid].m_ebx;
- VG_(baseBlock)[VGOFF_(m_ecx)] = vg_threads[tid].m_ecx;
- VG_(baseBlock)[VGOFF_(m_edx)] = vg_threads[tid].m_edx;
- VG_(baseBlock)[VGOFF_(m_esi)] = vg_threads[tid].m_esi;
- VG_(baseBlock)[VGOFF_(m_edi)] = vg_threads[tid].m_edi;
- VG_(baseBlock)[VGOFF_(m_ebp)] = vg_threads[tid].m_ebp;
- VG_(baseBlock)[VGOFF_(m_esp)] = vg_threads[tid].m_esp;
- VG_(baseBlock)[VGOFF_(m_eflags)] = vg_threads[tid].m_eflags;
- VG_(baseBlock)[VGOFF_(m_eip)] = vg_threads[tid].m_eip;
+ VG_(baseBlock)[VGOFF_(m_eax)] = VG_(threads)[tid].m_eax;
+ VG_(baseBlock)[VGOFF_(m_ebx)] = VG_(threads)[tid].m_ebx;
+ VG_(baseBlock)[VGOFF_(m_ecx)] = VG_(threads)[tid].m_ecx;
+ VG_(baseBlock)[VGOFF_(m_edx)] = VG_(threads)[tid].m_edx;
+ VG_(baseBlock)[VGOFF_(m_esi)] = VG_(threads)[tid].m_esi;
+ VG_(baseBlock)[VGOFF_(m_edi)] = VG_(threads)[tid].m_edi;
+ VG_(baseBlock)[VGOFF_(m_ebp)] = VG_(threads)[tid].m_ebp;
+ VG_(baseBlock)[VGOFF_(m_esp)] = VG_(threads)[tid].m_esp;
+ VG_(baseBlock)[VGOFF_(m_eflags)] = VG_(threads)[tid].m_eflags;
+ VG_(baseBlock)[VGOFF_(m_eip)] = VG_(threads)[tid].m_eip;
for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
- VG_(baseBlock)[VGOFF_(m_fpustate) + i] = vg_threads[tid].m_fpu[i];
-
- VG_(baseBlock)[VGOFF_(sh_eax)] = vg_threads[tid].sh_eax;
- VG_(baseBlock)[VGOFF_(sh_ebx)] = vg_threads[tid].sh_ebx;
- VG_(baseBlock)[VGOFF_(sh_ecx)] = vg_threads[tid].sh_ecx;
- VG_(baseBlock)[VGOFF_(sh_edx)] = vg_threads[tid].sh_edx;
- VG_(baseBlock)[VGOFF_(sh_esi)] = vg_threads[tid].sh_esi;
- VG_(baseBlock)[VGOFF_(sh_edi)] = vg_threads[tid].sh_edi;
- VG_(baseBlock)[VGOFF_(sh_ebp)] = vg_threads[tid].sh_ebp;
- VG_(baseBlock)[VGOFF_(sh_esp)] = vg_threads[tid].sh_esp;
- VG_(baseBlock)[VGOFF_(sh_eflags)] = vg_threads[tid].sh_eflags;
+ VG_(baseBlock)[VGOFF_(m_fpustate) + i] = VG_(threads)[tid].m_fpu[i];
+
+ VG_(baseBlock)[VGOFF_(sh_eax)] = VG_(threads)[tid].sh_eax;
+ VG_(baseBlock)[VGOFF_(sh_ebx)] = VG_(threads)[tid].sh_ebx;
+ VG_(baseBlock)[VGOFF_(sh_ecx)] = VG_(threads)[tid].sh_ecx;
+ VG_(baseBlock)[VGOFF_(sh_edx)] = VG_(threads)[tid].sh_edx;
+ VG_(baseBlock)[VGOFF_(sh_esi)] = VG_(threads)[tid].sh_esi;
+ VG_(baseBlock)[VGOFF_(sh_edi)] = VG_(threads)[tid].sh_edi;
+ VG_(baseBlock)[VGOFF_(sh_ebp)] = VG_(threads)[tid].sh_ebp;
+ VG_(baseBlock)[VGOFF_(sh_esp)] = VG_(threads)[tid].sh_esp;
+ VG_(baseBlock)[VGOFF_(sh_eflags)] = VG_(threads)[tid].sh_eflags;
vg_tid_currently_in_baseBlock = tid;
}
vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);
- vg_threads[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
- vg_threads[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
- vg_threads[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
- vg_threads[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
- vg_threads[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
- vg_threads[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
- vg_threads[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
- vg_threads[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
- vg_threads[tid].m_eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
- vg_threads[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
+ VG_(threads)[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
+ VG_(threads)[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
+ VG_(threads)[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
+ VG_(threads)[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
+ VG_(threads)[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
+ VG_(threads)[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
+ VG_(threads)[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
+ VG_(threads)[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
+ VG_(threads)[tid].m_eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
+ VG_(threads)[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
- vg_threads[tid].m_fpu[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i];
-
- vg_threads[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
- vg_threads[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
- vg_threads[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
- vg_threads[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
- vg_threads[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
- vg_threads[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
- vg_threads[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
- vg_threads[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
- vg_threads[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
+ VG_(threads)[tid].m_fpu[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i];
+
+ VG_(threads)[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
+ VG_(threads)[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
+ VG_(threads)[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
+ VG_(threads)[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
+ VG_(threads)[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
+ VG_(threads)[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
+ VG_(threads)[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
+ VG_(threads)[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
+ VG_(threads)[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
/* Fill it up with junk. */
VG_(baseBlock)[VGOFF_(m_eax)] = junk;
{
volatile UInt trc = 0;
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status == VgTs_Runnable);
+ vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
vg_assert(VG_(bbs_to_go) > 0);
VGP_PUSHCC(VgpRun);
}
for (i = 0 /* NB; not 1 */; i < VG_N_THREADS; i++) {
- vg_threads[i].status = VgTs_Empty;
- vg_threads[i].stack_size = 0;
- vg_threads[i].stack_base = (Addr)NULL;
- vg_threads[i].tid = i;
- VG_(ksigemptyset)(&vg_threads[i].sig_mask);
- VG_(ksigemptyset)(&vg_threads[i].sigs_waited_for);
+ VG_(threads)[i].status = VgTs_Empty;
+ VG_(threads)[i].stack_size = 0;
+ VG_(threads)[i].stack_base = (Addr)NULL;
+ VG_(threads)[i].tid = i;
+ VG_(ksigemptyset)(&VG_(threads)[i].sig_mask);
+ VG_(ksigemptyset)(&VG_(threads)[i].sigs_waited_for);
}
for (i = 0; i < VG_N_WAITING_FDS; i++)
tid_main = vg_alloc_ThreadState();
vg_assert(tid_main == 1);
- vg_threads[tid_main].status = VgTs_Runnable;
- vg_threads[tid_main].joiner = VG_INVALID_THREADID;
- vg_threads[tid_main].associated_mx = NULL;
- vg_threads[tid_main].associated_cv = NULL;
- vg_threads[tid_main].retval = NULL; /* not important */
+ VG_(threads)[tid_main].status = VgTs_Runnable;
+ VG_(threads)[tid_main].joiner = VG_INVALID_THREADID;
+ VG_(threads)[tid_main].associated_mx = NULL;
+ VG_(threads)[tid_main].associated_cv = NULL;
+ VG_(threads)[tid_main].retval = NULL; /* not important */
for (i = 0; i < VG_N_THREAD_KEYS; i++)
- vg_threads[tid_main].specifics[i] = NULL;
+ VG_(threads)[tid_main].specifics[i] = NULL;
/* Copy VG_(baseBlock) state to tid_main's slot. */
vg_tid_currently_in_baseBlock = tid_main;
VG_(save_thread_state) ( tid_main );
- vg_threads[tid_main].stack_highest_word
- = vg_threads[tid_main].m_esp /* -4 ??? */;
+ VG_(threads)[tid_main].stack_highest_word
+ = VG_(threads)[tid_main].m_esp /* -4 ??? */;
/* So now ... */
vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
return True; \
}
- ThreadState* tst = &vg_threads[tid];
+ ThreadState* tst = &VG_(threads)[tid];
UInt* arg = (UInt*)(tst->m_eax);
UInt req_no = arg[0];
Int i, waiters;
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status == VgTs_WaitFD);
- vg_assert(vg_threads[tid].m_eax == __NR_read
- || vg_threads[tid].m_eax == __NR_write);
+ vg_assert(VG_(threads)[tid].status == VgTs_WaitFD);
+ vg_assert(VG_(threads)[tid].m_eax == __NR_read
+ || VG_(threads)[tid].m_eax == __NR_write);
/* Excessively paranoidly ... find the fd this op was waiting
for, and mark it as not being waited on. */
for (i = 0; i < VG_N_WAITING_FDS; i++) {
if (vg_waiting_fds[i].tid == tid) {
waiters++;
- vg_assert(vg_waiting_fds[i].syscall_no == vg_threads[tid].m_eax);
+ vg_assert(vg_waiting_fds[i].syscall_no == VG_(threads)[tid].m_eax);
}
}
vg_assert(waiters == 1);
/* Easy; we don't have to do anything. */
return;
- if (vg_threads[tid].status == VgTs_WaitFD
- && (vg_threads[tid].m_eax == __NR_read
- || vg_threads[tid].m_eax == __NR_write)) {
+ if (VG_(threads)[tid].status == VgTs_WaitFD
+ && (VG_(threads)[tid].m_eax == __NR_read
+ || VG_(threads)[tid].m_eax == __NR_write)) {
/* read() or write() interrupted. Force a return with EINTR. */
cleanup_waiting_fd_table(tid);
- vg_threads[tid].m_eax = -VKI_EINTR;
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].m_eax = -VKI_EINTR;
+ VG_(threads)[tid].status = VgTs_Runnable;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
return;
}
- if (vg_threads[tid].status == VgTs_WaitFD
- && vg_threads[tid].m_eax == __NR_nanosleep) {
+ if (VG_(threads)[tid].status == VgTs_WaitFD
+ && VG_(threads)[tid].m_eax == __NR_nanosleep) {
/* We interrupted a nanosleep(). The right thing to do is to
write the unused time to nanosleep's second param and return
EINTR, but I'm too lazy for that. */
return;
}
- if (vg_threads[tid].status == VgTs_WaitFD) {
+ if (VG_(threads)[tid].status == VgTs_WaitFD) {
VG_(panic)("handle_signal_return: unknown interrupted syscall");
}
Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status == VgTs_Runnable);
+ vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
- syscall_no = vg_threads[tid].m_eax; /* syscall number */
+ syscall_no = VG_(threads)[tid].m_eax; /* syscall number */
if (syscall_no == __NR_nanosleep) {
UInt t_now, t_awaken;
struct vki_timespec* req;
- req = (struct vki_timespec*)vg_threads[tid].m_ebx; /* arg1 */
+ req = (struct vki_timespec*)VG_(threads)[tid].m_ebx; /* arg1 */
t_now = VG_(read_millisecond_timer)();
t_awaken
= t_now
+ (UInt)1000ULL * (UInt)(req->tv_sec)
+ (UInt)(req->tv_nsec) / 1000000;
- vg_threads[tid].status = VgTs_Sleeping;
- vg_threads[tid].awaken_at = t_awaken;
+ VG_(threads)[tid].status = VgTs_Sleeping;
+ VG_(threads)[tid].awaken_at = t_awaken;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
t_now, t_awaken-t_now);
immediately, in order to lodge a request with the Linux kernel.
We later poll for I/O completion using select(). */
- fd = vg_threads[tid].m_ebx /* arg1 */;
+ fd = VG_(threads)[tid].m_ebx /* arg1 */;
orig_fd_blockness = fd_is_blockful(fd);
set_fd_nonblocking(fd);
vg_assert(!fd_is_blockful(fd));
VG_(check_known_blocking_syscall)(tid, syscall_no, NULL /* PRE */);
/* This trashes the thread's %eax; we have to preserve it. */
- saved_eax = vg_threads[tid].m_eax;
+ saved_eax = VG_(threads)[tid].m_eax;
KERNEL_DO_SYSCALL(tid,res);
/* Restore original blockfulness of the fd. */
*/
VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
/* We're still runnable. */
- vg_assert(vg_threads[tid].status == VgTs_Runnable);
+ vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
} else {
/* It would have blocked. First, restore %EAX to what it was
before our speculative call. */
- vg_threads[tid].m_eax = saved_eax;
+ VG_(threads)[tid].m_eax = saved_eax;
/* Put this fd in a table of fds on which we are waiting for
completion. The arguments for select() later are constructed
from this table. */
add_waiting_fd(tid, fd, saved_eax /* which holds the syscall # */);
/* Deschedule thread until an I/O completion happens. */
- vg_threads[tid].status = VgTs_WaitFD;
+ VG_(threads)[tid].status = VgTs_WaitFD;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,"block until I/O ready on fd %d", fd);
print_sched_event(tid, msg_buf);
/* Awaken any sleeping threads whose sleep has expired. */
for (tid = 1; tid < VG_N_THREADS; tid++)
- if (vg_threads[tid].status == VgTs_Sleeping)
+ if (VG_(threads)[tid].status == VgTs_Sleeping)
break;
/* Avoid pointless calls to VG_(read_millisecond_timer). */
if (tid < VG_N_THREADS) {
t_now = VG_(read_millisecond_timer)();
for (tid = 1; tid < VG_N_THREADS; tid++) {
- if (vg_threads[tid].status != VgTs_Sleeping)
+ if (VG_(threads)[tid].status != VgTs_Sleeping)
continue;
- if (t_now >= vg_threads[tid].awaken_at) {
+ if (t_now >= VG_(threads)[tid].awaken_at) {
/* Resume this thread. Set to zero the remaining-time
(second) arg of nanosleep, since it's used up all its
time. */
- vg_assert(vg_threads[tid].m_eax == __NR_nanosleep);
- rem = (struct vki_timespec *)vg_threads[tid].m_ecx; /* arg2 */
+ vg_assert(VG_(threads)[tid].m_eax == __NR_nanosleep);
+ rem = (struct vki_timespec *)VG_(threads)[tid].m_ecx; /* arg2 */
if (rem != NULL) {
rem->tv_sec = 0;
rem->tv_nsec = 0;
}
/* Make the syscall return 0 (success). */
- vg_threads[tid].m_eax = 0;
+ VG_(threads)[tid].m_eax = 0;
/* Reschedule this thread. */
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf, "at %d: nanosleep done",
t_now);
}
/* UNBLOCK ALL SIGNALS */
- VG_(restore_host_signals)( &saved_procmask );
+ VG_(restore_all_host_signals)( &saved_procmask );
/* VG_(printf)("poll_for_io_completions: %d fs ready\n", n_ready); */
/* The thread actually has to be waiting for the I/O event it
requested before we can deliver the result! */
- if (vg_threads[tid].status != VgTs_WaitFD)
+ if (VG_(threads)[tid].status != VgTs_WaitFD)
continue;
/* Ok, actually do it! We can safely use %EAX as the syscall
call would have blocked. */
syscall_no = vg_waiting_fds[i].syscall_no;
- vg_assert(syscall_no == vg_threads[tid].m_eax);
+ vg_assert(syscall_no == VG_(threads)[tid].m_eax);
KERNEL_DO_SYSCALL(tid,res);
VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
/* Reschedule. */
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
/* Mark slot as no longer in use. */
vg_waiting_fds[i].fd = -1;
/* pp_sched_status(); */
{
Int i, now;
for (i = 1; i < VG_N_THREADS; i++) {
- if (vg_threads[i].status != VgTs_WaitCV)
+ if (VG_(threads)[i].status != VgTs_WaitCV)
continue;
- if (vg_threads[i].awaken_at == 0xFFFFFFFF /* no timeout */)
+ if (VG_(threads)[i].awaken_at == 0xFFFFFFFF /* no timeout */)
continue;
now = VG_(read_millisecond_timer)();
- if (now >= vg_threads[i].awaken_at) {
+ if (now >= VG_(threads)[i].awaken_at) {
do_pthread_cond_timedwait_TIMEOUT(i);
}
}
while (True) {
tid_next++;
if (tid_next >= VG_N_THREADS) tid_next = 1;
- if (vg_threads[tid_next].status == VgTs_WaitFD
- || vg_threads[tid_next].status == VgTs_Sleeping
- || vg_threads[tid_next].status == VgTs_WaitSIG
- || (vg_threads[tid_next].status == VgTs_WaitCV
- && vg_threads[tid_next].awaken_at != 0xFFFFFFFF))
+ if (VG_(threads)[tid_next].status == VgTs_WaitFD
+ || VG_(threads)[tid_next].status == VgTs_Sleeping
+ || VG_(threads)[tid_next].status == VgTs_WaitSIG
+ || (VG_(threads)[tid_next].status == VgTs_WaitCV
+ && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
n_in_bounded_wait ++;
- if (vg_threads[tid_next].status == VgTs_Runnable)
+ if (VG_(threads)[tid_next].status == VgTs_Runnable)
break; /* We can run this one. */
if (tid_next == tid)
break; /* been all the way round */
}
tid = tid_next;
- if (vg_threads[tid].status == VgTs_Runnable) {
+ if (VG_(threads)[tid].status == VgTs_Runnable) {
/* Found a suitable candidate. Fall out of this loop, so
we can advance to stage 2 of the scheduler: actually
running the thread. */
dispatch_ctr_SAVED = VG_(dispatch_ctr);
/* paranoia ... */
- vg_assert(vg_threads[tid].tid == tid);
+ vg_assert(VG_(threads)[tid].tid == tid);
/* Actually run thread tid. */
while (True) {
# if 0
if (VG_(bbs_done) > 31700000 + 0) {
dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
- VG_(translate)(&vg_threads[tid], vg_threads[tid].m_eip,
+ VG_(translate)(&VG_(threads)[tid], VG_(threads)[tid].m_eip,
NULL,NULL,NULL);
}
- vg_assert(vg_threads[tid].m_eip != 0);
+ vg_assert(VG_(threads)[tid].m_eip != 0);
# endif
trc = run_thread_for_a_while ( tid );
# if 0
- if (0 == vg_threads[tid].m_eip) {
+ if (0 == VG_(threads)[tid].m_eip) {
VG_(printf)("tid = %d, dc = %llu\n", tid, VG_(bbs_done));
- vg_assert(0 != vg_threads[tid].m_eip);
+ vg_assert(0 != VG_(threads)[tid].m_eip);
}
# endif
/* Trivial event. Miss in the fast-cache. Do a full
lookup for it. */
trans_addr
- = VG_(search_transtab) ( vg_threads[tid].m_eip );
+ = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
if (trans_addr == (Addr)0) {
/* Not found; we need to request a translation. */
- create_translation_for( tid, vg_threads[tid].m_eip );
- trans_addr = VG_(search_transtab) ( vg_threads[tid].m_eip );
+ create_translation_for( tid, VG_(threads)[tid].m_eip );
+ trans_addr = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
if (trans_addr == (Addr)0)
VG_(panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
}
if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
Bool done;
/* VG_(printf)("request 0x%x\n",
- *(UInt*)(vg_threads[tid].m_eax)); */
+ *(UInt*)(VG_(threads)[tid].m_eax)); */
done = maybe_do_trivial_clientreq(tid);
if (done) {
/* The request is done. We try and continue with the
same thread if still runnable. If not, go back to
Stage 1 to select a new thread to run. */
- if (vg_threads[tid].status == VgTs_Runnable)
+ if (VG_(threads)[tid].status == VgTs_Runnable)
continue; /* with this thread */
else
goto stage1;
to exit. */
# if 0
{ UInt* esp; Int i;
- esp=(UInt*)vg_threads[tid].m_esp;
+ esp=(UInt*)VG_(threads)[tid].m_esp;
VG_(printf)("\nBEFORE\n");
for (i = 10; i >= -10; i--)
VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
}
# endif
- if (vg_threads[tid].m_eax == __NR_exit)
+ if (VG_(threads)[tid].m_eax == __NR_exit)
return VgSrc_ExitSyscall;
sched_do_syscall(tid);
# if 0
{ UInt* esp; Int i;
- esp=(UInt*)vg_threads[tid].m_esp;
+ esp=(UInt*)VG_(threads)[tid].m_esp;
VG_(printf)("AFTER\n");
for (i = 10; i >= -10; i--)
VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
}
# endif
- if (vg_threads[tid].status == VgTs_Runnable)
+ if (VG_(threads)[tid].status == VgTs_Runnable)
continue; /* with this thread */
else
goto stage1;
*/
/* The thread's %EAX points at an arg block, the first
word of which is the request code. */
- request_code = ((UInt*)(vg_threads[tid].m_eax))[0];
+ request_code = ((UInt*)(VG_(threads)[tid].m_eax))[0];
if (0) {
VG_(sprintf)(msg_buf, "request 0x%x", request_code );
print_sched_event(tid, msg_buf);
throwing away the result. */
VG_(printf)(
"======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
- VG_(translate)( &vg_threads[tid], vg_threads[tid].m_eip, NULL, NULL, NULL );
+ VG_(translate)( &VG_(threads)[tid],
+ VG_(threads)[tid].m_eip, NULL, NULL, NULL );
VG_(printf)("\n");
VG_(printf)(
"======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n");
typedef unsigned long int pthread_t;
*/
-/* Write a value to the client's %EDX (request return value register)
- and set the shadow to indicate it is defined. */
-#define SET_EDX(zztid, zzval) \
- do { vg_threads[zztid].m_edx = (zzval); \
- vg_threads[zztid].sh_edx = VGM_WORD_VALID; \
- } while (0)
-
/* -----------------------------------------------------------
Thread CREATION, JOINAGE and CANCELLATION.
static
void cleanup_after_thread_exited ( ThreadId tid )
{
- vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status == VgTs_Empty);
+ vg_assert(VG_(is_valid_or_empty_tid)(tid));
+ vg_assert(VG_(threads)[tid].status == VgTs_Empty);
/* Mark its stack no-access */
if (VG_(clo_instrument) && tid != 1)
- VGM_(make_noaccess)( vg_threads[tid].stack_base,
- vg_threads[tid].stack_size );
+ VGM_(make_noaccess)( VG_(threads)[tid].stack_base,
+ VG_(threads)[tid].stack_size );
/* Forget about any pending signals directed specifically at this
- thread. */
- VG_(notify_signal_machinery_of_thread_exit)( tid );
-
- /* Get rid of signal handlers specifically arranged for this
- thread. */
- VG_(update_sigstate_following_WaitSIG_change)();
+ thread, and get rid of signal handlers specifically arranged for
+ this thread. */
+ VG_(handle_SCSS_change)( False /* lazy update */ );
}
Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status != VgTs_Empty);
+ vg_assert(VG_(threads)[tid].status != VgTs_Empty);
if (!VG_(is_valid_tid)(tid_cancellee)
- || vg_threads[tid_cancellee].status == VgTs_Empty) {
+ || VG_(threads)[tid_cancellee].status == VgTs_Empty) {
SET_EDX(tid, ESRCH);
return;
}
VG_(sprintf)(msg_buf, "cancelled by %d", tid);
print_sched_event(tid_cancellee, msg_buf);
}
- vg_threads[tid_cancellee].m_eax = (UInt)PTHREAD_CANCELED;
- vg_threads[tid_cancellee].m_eip = (UInt)&VG_(pthreadreturn_bogusRA);
- vg_threads[tid_cancellee].status = VgTs_Runnable;
+ VG_(threads)[tid_cancellee].m_eax = (UInt)PTHREAD_CANCELED;
+ VG_(threads)[tid_cancellee].m_eip = (UInt)&VG_(pthreadreturn_bogusRA);
+ VG_(threads)[tid_cancellee].status = VgTs_Runnable;
/* We return with success (0). */
SET_EDX(tid, 0);
VG_(sprintf)(msg_buf, "exiting with %p", retval);
print_sched_event(tid, msg_buf);
}
- vg_threads[tid].m_eax = (UInt)retval;
- vg_threads[tid].m_eip = (UInt)&VG_(pthreadreturn_bogusRA);
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].m_eax = (UInt)retval;
+ VG_(threads)[tid].m_eip = (UInt)&VG_(pthreadreturn_bogusRA);
+ VG_(threads)[tid].status = VgTs_Runnable;
}
/* Mark it as not in use. Leave the stack in place so the next
user of this slot doesn't reallocate it. */
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status != VgTs_Empty);
+ vg_assert(VG_(threads)[tid].status != VgTs_Empty);
- vg_threads[tid].retval = retval;
+ VG_(threads)[tid].retval = retval;
- if (vg_threads[tid].joiner == VG_INVALID_THREADID) {
+ if (VG_(threads)[tid].joiner == VG_INVALID_THREADID) {
/* No one has yet done a join on me */
- vg_threads[tid].status = VgTs_WaitJoiner;
+ VG_(threads)[tid].status = VgTs_WaitJoiner;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
"root fn returns, waiting for a call pthread_join(%d)",
%EAX -- in order to extract the 2nd param of its pthread_join
call. TODO: free properly the slot (also below).
*/
- jnr = vg_threads[tid].joiner;
+ jnr = VG_(threads)[tid].joiner;
vg_assert(VG_(is_valid_tid)(jnr));
- vg_assert(vg_threads[jnr].status == VgTs_WaitJoinee);
- jnr_args = (UInt*)vg_threads[jnr].m_eax;
+ vg_assert(VG_(threads)[jnr].status == VgTs_WaitJoinee);
+ jnr_args = (UInt*)VG_(threads)[jnr].m_eax;
jnr_thread_return = (void**)(jnr_args[2]);
if (jnr_thread_return != NULL)
- *jnr_thread_return = vg_threads[tid].retval;
+ *jnr_thread_return = VG_(threads)[tid].retval;
SET_EDX(jnr, 0); /* success */
- vg_threads[jnr].status = VgTs_Runnable;
- vg_threads[tid].status = VgTs_Empty; /* bye! */
+ VG_(threads)[jnr].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Empty; /* bye! */
cleanup_after_thread_exited ( tid );
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
/* jee, the joinee, is the thread specified as an arg in thread
tid's call to pthread_join. So tid is the join-er. */
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status == VgTs_Runnable);
+ vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
if (jee == tid) {
SET_EDX(tid, EDEADLK); /* libc constant, not a kernel one */
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
return;
}
if (jee < 0
|| jee >= VG_N_THREADS
- || vg_threads[jee].status == VgTs_Empty) {
+ || VG_(threads)[jee].status == VgTs_Empty) {
/* Invalid thread to join to. */
SET_EDX(tid, EINVAL);
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
return;
}
- if (vg_threads[jee].joiner != VG_INVALID_THREADID) {
+ if (VG_(threads)[jee].joiner != VG_INVALID_THREADID) {
/* Someone already did join on this thread */
SET_EDX(tid, EINVAL);
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
return;
}
- /* if (vg_threads[jee].detached) ... */
+ /* if (VG_(threads)[jee].detached) ... */
/* Perhaps the joinee has already finished? If so return
immediately with its return code, and free up the slot. TODO:
free it properly (also above). */
- if (vg_threads[jee].status == VgTs_WaitJoiner) {
- vg_assert(vg_threads[jee].joiner == VG_INVALID_THREADID);
+ if (VG_(threads)[jee].status == VgTs_WaitJoiner) {
+ vg_assert(VG_(threads)[jee].joiner == VG_INVALID_THREADID);
SET_EDX(tid, 0); /* success */
if (thread_return != NULL) {
- *thread_return = vg_threads[jee].retval;
+ *thread_return = VG_(threads)[jee].retval;
/* Not really right, since it makes the thread's return value
appear to be defined even if it isn't. */
if (VG_(clo_instrument))
VGM_(make_readable)( (Addr)thread_return, sizeof(void*) );
}
- vg_threads[tid].status = VgTs_Runnable;
- vg_threads[jee].status = VgTs_Empty; /* bye! */
+ VG_(threads)[tid].status = VgTs_Runnable;
+ VG_(threads)[jee].status = VgTs_Empty; /* bye! */
cleanup_after_thread_exited ( jee );
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
}
/* Ok, so we'll have to wait on jee. */
- vg_threads[jee].joiner = tid;
- vg_threads[tid].status = VgTs_WaitJoinee;
+ VG_(threads)[jee].joiner = tid;
+ VG_(threads)[tid].status = VgTs_WaitJoinee;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
"blocking on call of pthread_join(%d)", jee );
/* Paranoia ... */
vg_assert(sizeof(pthread_t) == sizeof(UInt));
- vg_assert(vg_threads[parent_tid].status != VgTs_Empty);
+ vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);
tid = vg_alloc_ThreadState();
/* If we've created the main thread's tid, we're in deep trouble :) */
vg_assert(tid != 1);
- vg_assert(VG_(is_valid_tid)(tid));
+ vg_assert(VG_(is_valid_or_empty_tid)(tid));
/* Copy the parent's CPU state into the child's, in a roundabout
way (via baseBlock). */
is inadequate. */
new_stk_szb = VG_PTHREAD_STACK_MIN;
- if (new_stk_szb > vg_threads[tid].stack_size) {
+ if (new_stk_szb > VG_(threads)[tid].stack_size) {
/* Again, for good measure :) We definitely don't want to be
allocating a stack for the main thread. */
vg_assert(tid != 1);
/* for now, we don't handle the case of anything other than
assigning it for the first time. */
- vg_assert(vg_threads[tid].stack_size == 0);
- vg_assert(vg_threads[tid].stack_base == (Addr)NULL);
+ vg_assert(VG_(threads)[tid].stack_size == 0);
+ vg_assert(VG_(threads)[tid].stack_base == (Addr)NULL);
new_stack = (Addr)VG_(get_memory_from_mmap)( new_stk_szb );
- vg_threads[tid].stack_base = new_stack;
- vg_threads[tid].stack_size = new_stk_szb;
- vg_threads[tid].stack_highest_word
+ VG_(threads)[tid].stack_base = new_stack;
+ VG_(threads)[tid].stack_size = new_stk_szb;
+ VG_(threads)[tid].stack_highest_word
= new_stack + new_stk_szb
- VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4 ??? */;
}
- vg_threads[tid].m_esp
- = vg_threads[tid].stack_base
- + vg_threads[tid].stack_size
+ VG_(threads)[tid].m_esp
+ = VG_(threads)[tid].stack_base
+ + VG_(threads)[tid].stack_size
- VG_AR_CLIENT_STACKBASE_REDZONE_SZB;
if (VG_(clo_instrument))
- VGM_(make_noaccess)( vg_threads[tid].m_esp,
+ VGM_(make_noaccess)( VG_(threads)[tid].m_esp,
VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
/* push arg */
- vg_threads[tid].m_esp -= 4;
- * (UInt*)(vg_threads[tid].m_esp) = (UInt)arg;
+ VG_(threads)[tid].m_esp -= 4;
+ * (UInt*)(VG_(threads)[tid].m_esp) = (UInt)arg;
/* push (magical) return address */
- vg_threads[tid].m_esp -= 4;
- * (UInt*)(vg_threads[tid].m_esp) = (UInt)VG_(pthreadreturn_bogusRA);
+ VG_(threads)[tid].m_esp -= 4;
+ * (UInt*)(VG_(threads)[tid].m_esp) = (UInt)VG_(pthreadreturn_bogusRA);
if (VG_(clo_instrument))
- VGM_(make_readable)( vg_threads[tid].m_esp, 2 * 4 );
+ VGM_(make_readable)( VG_(threads)[tid].m_esp, 2 * 4 );
/* this is where we start */
- vg_threads[tid].m_eip = (UInt)start_routine;
+ VG_(threads)[tid].m_eip = (UInt)start_routine;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
if (VG_(clo_instrument))
VGM_(make_readable)( (Addr)thread, sizeof(pthread_t) );
- vg_threads[tid].associated_mx = NULL;
- vg_threads[tid].associated_cv = NULL;
- vg_threads[tid].joiner = VG_INVALID_THREADID;
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].associated_mx = NULL;
+ VG_(threads)[tid].associated_cv = NULL;
+ VG_(threads)[tid].joiner = VG_INVALID_THREADID;
+ VG_(threads)[tid].status = VgTs_Runnable;
for (i = 0; i < VG_N_THREAD_KEYS; i++)
- vg_threads[tid].specifics[i] = NULL;
+ VG_(threads)[tid].specifics[i] = NULL;
- /* We inherit our parent's signal mask. (?!) */
- vg_threads[tid].sig_mask = vg_threads[parent_tid].sig_mask;
- VG_(ksigemptyset)(&vg_threads[i].sigs_waited_for);
+ /* We inherit our parent's signal mask. */
+ VG_(threads)[tid].sig_mask = VG_(threads)[parent_tid].sig_mask;
+ VG_(ksigemptyset)(&VG_(threads)[tid].sigs_waited_for);
/* return zero */
SET_EDX(parent_tid, 0); /* success */
/* Find some arbitrary thread waiting on this mutex, and make it
runnable. If none are waiting, mark the mutex as not held. */
for (i = 1; i < VG_N_THREADS; i++) {
- if (vg_threads[i].status == VgTs_Empty)
+ if (VG_(threads)[i].status == VgTs_Empty)
continue;
- if (vg_threads[i].status == VgTs_WaitMX
- && vg_threads[i].associated_mx == mutex)
+ if (VG_(threads)[i].status == VgTs_WaitMX
+ && VG_(threads)[i].associated_mx == mutex)
break;
}
/* Notionally transfer the hold to thread i, whose
pthread_mutex_lock() call now returns with 0 (success). */
/* The .count is already == 1. */
- vg_assert(vg_threads[i].associated_mx == mutex);
+ vg_assert(VG_(threads)[i].associated_mx == mutex);
mutex->__m_owner = (_pthread_descr)i;
- vg_threads[i].status = VgTs_Runnable;
- vg_threads[i].associated_mx = NULL;
+ VG_(threads)[i].status = VgTs_Runnable;
+ VG_(threads)[i].associated_mx = NULL;
/* m_edx already holds pth_mx_lock() success (0) */
if (VG_(clo_trace_pthread_level) >= 1) {
/* Paranoia ... */
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
/* POSIX doesn't mandate this, but for sanity ... */
if (mutex == NULL) {
/* caller is polling; so return immediately. */
SET_EDX(tid, EBUSY);
} else {
- vg_threads[tid].status = VgTs_WaitMX;
- vg_threads[tid].associated_mx = mutex;
+ VG_(threads)[tid].status = VgTs_WaitMX;
+ VG_(threads)[tid].associated_mx = mutex;
SET_EDX(tid, 0); /* pth_mx_lock success value */
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
/* We get it! [for the first time]. */
mutex->__m_count = 1;
mutex->__m_owner = (_pthread_descr)tid;
- vg_assert(vg_threads[tid].associated_mx == NULL);
+ vg_assert(VG_(threads)[tid].associated_mx == NULL);
/* return 0 (success). */
SET_EDX(tid, 0);
}
/* Paranoia ... */
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (mutex == NULL) {
SET_EDX(tid, EINVAL);
pthread_cond_t* cv;
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_WaitCV
- && vg_threads[tid].awaken_at != 0xFFFFFFFF);
- mx = vg_threads[tid].associated_mx;
+ && VG_(threads)[tid].status == VgTs_WaitCV
+ && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
+ mx = VG_(threads)[tid].associated_mx;
vg_assert(mx != NULL);
- cv = vg_threads[tid].associated_cv;
+ cv = VG_(threads)[tid].associated_cv;
vg_assert(cv != NULL);
if (mx->__m_owner == VG_INVALID_THREADID) {
/* Currently unheld; hand it out to thread tid. */
vg_assert(mx->__m_count == 0);
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
- vg_threads[tid].associated_cv = NULL;
- vg_threads[tid].associated_mx = NULL;
+ VG_(threads)[tid].associated_cv = NULL;
+ VG_(threads)[tid].associated_mx = NULL;
mx->__m_owner = (_pthread_descr)tid;
mx->__m_count = 1;
} else {
/* Currently held. Make thread tid be blocked on it. */
vg_assert(mx->__m_count > 0);
- vg_threads[tid].status = VgTs_WaitMX;
+ VG_(threads)[tid].status = VgTs_WaitMX;
SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
- vg_threads[tid].associated_cv = NULL;
- vg_threads[tid].associated_mx = mx;
+ VG_(threads)[tid].associated_cv = NULL;
+ VG_(threads)[tid].associated_mx = mx;
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf,
"pthread_cond_timedwai cv %p: TIMEOUT -> BLOCK for mx %p",
/* Find a thread waiting on this CV. */
for (i = 1; i < VG_N_THREADS; i++) {
- if (vg_threads[i].status == VgTs_Empty)
+ if (VG_(threads)[i].status == VgTs_Empty)
continue;
- if (vg_threads[i].status == VgTs_WaitCV
- && vg_threads[i].associated_cv == cond)
+ if (VG_(threads)[i].status == VgTs_WaitCV
+ && VG_(threads)[i].associated_cv == cond)
break;
}
vg_assert(i <= VG_N_THREADS);
return;
}
- mx = vg_threads[i].associated_mx;
+ mx = VG_(threads)[i].associated_mx;
vg_assert(mx != NULL);
if (mx->__m_owner == VG_INVALID_THREADID) {
/* Currently unheld; hand it out to thread i. */
vg_assert(mx->__m_count == 0);
- vg_threads[i].status = VgTs_Runnable;
- vg_threads[i].associated_cv = NULL;
- vg_threads[i].associated_mx = NULL;
+ VG_(threads)[i].status = VgTs_Runnable;
+ VG_(threads)[i].associated_cv = NULL;
+ VG_(threads)[i].associated_mx = NULL;
mx->__m_owner = (_pthread_descr)i;
mx->__m_count = 1;
/* .m_edx already holds pth_cond_wait success value (0) */
} else {
/* Currently held. Make thread i be blocked on it. */
vg_assert(mx->__m_count > 0);
- vg_threads[i].status = VgTs_WaitMX;
- vg_threads[i].associated_cv = NULL;
- vg_threads[i].associated_mx = mx;
+ VG_(threads)[i].status = VgTs_WaitMX;
+ VG_(threads)[i].associated_cv = NULL;
+ VG_(threads)[i].associated_mx = mx;
SET_EDX(i, 0); /* pth_cond_wait success value */
if (VG_(clo_trace_pthread_level) >= 1) {
/* Paranoia ... */
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (mutex == NULL || cond == NULL) {
SET_EDX(tid, EINVAL);
}
/* Queue ourselves on the condition. */
- vg_threads[tid].status = VgTs_WaitCV;
- vg_threads[tid].associated_cv = cond;
- vg_threads[tid].associated_mx = mutex;
- vg_threads[tid].awaken_at = ms_end;
+ VG_(threads)[tid].status = VgTs_WaitCV;
+ VG_(threads)[tid].associated_cv = cond;
+ VG_(threads)[tid].associated_mx = mutex;
+ VG_(threads)[tid].awaken_at = ms_end;
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf,
/* Paranoia ... */
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (cond == NULL) {
SET_EDX(tid, EINVAL);
vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
for (i = 0; i < VG_N_THREAD_KEYS; i++)
if (!vg_thread_keys[i].inuse)
}
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
SET_EDX(tid, EINVAL);
functions correctly. */
# if 1
for (tid = 1; tid < VG_N_THREADS; tid++) {
- if (vg_threads[tid].status != VgTs_Empty)
- vg_threads[tid].specifics[key] = NULL;
+ if (VG_(threads)[tid].status != VgTs_Empty)
+ VG_(threads)[tid].specifics[key] = NULL;
}
# endif
}
}
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
SET_EDX(tid, (UInt)NULL);
return;
}
- SET_EDX(tid, (UInt)vg_threads[tid].specifics[key]);
+ SET_EDX(tid, (UInt)VG_(threads)[tid].specifics[key]);
}
}
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
SET_EDX(tid, EINVAL);
return;
}
- vg_threads[tid].specifics[key] = pointer;
+ VG_(threads)[tid].specifics[key] = pointer;
SET_EDX(tid, 0);
}
------------------------------------------------ */
/* See comment in vg_libthread.c:pthread_sigmask() regarding
- deliberate confusion of types sigset_t and vki_sigset_t. Also re
- meaning of the mashed_how value. Return 0 for OK and 1 for some
- kind of addressing error, which the vg_libpthread.c routine turns
- into return values 0 and EFAULT respectively. */
+ deliberate confusion of types sigset_t and vki_sigset_t. Return 0
+ for OK and 1 for some kind of addressing error, which the
+ vg_libpthread.c routine turns into return values 0 and EFAULT
+ respectively. */
static
void do_pthread_sigmask ( ThreadId tid,
- Int mashed_how,
+ Int vki_how,
vki_ksigset_t* newmask,
vki_ksigset_t* oldmask )
{
Char msg_buf[100];
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf,
- "pthread_sigmask m_how %d, newmask %p, oldmask %p",
- mashed_how, newmask, oldmask );
+ "pthread_sigmask vki_how %d, newmask %p, oldmask %p",
+ vki_how, newmask, oldmask );
print_pthread_event(tid, msg_buf);
}
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (VG_(clo_instrument)) {
/* TODO check newmask/oldmask are addressible/defined */
}
- if (oldmask != NULL) {
- *oldmask = vg_threads[tid].sig_mask;
- if (VG_(clo_instrument)) {
- VGM_(make_readable)( (Addr)oldmask, sizeof(vki_ksigset_t) );
- }
- }
-
- switch (mashed_how) {
- case 1: /* SIG_SETMASK */
- vg_threads[tid].sig_mask = *newmask;
- break;
- case 2: /* SIG_BLOCK */
- VG_(ksigaddset_from_set)( & vg_threads[tid].sig_mask, newmask);
- break;
- case 3: /* SIG_UNBLOCK */
- VG_(ksigdelset_from_set)( & vg_threads[tid].sig_mask, newmask);
- break;
- default:
- VG_(panic)("do_pthread_sigmask: invalid mashed_how");
- /*NOTREACHED*/
- break;
- }
+ VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );
+ /* Success. */
SET_EDX(tid, 0);
}
vki_ksigset_t* set,
Int* sig )
{
- Char msg_buf[100];
+ vki_ksigset_t irrelevant_sigmask;
+ Char msg_buf[100];
+
if (VG_(clo_trace_signals) || VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
"suspend due to sigwait(): set %p, sig %p",
}
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
+
+ /* Change SCSS */
+ VG_(threads)[tid].sigs_waited_for = *set;
+ VG_(threads)[tid].status = VgTs_WaitSIG;
- vg_threads[tid].sigs_waited_for = *set;
- vg_threads[tid].status = VgTs_WaitSIG;
- VG_(update_sigstate_following_WaitSIG_change)();
+ VG_(block_all_host_signals)( &irrelevant_sigmask );
+ VG_(handle_SCSS_change)( False /* lazy update */ );
+}
+
+
+static
+void do_pthread_kill ( ThreadId tid, /* me */
+ ThreadId thread, /* thread to signal */
+ Int sig )
+{
+ Char msg_buf[100];
+
+ if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
+ VG_(sprintf)(msg_buf,
+ "pthread_kill thread %d, signo %d",
+ thread, sig );
+ print_pthread_event(tid, msg_buf);
+ }
+
+ vg_assert(VG_(is_valid_tid)(tid)
+ && VG_(threads)[tid].status == VgTs_Runnable);
+
+ if (!VG_(is_valid_tid)(thread)) {
+ SET_EDX(tid, -VKI_ESRCH);
+ return;
+ }
+
+ if (sig < 1 || sig > VKI_KNSIG) {
+ SET_EDX(tid, -VKI_EINVAL);
+ return;
+ }
+
+ VG_(send_signal_to_thread)( thread, sig );
+ SET_EDX(tid, 0);
}
static
void do_nontrivial_clientreq ( ThreadId tid )
{
- UInt* arg = (UInt*)(vg_threads[tid].m_eax);
+ UInt* arg = (UInt*)(VG_(threads)[tid].m_eax);
UInt req_no = arg[0];
switch (req_no) {
(Int*)(arg[2]) );
break;
+ case VG_USERREQ__PTHREAD_KILL:
+ do_pthread_kill ( tid, arg[1], arg[2] );
+ break;
+
+
case VG_USERREQ__MAKE_NOACCESS:
case VG_USERREQ__MAKE_WRITABLE:
case VG_USERREQ__MAKE_READABLE:
case VG_USERREQ__DO_LEAK_CHECK:
SET_EDX(
tid,
- VG_(handle_client_request) ( &vg_threads[tid], arg )
+ VG_(handle_client_request) ( &VG_(threads)[tid], arg )
);
break;
/* VG_(printf)("scheduler_sanity\n"); */
for (i = 1; i < VG_N_THREADS; i++) {
- mx = vg_threads[i].associated_mx;
- cv = vg_threads[i].associated_cv;
- if (vg_threads[i].status == VgTs_WaitMX) {
+ mx = VG_(threads)[i].associated_mx;
+ cv = VG_(threads)[i].associated_cv;
+ if (VG_(threads)[i].status == VgTs_WaitMX) {
/* If we're waiting on a MX: (1) the mx is not null, (2, 3)
it's actually held by someone, since otherwise this thread
is deadlocked, (4) the mutex's owner is not us, since
/* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__m_owner));
/* 4 */ vg_assert(i != (ThreadId)mx->__m_owner);
} else
- if (vg_threads[i].status == VgTs_WaitCV) {
+ if (VG_(threads)[i].status == VgTs_WaitCV) {
vg_assert(cv != NULL);
vg_assert(mx != NULL);
} else {
/* vg_assert(mx == NULL); */
}
- if (vg_threads[i].status != VgTs_Empty) {
+ if (VG_(threads)[i].status != VgTs_Empty) {
Int
- stack_used = (Addr)vg_threads[i].stack_highest_word
- - (Addr)vg_threads[i].m_esp;
+ stack_used = (Addr)VG_(threads)[i].stack_highest_word
+ - (Addr)VG_(threads)[i].m_esp;
if (i > 1 /* not the root thread */
&& stack_used
>= (VG_PTHREAD_STACK_MIN - 1000 /* paranoia */)) {
VG_(exit)(1);
}
- if (vg_threads[i].status == VgTs_WaitSIG) {
+ if (VG_(threads)[i].status == VgTs_WaitSIG) {
vg_assert( ! VG_(kisemptysigset)(
- & vg_threads[i].sigs_waited_for) );
+ & VG_(threads)[i].sigs_waited_for) );
} else {
vg_assert( VG_(kisemptysigset)(
- & vg_threads[i].sigs_waited_for) );
+ & VG_(threads)[i].sigs_waited_for) );
}
}
#include "vg_unsafe.h"
#include "valgrind.h" /* for VALGRIND_MAGIC_SEQUENCE */
+/* Define to give more sanity checking for signals. */
+#define DEBUG_SIGNALS
+
+
+/* ---------------------------------------------------------------------
+ Forwards decls.
+ ------------------------------------------------------------------ */
+
+static void vg_oursignalhandler ( Int sigNo );
+
+
+/* ---------------------------------------------------------------------
+ HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
+ ------------------------------------------------------------------ */
+
/* ---------------------------------------------------------------------
Signal state for this process.
------------------------------------------------------------------ */
+
/* Base-ment of these arrays [VKI_KNSIG].
   Valid signal numbers are 1 .. VKI_KNSIG inclusive, so the arrays
   are dimensioned one element larger and entry [0] is not used.
*/
-/* For each signal, the current action. Either:
-
- -- VG_SH_NOHANDLER if the client hasn't asked to handle the signal,
- and we havent surreptitiously installed any handler ourselves.
- -- VG_SH_FAKEHANDLER if the client hasn't asked to handle the signal
- directly, but has so indirectly via a sigwait() request. In this
- case we may need to install our own handler to catch signals which
- the sigwait-mask for some thread will accept, but for which the
- client hasn't actually installed a handler. These "fake" handlers
- are invisible to the client, so we need to be able to distinguish
- this case so that we can fake a suitable response if the client
- should enquire about the state of this signal using sigaction.
+/* -----------------------------------------------------
+ Static client signal state (SCSS). This is the state
+ that the client thinks it has the kernel in.
+ SCSS records verbatim the client's settings. These
+ are mashed around only when SKSS is calculated from it.
+ -------------------------------------------------- */
- -- Otherwise, the client has installed a signal handler, and this
- is the pointer to it.
+typedef
+ struct {
+ void* scss_handler; /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
+ client's handler */
+ UInt scss_flags;
+ vki_ksigset_t scss_mask;
+ void* scss_restorer; /* god knows; we ignore it. */
+ }
+ SCSS_Per_Signal;
- Invariant: we never expect to receive a signal for which the
- vg_sighandler[] entry is VG_SH_NOHANDLER. If it is VG_SH_FAKEHANDLER
- we know that we should look for a thread in VgTs_WaitSIG state to
- release. Otherwise, we find a thread capable of handling this
- signal and run the specified handler on it.
+typedef
+ struct {
+ SCSS_Per_Signal scss_per_sig[1+VKI_KNSIG];
+ /* Additional elements to SCSS not stored here:
+ - for each thread, the thread's blocking mask
+ - for each thread in WaitSIG, the set of waited-on sigs
+ */
+ }
+ SCSS;
+
+static SCSS vg_scss;
+
+
+/* -----------------------------------------------------
+ Static kernel signal state (SKSS). This is the state
+ that we have the kernel in. It is computed from SCSS.
+ -------------------------------------------------- */
+
+/* Policy:
+
+   sigprocmask assigns to all thread masks, so that at least
+   everything is always consistent.
+
+   Flags:
+     SA_NOCLDSTOP -- passed through to the kernel
+     SA_ONESHOT or SA_RESETHAND -- observed; the one-shot semantics
+        are implemented by us, at delivery time
+     SA_RESTART -- we observe this but set our handlers always to restart
+     SA_NOMASK or SA_NODEFER -- observed but (for now) ignored
+     SA_ONSTACK -- currently not supported; abort if set.
*/
-#define VG_SH_NOHANDLER ((void*)0)
-#define VG_SH_FAKEHANDLER ((void*)1)
-void* vg_sighandler[1+VKI_KNSIG];
+typedef
+ struct {
+ void* skss_handler; /* VKI_SIG_DFL or VKI_SIG_IGN
+ or ptr to our handler */
+ UInt skss_flags;
+ /* There is no skss_mask, since we know that we will always ask
+ for all signals to be blocked in our one-and-only
+ sighandler. */
+ /* Also there is no skss_restorer. */
+ }
+ SKSS_Per_Signal;
-/* For each signal, either:
- -- VG_SP_SIGIDLE if not pending and not running
- -- Handler address if pending AND real handler
- -- VG_SH_FAKEHANDLER if pending for sigwait
- -- VG_SP_SIGRUNNING if the handler is running and hasn't (returned or
- unblocked the signal using sigprocmask following a longjmp out
- of the handler).
- */
-#define VG_SP_SIGIDLE ((void*)0)
-#define VG_SP_SIGRUNNING ((void*)2)
+typedef
+ struct {
+ SKSS_Per_Signal skss_per_sig[1+VKI_KNSIG];
+ vki_ksigset_t skss_sigmask; /* process' blocked signal mask */
+ }
+ SKSS;
+
+static SKSS vg_skss;
+
+
+/* -----------------------------------------------------
+ Dynamic client signal state (DCSS). This holds transient
+ information about state of client signals.
+ -------------------------------------------------- */
+
+typedef
+ struct {
+ /* True iff a signal has been received but not yet passed to
+ client. */
+ Bool dcss_sigpending[1+VKI_KNSIG];
+ /* If sigpending[] is True, has meaning:
+ VG_INVALID_THREADID -- to be passed to any suitable thread
+ other -- to be passed only to the specified thread. */
+ ThreadId dcss_destthread[1+VKI_KNSIG];
+ }
+ DCSS;
+
+static DCSS vg_dcss;
+
+
+/* ---------------------------------------------------------------------
+ Compute the SKSS required by the current SCSS.
+ ------------------------------------------------------------------ */
+
+static
+void pp_SKSS ( void )
+{
+ Int sig;
+ VG_(printf)("\n\nSKSS:\n");
+ for (sig = 1; sig <= VKI_KNSIG; sig++) {
+ VG_(printf)("sig %d: handler 0x%x, flags 0x%x\n", sig,
+ vg_skss.skss_per_sig[sig].skss_handler,
+ vg_skss.skss_per_sig[sig].skss_flags );
+
+ }
+ VG_(printf)("Global sigmask (63 .. 0) = 0x%x 0x%x\n",
+ vg_skss.skss_sigmask.ws[1],
+ vg_skss.skss_sigmask.ws[0] );
+}
+
+static __inline__
+Bool is_WaitSIGd_by_any_thread ( Int sig )
+{
+ ThreadId tid;
+ for (tid = 1; tid < VG_N_THREADS; tid++) {
+ if (VG_(threads)[tid].status != VgTs_WaitSIG)
+ continue;
+ if (VG_(ksigismember)( &VG_(threads)[tid].sigs_waited_for, sig ))
+ return True;
+ }
+ return False;
+}
+
+static __inline__
+Bool is_blocked_by_all_threads ( Int sig )
+{
+ ThreadId tid;
+ for (tid = 1; tid < VG_N_THREADS; tid++) {
+ if (VG_(threads)[tid].status == VgTs_Empty)
+ continue;
+ if (! VG_(ksigismember)( &VG_(threads)[tid].sig_mask, sig ))
+ return False;
+ }
+ return True;
+}
+
+
+/* This is the core, clever bit. Computation is as follows:
+ For each signal
+ handler = if client has a handler, then our handler
+ else if is WaitSIG'd by any thread, then our handler
+ else if client is DFL, then DFL
+ else (client must be IGN) IGN
+
+ blocked = if is blocked by all threads and not WaitSIG'd by
+ any thread
+ then BLOCKED
+ else UNBLOCKED
+*/
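+
+/* Worked instance of the rules above (illustrative): suppose the
+   client leaves SIGUSR1 at SIG_DFL, one thread sigwait()s for it,
+   and every thread blocks it.  Then handler = our handler (the
+   WaitSIG clause beats the DFL clause), and blocked = UNBLOCKED,
+   since a WaitSIG'd signal is never blocked at kernel level -- so it
+   can actually arrive and release the waiting thread. */
+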
static
-void* vg_sigpending[1+VKI_KNSIG];
+void calculate_SKSS_from_SCSS ( SKSS* dst )
+{
+ Int sig;
+ void* skss_handler;
+ void* scss_handler;
+ Bool iz_WaitSIGd_by_any_thread;
+ Bool iz_blocked_by_all_threads;
+ Bool skss_blocked;
+ UInt scss_flags;
+ UInt skss_flags;
+
+ VG_(ksigemptyset)( &dst->skss_sigmask );
+
+ for (sig = 1; sig <= VKI_KNSIG; sig++) {
+
+ /* Calculate kernel handler and blockedness for sig, as per rules
+ in above comment. */
+
+ iz_WaitSIGd_by_any_thread = is_WaitSIGd_by_any_thread(sig);
+ iz_blocked_by_all_threads = is_blocked_by_all_threads(sig);
+
+ scss_handler = vg_scss.scss_per_sig[sig].scss_handler;
+ scss_flags = vg_scss.scss_per_sig[sig].scss_flags;
+
+ /* Restorer */
+ /*
+ Doesn't seem like we can spin this one.
+ if (vg_scss.scss_per_sig[sig].scss_restorer != NULL)
+ VG_(unimplemented)
+ ("sigactions with non-NULL .sa_restorer field");
+ */
+
+ /* Handler */
+
+ if (scss_handler != VKI_SIG_DFL && scss_handler != VKI_SIG_IGN) {
+ skss_handler = &vg_oursignalhandler;
+ } else
+ if (iz_WaitSIGd_by_any_thread) {
+ skss_handler = &vg_oursignalhandler;
+ } else
+ if (scss_handler == VKI_SIG_DFL) {
+ skss_handler = VKI_SIG_DFL;
+ }
+ else {
+ vg_assert(scss_handler == VKI_SIG_IGN);
+ skss_handler = VKI_SIG_IGN;
+ }
+
+ /* Blockfulness */
+
+ skss_blocked
+ = iz_blocked_by_all_threads && !iz_WaitSIGd_by_any_thread;
+
+ /* Flags */
+
+ skss_flags = 0;
+ /* SA_NOCLDSTOP: pass to kernel */
+ if (scss_flags & VKI_SA_NOCLDSTOP)
+ skss_flags |= VKI_SA_NOCLDSTOP;
+ /* SA_ONESHOT: ignore client setting */
+ /*
+ if (!(scss_flags & VKI_SA_ONESHOT))
+ VG_(unimplemented)
+ ("sigactions without SA_ONESHOT");
+ vg_assert(scss_flags & VKI_SA_ONESHOT);
+ skss_flags |= VKI_SA_ONESHOT;
+ */
+ /* SA_RESTART: ignore client setting and set for us */
+ skss_flags |= VKI_SA_RESTART;
+ /* SA_NOMASK: not allowed */
+ /*
+ .. well, ignore it.
+ if (scss_flags & VKI_SA_NOMASK)
+ VG_(unimplemented)
+ ("sigactions with SA_NOMASK");
+ vg_assert(!(scss_flags & VKI_SA_NOMASK));
+ */
+ /* SA_ONSTACK: not allowed */
+ if (scss_flags & VKI_SA_ONSTACK)
+ VG_(unimplemented)
+ ("signals on an alternative stack (SA_ONSTACK)");
+ vg_assert(!(scss_flags & VKI_SA_ONSTACK));
+ /* ... but WE ask for on-stack ourselves ... */
+ skss_flags |= VKI_SA_ONSTACK;
+
+ /* Create SKSS entry for this signal. */
+
+ if (skss_blocked
+ && sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
+ VG_(ksigaddset)( &dst->skss_sigmask, sig );
+
+ dst->skss_per_sig[sig].skss_handler = skss_handler;
+ dst->skss_per_sig[sig].skss_flags = skss_flags;
+ }
+
+ /* Sanity checks. */
+ vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler
+ == VKI_SIG_DFL);
+ vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler
+ == VKI_SIG_DFL);
+ vg_assert(!VG_(ksigismember)( &dst->skss_sigmask, VKI_SIGKILL ));
+ vg_assert(!VG_(ksigismember)( &dst->skss_sigmask, VKI_SIGSTOP ));
+
+ if (0)
+ pp_SKSS();
+}
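+
+/* Note: within this file, calculate_SKSS_from_SCSS is called only
+   from VG_(handle_SCSS_change) below, which must be entered with all
+   host signals blocked; the scan over VG_(threads)[] above therefore
+   cannot race against vg_oursignalhandler. */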
+
+
+/* ---------------------------------------------------------------------
+ After a possible SCSS change, update SKSS and the kernel itself.
+ ------------------------------------------------------------------ */
+
+/* IMPORTANT NOTE: to avoid race conditions, we must always enter here
+ with ALL KERNEL SIGNALS BLOCKED !
+*/
+void VG_(handle_SCSS_change) ( Bool force_update )
+{
+ Int res, sig;
+ SKSS skss_old;
+ vki_ksigaction ksa, ksa_old;
+
+# ifdef DEBUG_SIGNALS
+ vki_ksigset_t test_sigmask;
+ res = VG_(ksigprocmask)( VKI_SIG_SETMASK /*irrelevant*/,
+ NULL, &test_sigmask );
+ vg_assert(res == 0);
+ /* The kernel never reports SIGKILL or SIGSTOP as masked; that is
+ correct behaviour.  So we add them here purely so that the
+ following full-set assertion can succeed. */
+ VG_(ksigaddset)( &test_sigmask, VKI_SIGKILL );
+ VG_(ksigaddset)( &test_sigmask, VKI_SIGSTOP );
+ vg_assert(VG_(kisfullsigset)( &test_sigmask ));
+# endif
+
+ /* Remember old SKSS and calculate new one. */
+ skss_old = vg_skss;
+ calculate_SKSS_from_SCSS ( &vg_skss );
+
+ /* Compare the new SKSS entries vs the old ones, and update kernel
+ where they differ. */
+ for (sig = 1; sig <= VKI_KNSIG; sig++) {
+
+ /* Trying to do anything with SIGKILL or SIGSTOP is pointless;
+ just skip them. */
+ if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
+ continue;
+
+ /* Aside: take the opportunity to clean up DCSS: forget about any
+ pending signals directed at dead threads. */
+ if (vg_dcss.dcss_sigpending[sig]
+ && vg_dcss.dcss_destthread[sig] != VG_INVALID_THREADID) {
+ ThreadId tid = vg_dcss.dcss_destthread[sig];
+ vg_assert(VG_(is_valid_or_empty_tid)(tid));
+ if (VG_(threads)[tid].status == VgTs_Empty) {
+ vg_dcss.dcss_sigpending[sig] = False;
+ vg_dcss.dcss_destthread[sig] = VG_INVALID_THREADID;
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "discarding pending signal %d due to thread %d exiting",
+ sig, tid );
+ }
+ }
+ /* End of the Aside. Now the Main Business. */
-/* For each signal, the thread id to which the signal should be
- delivered. This is only meaningful if the corresponding
- vg_sigpending entry actually points to a handler, ie, the signal
- is pending.
+ if (!force_update) {
+ if ((skss_old.skss_per_sig[sig].skss_handler
+ == vg_skss.skss_per_sig[sig].skss_handler)
+ && (skss_old.skss_per_sig[sig].skss_flags
+ == vg_skss.skss_per_sig[sig].skss_flags))
+ /* no difference */
+ continue;
+ }
+
+ ksa.ksa_handler = vg_skss.skss_per_sig[sig].skss_handler;
+ ksa.ksa_flags = vg_skss.skss_per_sig[sig].skss_flags;
+ vg_assert(ksa.ksa_flags & VKI_SA_ONSTACK);
+ VG_(ksigfillset)( &ksa.ksa_mask );
+ VG_(ksigdelset)( &ksa.ksa_mask, VKI_SIGKILL );
+ VG_(ksigdelset)( &ksa.ksa_mask, VKI_SIGSTOP );
+ ksa.ksa_restorer = NULL;
+
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "setting ksig %d to: hdlr 0x%x, flags 0x%x, "
+ "mask(63..0) 0x%x 0x%x",
+ sig, ksa.ksa_handler,
+ ksa.ksa_flags,
+ ksa.ksa_mask.ws[1],
+ ksa.ksa_mask.ws[0]
+ );
+
+ res = VG_(ksigaction)( sig, &ksa, &ksa_old );
+ vg_assert(res == 0);
+
+ /* Since we got the old sigaction more or less for free, might
+ as well extract the maximum sanity-check value from it. */
+ if (!force_update) {
+ vg_assert(ksa_old.ksa_handler
+ == skss_old.skss_per_sig[sig].skss_handler);
+ vg_assert(ksa_old.ksa_flags
+ == skss_old.skss_per_sig[sig].skss_flags);
+ vg_assert(ksa_old.ksa_restorer
+ == NULL);
+ VG_(ksigaddset)( &ksa_old.ksa_mask, VKI_SIGKILL );
+ VG_(ksigaddset)( &ksa_old.ksa_mask, VKI_SIGSTOP );
+ vg_assert(VG_(kisfullsigset)( &ksa_old.ksa_mask ));
+ }
+ }
+
+ /* Just set the new sigmask, even if it's no different from the
+ old, since we have to do this anyway, to unblock the host
+ signals. */
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "setting kmask(63..0) to 0x%x 0x%x",
+ vg_skss.skss_sigmask.ws[1],
+ vg_skss.skss_sigmask.ws[0]
+ );
+
+ VG_(restore_all_host_signals)( &vg_skss.skss_sigmask );
+}
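+
+/* Known callers: VG_(do__NR_sigaction) and do_setmask (lazy
+   updates), VG_(deliver_signals) (after a sigwait release or a
+   ONESHOT delivery), and VG_(sigstartup_actions) (the initial
+   forced update).  All of them block all host signals beforehand,
+   as required by the note above. */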
+
+
+/* ---------------------------------------------------------------------
+ Update/query SCSS in accordance with client requests.
+ ------------------------------------------------------------------ */
+
+void VG_(do__NR_sigaction) ( ThreadId tid )
+{
+ Int signo;
+ vki_ksigaction* new_act;
+ vki_ksigaction* old_act;
+ vki_ksigset_t irrelevant_sigmask;
+
+ vg_assert(VG_(is_valid_tid)(tid));
+ signo = VG_(threads)[tid].m_ebx; /* int sigNo */
+ new_act = (vki_ksigaction*)(VG_(threads)[tid].m_ecx);
+ old_act = (vki_ksigaction*)(VG_(threads)[tid].m_edx);
+
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugExtraMsg,
+ "__NR_sigaction: tid %d, sigNo %d, "
+ "new 0x%x, old 0x%x, new flags 0x%x",
+ tid, signo, (UInt)new_act, (UInt)old_act,
+ (UInt)(new_act ? new_act->ksa_flags : 0) );
+
+ /* Rule out various error conditions.  The aim is to ensure that
+ the request would definitely succeed if it were passed to the
+ kernel. */
+
+ /* Reject out-of-range signal numbers. */
+ if (signo < 1 || signo > VKI_KNSIG) goto bad_signo;
+
+ /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
+ if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
+ && new_act
+ && new_act->ksa_handler != VKI_SIG_DFL)
+ goto bad_sigkill_or_sigstop;
+
+ /* If the client supplied non-NULL old_act, copy the relevant SCSS
+ entry into it. */
+ if (old_act) {
+ old_act->ksa_handler = vg_scss.scss_per_sig[signo].scss_handler;
+ old_act->ksa_flags = vg_scss.scss_per_sig[signo].scss_flags;
+ old_act->ksa_mask = vg_scss.scss_per_sig[signo].scss_mask;
+ old_act->ksa_restorer = vg_scss.scss_per_sig[signo].scss_restorer;
+ }
+
+ /* And now copy new SCSS entry from new_act. */
+ if (new_act) {
+ vg_scss.scss_per_sig[signo].scss_handler = new_act->ksa_handler;
+ vg_scss.scss_per_sig[signo].scss_flags = new_act->ksa_flags;
+ vg_scss.scss_per_sig[signo].scss_mask = new_act->ksa_mask;
+ vg_scss.scss_per_sig[signo].scss_restorer = new_act->ksa_restorer;
+ }
+
+ /* All happy bunnies ... */
+ if (new_act) {
+ VG_(block_all_host_signals)( &irrelevant_sigmask );
+ VG_(handle_SCSS_change)( False /* lazy update */ );
+ }
+ SET_EAX(tid, 0);
+ return;
+
+ bad_signo:
+ VG_(message)(Vg_UserMsg,
+ "Warning: bad signal number %d in __NR_sigaction.",
+ signo);
+ SET_EAX(tid, -VKI_EINVAL);
+ return;
+
+ bad_sigkill_or_sigstop:
+ VG_(message)(Vg_UserMsg,
+ "Warning: attempt to set %s handler in __NR_sigaction.",
+ signo == VKI_SIGKILL ? "SIGKILL" : "SIGSTOP" );
+
+ SET_EAX(tid, -VKI_EINVAL);
+ return;
+}
- In this case, the value VG_INVALID_THREADID indicates the signal is
- not directed at a specific thread and so should be delivered to any
- thread whose signal mask (ThreadState.sig_mask) field allows it.
- Any other value indicates that the signal should be delivered only
- to that specific thread, as some point in time when the thread has
- not blocked the signal. It remains pending until then. */
static
-ThreadId vg_sig_threadid[1+VKI_KNSIG];
+void do_sigprocmask_bitops ( Int vki_how,
+ vki_ksigset_t* orig_set,
+ vki_ksigset_t* modifier )
+{
+ switch (vki_how) {
+ case VKI_SIG_BLOCK:
+ VG_(ksigaddset_from_set)( orig_set, modifier );
+ break;
+ case VKI_SIG_UNBLOCK:
+ VG_(ksigdelset_from_set)( orig_set, modifier );
+ break;
+ case VKI_SIG_SETMASK:
+ *orig_set = *modifier;
+ break;
+ default:
+ VG_(panic)("do_sigprocmask_bitops");
+ break;
+ }
+}
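+
+/* Illustrative sketch (hypothetical values; assumes VKI_SIGINT is
+   defined alongside the other VKI_SIG* constants):
+
+      vki_ksigset_t mask, mod;
+      VG_(ksigemptyset)( &mask );
+      VG_(ksigemptyset)( &mod );
+      VG_(ksigaddset)( &mod, VKI_SIGINT );
+      do_sigprocmask_bitops( VKI_SIG_BLOCK, &mask, &mod );
+
+   after which VG_(ksigismember)( &mask, VKI_SIGINT ) is nonzero. */
+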
+/* Handle blocking mask set/get uniformly for threads and the process
+   as a whole.  If tid==VG_INVALID_THREADID, this is really
+   __NR_sigprocmask, in which case we apply the update to the masks
+   of all threads and return in "oldset" the mask of the root thread
+   (1).  Otherwise, tid denotes a valid thread, in which case we just
+   set/get its mask.
-/* For each signal that the client installed a handler for (ie, for
- those for which the vg_sighandler entry is non-VG_SH_NOHANDLER and
- non-VG_SH_FAKEHANDLER), record whether or not the client asked for
- syscalls to be restartable (SA_RESTART) if interrupted by this
- signal. We need to consult this when a signal returns, if it
- should happen that the signal which we delivered has interrupted a
- system call. */
-static
-Bool vg_sig_sarestart[1+VKI_KNSIG];
+ Note that the thread signal masks are an implicit part of SCSS,
+ which is why this routine is allowed to mess with them.
+*/
+static
+void do_setmask ( ThreadId tid,
+ Int how,
+ vki_ksigset_t* newset,
+ vki_ksigset_t* oldset )
+{
+ vki_ksigset_t irrelevant_sigmask;
+
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "do_setmask: tid = %d (0 means ALL), how = %d (%s), set = %p",
+ tid,
+ how,
+ how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
+ how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
+ how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
+ newset
+ );
+
+ if (tid == VG_INVALID_THREADID) {
+ /* Behave as if __NR_sigprocmask. */
+ if (oldset) {
+ /* A bit fragile. Should do better here really. */
+ vg_assert(VG_(threads)[1].status != VgTs_Empty);
+ *oldset = VG_(threads)[1].sig_mask;
+ }
+ if (newset) {
+ ThreadId tidd;
+ for (tidd = 1; tidd < VG_N_THREADS; tidd++) {
+ if (VG_(threads)[tidd].status == VgTs_Empty)
+ continue;
+ do_sigprocmask_bitops (
+ how, &VG_(threads)[tidd].sig_mask, newset );
+ }
+ }
+ } else {
+ /* Just do this thread. */
+ vg_assert(VG_(is_valid_tid)(tid));
+ if (oldset)
+ *oldset = VG_(threads)[tid].sig_mask;
+ if (newset)
+ do_sigprocmask_bitops (
+ how, &VG_(threads)[tid].sig_mask, newset );
+ }
+
+ if (newset) {
+ VG_(block_all_host_signals)( &irrelevant_sigmask );
+ VG_(handle_SCSS_change)( False /* lazy update */ );
+ }
+}
+
+
+void VG_(do__NR_sigprocmask) ( ThreadId tid,
+ Int how,
+ vki_ksigset_t* set,
+ vki_ksigset_t* oldset )
+{
+ if (how == VKI_SIG_BLOCK || how == VKI_SIG_UNBLOCK
+ || how == VKI_SIG_SETMASK) {
+ vg_assert(VG_(is_valid_tid)(tid));
+ do_setmask ( VG_INVALID_THREADID, how, set, oldset );
+ /* Syscall returns 0 (success) to its thread. */
+ SET_EAX(tid, 0);
+ } else {
+ VG_(message)(Vg_DebugMsg,
+ "sigprocmask: unknown `how' field %d", how);
+ SET_EAX(tid, -VKI_EINVAL);
+ }
+}
+
+
+void VG_(do_pthread_sigmask_SCSS_upd) ( ThreadId tid,
+ Int how,
+ vki_ksigset_t* set,
+ vki_ksigset_t* oldset )
+{
+ /* Assume that how has been validated by caller. */
+ vg_assert(how == VKI_SIG_BLOCK || how == VKI_SIG_UNBLOCK
+ || how == VKI_SIG_SETMASK);
+ vg_assert(VG_(is_valid_tid)(tid));
+ do_setmask ( tid, how, set, oldset );
+ /* The request return code is set in do_pthread_sigmask */
+}
+
+
+void VG_(send_signal_to_thread) ( ThreadId thread, Int sig )
+{
+ Int res;
+ vg_assert(VG_(is_valid_tid)(thread));
+ vg_assert(sig >= 1 && sig <= VKI_KNSIG);
+
+ switch ((UInt)(vg_scss.scss_per_sig[sig].scss_handler)) {
+
+ case VKI_SIG_IGN:
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "send_signal %d to_thread %d: IGN, ignored", sig, thread );
+ break;
+
+ case VKI_SIG_DFL:
+ /* This is the tricky case. Since we don't handle default
+ actions, the simple thing is to send someone round to the
+ front door and signal there. Then the kernel will do
+ whatever it does with the default action. */
+ res = VG_(kill)( VG_(getpid)(), sig );
+ vg_assert(res == 0);
+ break;
+
+ default:
+ if (!vg_dcss.dcss_sigpending[sig]) {
+ vg_dcss.dcss_sigpending[sig] = True;
+ vg_dcss.dcss_destthread[sig] = thread;
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "send_signal %d to_thread %d: now pending", sig, thread );
+ } else {
+ if (vg_dcss.dcss_destthread[sig] == thread) {
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "send_signal %d to_thread %d: already pending ... "
+ "discarded", sig, thread );
+ } else {
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "send_signal %d to_thread %d: was pending for %d, "
+ "now pending for %d",
+ sig, thread, vg_dcss.dcss_destthread[sig], thread );
+ vg_dcss.dcss_destthread[sig] = thread;
+ }
+ }
+ }
+}
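+
+/* Note, from the cases above: DCSS holds at most one pending
+   instance per signal number, so a second send of the same signo
+   before delivery is either discarded (same destination thread) or
+   merely retargeted (different destination). */
+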
+/* ---------------------------------------------------------------------
+ LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
+ ------------------------------------------------------------------ */
+
/* ---------------------------------------------------------------------
Handy utilities to block/restore all host signals.
------------------------------------------------------------------ */
}
/* Restore the blocking mask using the supplied saved one. */
-void VG_(restore_host_signals) ( /* IN */ vki_ksigset_t* saved_mask )
+void VG_(restore_all_host_signals) ( /* IN */ vki_ksigset_t* saved_mask )
{
Int ret;
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
VgSigFrame* frame;
ThreadState* tst;
- tst = VG_(get_thread_state)(tid);
+ vg_assert(VG_(is_valid_tid)(tid));
+ tst = & VG_(threads)[tid];
esp = tst->m_esp;
esp -= sizeof(VgSigFrame);
/* Set the thread so it will next run the handler. */
tst->m_esp = esp;
- tst->m_eip = (Addr)vg_sigpending[sigNo];
+ tst->m_eip = (Addr)vg_scss.scss_per_sig[sigNo].scss_handler;
/* This thread needs to be marked runnable, but we leave that the
caller to do. */
VgSigFrame* frame;
ThreadState* tst;
- tst = VG_(get_thread_state)(tid);
+ vg_assert(VG_(is_valid_tid)(tid));
+ tst = & VG_(threads)[tid];
/* Correctly reestablish the frame base address. */
esp = tst->m_esp;
before the signal was delivered. */
sigNo = vg_pop_signal_frame(tid);
- /* You would have thought that the following assertion made sense
- here:
-
- vg_assert(vg_sigpending[sigNo] == VG_SP_SIGRUNNING);
-
- Alas, you would be wrong. If a sigprocmask has been intercepted
- and it unblocks this signal, then vg_sigpending[sigNo] will
- either be VG_SIGIDLE, or (worse) another instance of it will
- already have arrived, so that the stored value is that of the
- handler.
-
- Note that these anomalies can only occur when a signal handler
- unblocks its own signal inside itself AND THEN RETURNS anyway
- (which seems a bizarre thing to do).
-
- Ho Hum. This seems like a race condition which surely isn't
- handled correctly. */
-
vg_assert(sigNo >= 1 && sigNo <= VKI_KNSIG);
- vg_sigpending[sigNo] = VG_SP_SIGIDLE;
/* Unlock and return. */
- VG_(restore_host_signals)( &saved_procmask );
+ VG_(restore_all_host_signals)( &saved_procmask );
/* Scheduler now can resume this thread, or perhaps some other.
Tell the scheduler whether or not any syscall interrupted by
this signal should be restarted, if possible, or no. */
- return vg_sig_sarestart[sigNo];
+ return
+ (vg_scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
+ ? True
+ : False;
}
{
vki_ksigset_t saved_procmask;
Int sigNo;
- Bool found;
+ Bool found, scss_changed;
ThreadState* tst;
ThreadId tid;
- /* A cheap check. We don't need to have exclusive access
- to the queue, because in the worst case, vg_oursignalhandler
- will add signals, causing us to return, thinking there
- are no signals to deliver, when in fact there are some.
- A subsequent call here will handle the signal(s) we missed.
- */
+ /* A cheap check. We don't need to have exclusive access to the
+ pending array, because in the worst case, vg_oursignalhandler
+ will add signals, causing us to return, thinking there are no
+ signals to deliver, when in fact there are some. A subsequent
+ call here will handle the signal(s) we missed. */
found = False;
for (sigNo = 1; sigNo <= VKI_KNSIG; sigNo++)
- if (vg_sigpending[sigNo] != VG_SP_SIGIDLE
- && vg_sigpending[sigNo] != VG_SP_SIGRUNNING)
+ if (vg_dcss.dcss_sigpending[sigNo])
found = True;
if (!found) return False;
blocking all the host's signals. That means vg_oursignalhandler
can't run whilst we are messing with stuff.
*/
+ scss_changed = False;
VG_(block_all_host_signals)( &saved_procmask );
/* Look for signals to deliver ... */
for (sigNo = 1; sigNo <= VKI_KNSIG; sigNo++) {
- if (vg_sigpending[sigNo] == VG_SP_SIGIDLE
- || vg_sigpending[sigNo] == VG_SP_SIGRUNNING) continue;
+
+ if (!vg_dcss.dcss_sigpending[sigNo])
+ continue;
+
/* sigNo is pending. Try to find a suitable thread to deliver
it to. */
-
/* First off, are any threads in sigwait() for the signal?
If so just give to one of them and have done. */
for (tid = 1; tid < VG_N_THREADS; tid++) {
- tst = VG_(get_thread_state_UNCHECKED)(tid);
+ tst = & VG_(threads)[tid];
if (tst->status != VgTs_WaitSIG)
continue;
if (VG_(ksigismember)(&(tst->sigs_waited_for), sigNo))
}
if (tid < VG_N_THREADS) {
UInt* sigwait_args;
- tst = VG_(get_thread_state)(tid);
+ tst = & VG_(threads)[tid];
if (VG_(clo_trace_signals) || VG_(clo_trace_sched))
VG_(message)(Vg_DebugMsg,
"releasing thread %d from sigwait() due to signal %d",
if (NULL != (UInt*)(sigwait_args[2])) {
*(Int*)(sigwait_args[2]) = sigNo;
if (VG_(clo_instrument))
- VGM_(make_readable)( (Addr)(sigwait_args[2]), sizeof(UInt));
+ VGM_(make_readable)( (Addr)(sigwait_args[2]),
+ sizeof(UInt));
}
- tst->m_edx = 0;
- tst->sh_edx = VGM_WORD_VALID;
+ SET_EDX(tid, 0);
tst->status = VgTs_Runnable;
VG_(ksigemptyset)(&tst->sigs_waited_for);
- VG_(update_sigstate_following_WaitSIG_change)();
- vg_sigpending[sigNo] = VG_SP_SIGIDLE;
+ scss_changed = True;
+ vg_dcss.dcss_sigpending[sigNo] = False;
+ vg_dcss.dcss_destthread[sigNo] = VG_INVALID_THREADID;
+ /*paranoia*/
continue; /* for (sigNo = 1; ...) loop */
}
/* Well, nobody appears to be sigwaiting for it. So we really
- are delivering the signal in the usual way, and so the
- handler better be valid. */
- vg_assert(vg_sigpending[sigNo] != VG_SP_SIGIDLE);
- vg_assert(vg_sigpending[sigNo] != VG_SH_FAKEHANDLER);
- vg_assert(vg_sigpending[sigNo] != VG_SP_SIGRUNNING);
+ are delivering the signal in the usual way, in which case the
+ client really must have a handler for this signal! */
+ vg_assert(vg_dcss.dcss_sigpending[sigNo]);
+ vg_assert(vg_scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN
+ && vg_scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);
- tid = vg_sig_threadid[sigNo];
+ tid = vg_dcss.dcss_destthread[sigNo];
vg_assert(tid == VG_INVALID_THREADID
|| VG_(is_valid_tid)(tid));
if (tid != VG_INVALID_THREADID) {
/* directed to a specific thread; ensure it actually still
exists ... */
- tst = VG_(get_thread_state_UNCHECKED)(tid);
+ tst = & VG_(threads)[tid];
if (tst->status == VgTs_Empty) {
/* dead, for whatever reason; ignore this signal */
if (VG_(clo_trace_signals))
VG_(message)(Vg_DebugMsg,
"discarding signal %d for nonexistent thread %d",
sigNo, tid );
- vg_sigpending[sigNo] = VG_SP_SIGIDLE;
+ vg_dcss.dcss_sigpending[sigNo] = False;
+ vg_dcss.dcss_destthread[sigNo] = VG_INVALID_THREADID;
continue; /* for (sigNo = 1; ...) loop */
}
} else {
/* not directed to a specific thread, so search for a
suitable candidate */
for (tid = 1; tid < VG_N_THREADS; tid++) {
- tst = VG_(get_thread_state_UNCHECKED)(tid);
+ tst = & VG_(threads)[tid];
if (tst->status != VgTs_Empty
&& !VG_(ksigismember)(&(tst->sig_mask), sigNo))
break;
signal handler with the frame on top of the client's stack,
as it expects. */
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(VG_(get_thread_state)(tid)->status != VgTs_Empty);
vg_push_signal_frame ( tid, sigNo );
- VG_(get_thread_state)(tid)->status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
/* Signify that the signal has been delivered. */
- vg_sigpending[sigNo] = VG_SP_SIGRUNNING;
+ vg_dcss.dcss_sigpending[sigNo] = False;
+ vg_dcss.dcss_destthread[sigNo] = VG_INVALID_THREADID;
+
+ if (vg_scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONESHOT) {
+ /* Do the ONESHOT thing. */
+ vg_scss.scss_per_sig[sigNo].scss_handler = VKI_SIG_DFL;
+ scss_changed = True;
+ }
}
/* Unlock and return. */
- VG_(restore_host_signals)( &saved_procmask );
- return True;
-}
-
-
-/* A thread is about to exit. Forget about any signals which are
- still pending for it. */
-void VG_(notify_signal_machinery_of_thread_exit) ( ThreadId tid )
-{
- Int sigNo;
- for (sigNo = 1; sigNo <= VKI_KNSIG; sigNo++) {
- if (vg_sigpending[sigNo] == VG_SP_SIGIDLE
- || vg_sigpending[sigNo] == VG_SP_SIGRUNNING)
- continue;
- if (vg_sig_threadid[sigNo] == tid) {
- /* sigNo is pending for tid, which is just about to disappear.
- So forget about the pending signal. */
- vg_sig_threadid[sigNo] = VG_INVALID_THREADID;
- vg_sigpending[sigNo] = VG_SP_SIGIDLE;
- if (VG_(clo_trace_signals))
- VG_(message)(Vg_DebugMsg,
- "discarding pending signal %d due to thread %d exiting",
- sigNo, tid );
- }
+ if (scss_changed) {
+ /* handle_SCSS_change computes a new kernel blocking mask and
+ applies that. */
+ VG_(handle_SCSS_change)( False /* lazy update */ );
+ } else {
+ /* No SCSS change, so just restore the existing blocking
+ mask. */
+ VG_(restore_all_host_signals)( &saved_procmask );
}
+
+ return True;
}
to have mutual exclusion when adding stuff to the queue. */
static
-void VG_(oursignalhandler) ( Int sigNo )
+void vg_oursignalhandler ( Int sigNo )
{
+ ThreadId tid;
Int dummy_local;
+ Bool sane;
vki_ksigset_t saved_procmask;
/*
VG_(block_all_host_signals)( &saved_procmask );
- if (vg_sighandler[sigNo] == VG_SH_NOHANDLER) {
+ /* This is a sanity check. Either a signal has arrived because the
+ client set a handler for it, or because some thread sigwaited on
+ it. Establish that at least one of these is the case. */
+ sane = False;
+ if (vg_scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL
+ && vg_scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN) {
+ sane = True;
+ } else {
+ for (tid = 1; tid < VG_N_THREADS; tid++) {
+ if (VG_(threads)[tid].status != VgTs_WaitSIG)
+ continue;
+ if (VG_(ksigismember)(&VG_(threads)[tid].sigs_waited_for, sigNo))
+ sane = True;
+ }
+ }
+ if (!sane) {
if (VG_(clo_trace_signals)) {
VG_(add_to_msg)("unexpected!");
VG_(end_msg)();
that matters. */
VG_(panic)("vg_oursignalhandler: unexpected signal");
}
+ /* End of the sanity check. */
/* Decide what to do with it. */
- if (vg_sigpending[sigNo] == VG_SP_SIGRUNNING) {
- /* Already running; ignore it. */
- if (VG_(clo_trace_signals)) {
- VG_(add_to_msg)("already running; discarded" );
- VG_(end_msg)();
- }
- }
- else
- if (vg_sigpending[sigNo] != VG_SP_SIGRUNNING
- && vg_sigpending[sigNo] != VG_SP_SIGIDLE) {
- /* Not running and not idle == pending; ignore it. */
+ if (vg_dcss.dcss_sigpending[sigNo]) {
+ /* pending; ignore it. */
if (VG_(clo_trace_signals)) {
VG_(add_to_msg)("already pending; discarded" );
VG_(end_msg)();
}
- }
- else {
+ } else {
/* Ok, we'd better deliver it to the client. */
- vg_assert(vg_sigpending[sigNo] == VG_SP_SIGIDLE);
/* Queue it up for delivery at some point in the future. */
- vg_assert(vg_sighandler[sigNo] != VG_SH_NOHANDLER);
- vg_sigpending[sigNo] = vg_sighandler[sigNo];
- vg_sig_threadid[sigNo] = VG_INVALID_THREADID;
+ vg_dcss.dcss_sigpending[sigNo] = True;
+ vg_dcss.dcss_destthread[sigNo] = VG_INVALID_THREADID;
if (VG_(clo_trace_signals)) {
VG_(add_to_msg)("queued" );
VG_(end_msg)();
/* We've finished messing with the queue, so re-enable host
signals. */
- VG_(restore_host_signals)( &saved_procmask );
+ VG_(restore_all_host_signals)( &saved_procmask );
- if ((sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS
- || sigNo == VKI_SIGFPE || sigNo == VKI_SIGILL)) {
+ if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS
+ || sigNo == VKI_SIGFPE || sigNo == VKI_SIGILL) {
/* Can't continue; must longjmp back to the scheduler and thus
enter the sighandler immediately. */
VG_(longjmpd_on_signal) = sigNo;
}
-/* Copy the process' real signal state to the sim state. Whilst
- doing this, block all real signals.
+/* At startup, copy the process' real signal state to the SCSS.
+ Whilst doing this, block all real signals. Then calculate SKSS and
+ set the kernel to that. Also initialise DCSS.
*/
void VG_(sigstartup_actions) ( void )
{
saved_procmask remembers the previous mask. */
VG_(block_all_host_signals)( &saved_procmask );
+ /* Copy per-signal settings to SCSS. */
+ for (i = 1; i <= VKI_KNSIG; i++) {
+
+ /* Get the old host action */
+ ret = VG_(ksigaction)(i, NULL, &sa);
+ vg_assert(ret == 0);
+
+ if (VG_(clo_trace_signals))
+ VG_(printf)("snaffling handler 0x%x for signal %d\n",
+ (Addr)(sa.ksa_handler), i );
+
+ vg_scss.scss_per_sig[i].scss_handler = sa.ksa_handler;
+ vg_scss.scss_per_sig[i].scss_flags = sa.ksa_flags;
+ vg_scss.scss_per_sig[i].scss_mask = sa.ksa_mask;
+ vg_scss.scss_per_sig[i].scss_restorer = sa.ksa_restorer;
+ }
+
+ /* Copy the process' signal mask into the root thread. */
+ vg_assert(VG_(threads)[1].status == VgTs_Runnable);
+ VG_(threads)[1].sig_mask = saved_procmask;
+
+ /* Initialise DCSS. */
+ for (i = 1; i <= VKI_KNSIG; i++) {
+ vg_dcss.dcss_sigpending[i] = False;
+ vg_dcss.dcss_destthread[i] = VG_INVALID_THREADID;
+ }
+
/* Register an alternative stack for our own signal handler to run
on. */
altstack_info.ss_sp = &(VG_(sigstack)[0]);
"vg_sigstartup_actions: sigstack installed ok");
}
- /* Set initial state for the signal simulation. */
- for (i = 1; i <= VKI_KNSIG; i++) {
- vg_sighandler[i] = VG_SH_NOHANDLER;
- vg_sigpending[i] = VG_SP_SIGIDLE;
- vg_sig_sarestart[i] = True; /* An easy default */
- vg_sig_threadid[i] = VG_INVALID_THREADID;
- }
-
- for (i = 1; i <= VKI_KNSIG; i++) {
-
- /* Get the old host action */
- ret = VG_(ksigaction)(i, NULL, &sa);
- vg_assert(ret == 0);
-
- /* If there's already a handler set, record it, then route the
- signal through to our handler. */
- if (sa.ksa_handler != VKI_SIG_IGN
- && sa.ksa_handler != VKI_SIG_DFL) {
- if (VG_(clo_trace_signals))
- VG_(printf)("snaffling handler 0x%x for signal %d\n",
- (Addr)(sa.ksa_handler), i );
- if ((sa.ksa_flags & VKI_SA_ONSTACK) != 0)
- VG_(unimplemented)
- ("signals on an alternative stack (SA_ONSTACK)");
-
- vg_sighandler[i] = sa.ksa_handler;
- sa.ksa_handler = &VG_(oursignalhandler);
- /* Save the restart status, then set it to restartable. */
- vg_sig_sarestart[i]
- = (sa.ksa_flags & VKI_SA_RESTART) ? True : False;
- sa.ksa_flags |= VKI_SA_RESTART;
-
- ret = VG_(ksigaction)(i, &sa, NULL);
- vg_assert(ret == 0);
- }
- }
-
/* DEBUGGING HACK */
/* VG_(ksignal)(VKI_SIGUSR1, &VG_(oursignalhandler)); */
- /* Finally, restore the blocking mask. */
- VG_(restore_host_signals)( &saved_procmask );
+ /* Calculate SKSS and apply it. This also sets the initial kernel
+ mask we need to run with. */
+ VG_(handle_SCSS_change)( True /* forced update */ );
}
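+
+/* At this point the client's view of signal state lives entirely in
+   SCSS/DCSS, and the forced update above has put the kernel into the
+   matching SKSS state. */
+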
VG_(block_all_host_signals)( &saved_procmask );
- /* copy the sim signal actions to the real ones. */
- /* Hmm, this isn't accurate. Doesn't properly restore the
- SA_RESTART flag nor SA_ONSTACK. */
+ /* Copy per-signal settings from SCSS. */
for (i = 1; i <= VKI_KNSIG; i++) {
- if (i == VKI_SIGKILL || i == VKI_SIGSTOP) continue;
- if (vg_sighandler[i] == VG_SH_NOHANDLER
- || vg_sighandler[i] == VG_SH_FAKEHANDLER) continue;
- ret = VG_(ksigaction)(i, NULL, &sa);
- vg_assert(ret == 0);
- sa.ksa_handler = vg_sighandler[i];
- ret = VG_(ksigaction)(i, &sa, NULL);
- }
-
- VG_(restore_host_signals)( &saved_procmask );
-}
-
-
-void VG_(update_sigstate_following_WaitSIG_change) ( void )
-{
- ThreadId tid;
- Int sig;
- vki_ksigset_t global_waitsigs;
- ThreadState* tst;
- VG_(ksigemptyset)( &global_waitsigs );
+ sa.ksa_handler = vg_scss.scss_per_sig[i].scss_handler;
+ sa.ksa_flags = vg_scss.scss_per_sig[i].scss_flags;
+ sa.ksa_mask = vg_scss.scss_per_sig[i].scss_mask;
+ sa.ksa_restorer = vg_scss.scss_per_sig[i].scss_restorer;
- /* Calculate the new set of signals which are being sigwait()d for
- by at least one thread. */
- for (tid = 1; tid < VG_N_THREADS; tid++) {
- tst = VG_(get_thread_state_UNCHECKED)(tid);
- if (tst->status != VgTs_WaitSIG)
- continue;
- vg_assert(! VG_(kisemptysigset)(
- & tst->sigs_waited_for ));
- VG_(ksigaddset_from_set)( & global_waitsigs,
- & tst->sigs_waited_for );
- }
-
- /* Now adjust vg_sighandler accordingly.
-
- For each signal s: (lapses into pseudo-Haskell ...)
-
- if s `elem` global_waitsigs[s]
- -- at least one thread is sigwait()ing for s. That means that at
- least _some_ kind of handler is needed.
- case vg_sighandler[s] of
- VG_SH_NOHANDLER -> install our own handler and set waitsigs[s]
- to VG_SH_FAKEHANDLER
- VG_SH_FAKEHANDLER -> there's already a handler. Do nothing.
- real_handler -> the client had a handler here anyway, so
- just leave it alone, ie, do nothing.
-
- if s `notElem` global_waitsigs[s]
- -- we're not sigwait()ing for s (any longer).
- case vg_sighandler[s] of
- VG_SH_FAKEHANDLER -> there is a handler installed, but ONLY for
- the purposes of handling sigwait(). So set it back to
- VG_SH_NOHANDLER and tell the kernel that we want to do the
- default action for s from now on, ie, we wish to deregister
- OUR handle.
- VG_SH_NOHANDLER -> there was no handler anyway. Do nothing.
- real_handler -> the client had a handler here anyway, so
- just leave it alone, ie, do nothing.
-
- */
-
- for (sig = 1; sig <= VKI_KNSIG; sig++) {
- if (VG_(ksigismember)( & global_waitsigs, sig )) {
- if (vg_sighandler[sig] == VG_SH_NOHANDLER
- /* && existing kernel handler is SIG_DFL */) {
- /* add handler */
- /* We really only ought to do this if the existing kernel
- handler is SIG_DFL. That's because when queried by the
- client's sigaction, that's what we claim it is if a fake
- handler has been installed. Or (perhaps better)
- remember the kernel's setting.
- */
- VG_(ksignal)( sig, &VG_(oursignalhandler) );
- vg_sighandler[sig] = VG_SH_FAKEHANDLER;
- if (VG_(clo_trace_signals)) {
- VG_(message)(Vg_DebugMsg,
- "adding fake handler for signal %d "
- "following WaitSIG change", sig );
- }
- }
- } else {
- if (vg_sighandler[sig] == VG_SH_FAKEHANDLER) {
- /* remove handler */
- VG_(ksignal)( sig, VKI_SIG_DFL);
- vg_sighandler[sig] = VG_SH_NOHANDLER;
- if (VG_(clo_trace_signals)) {
- VG_(message)(Vg_DebugMsg,
- "removing fake handler for signal %d "
- "following WaitSIG change", sig );
- }
- }
- }
- }
-}
-
-/* ---------------------------------------------------------------------
- Handle signal-related syscalls from the simulatee.
- ------------------------------------------------------------------ */
-
-/* Do more error checking? */
-void VG_(do__NR_sigaction) ( ThreadId tid )
-{
- UInt res;
- void* our_old_handler;
- vki_ksigaction* new_action;
- vki_ksigaction* old_action;
- ThreadState* tst = VG_(get_thread_state)( tid );
- UInt param1 = tst->m_ebx; /* int sigNo */
- UInt param2 = tst->m_ecx; /* k_sigaction* new_action */
- UInt param3 = tst->m_edx; /* k_sigaction* old_action */
- new_action = (vki_ksigaction*)param2;
- old_action = (vki_ksigaction*)param3;
-
- if (VG_(clo_trace_signals))
- VG_(message)(Vg_DebugExtraMsg,
- "__NR_sigaction: sigNo %d, "
- "new 0x%x, old 0x%x, new flags 0x%x",
- param1,(UInt)new_action,(UInt)old_action,
- (UInt)(new_action ? new_action->ksa_flags : 0) );
- /* VG_(ppSigProcMask)(); */
-
- /* Rule out various error conditions. The aim is to ensure that if
- the call is passed to the kernel it will definitely succeed. */
-
- /* Reject out-of-range signal numbers. */
- if (param1 < 1 || param1 > VKI_KNSIG) goto bad_signo;
-
- /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
- if ( (param1 == VKI_SIGKILL || param1 == VKI_SIGSTOP)
- && new_action
- && new_action->ksa_handler != VKI_SIG_DFL)
- goto bad_sigkill_or_sigstop;
-
- our_old_handler = vg_sighandler[param1];
- /* VG_(printf)("old handler = 0x%x\n", our_old_handler); */
- /* If a new handler has been specified, mess with its handler. */
- if (new_action) {
- if (new_action->ksa_handler == VKI_SIG_IGN ||
- new_action->ksa_handler == VKI_SIG_DFL) {
- vg_sighandler[param1] = VG_SH_NOHANDLER;
- vg_sigpending[param1] = VG_SP_SIGIDLE;
- /* Dangerous! Could lose signals like this. */
- } else {
- /* VG_(printf)("new handler = 0x%x\n", new_action->ksa_handler); */
- /* The client isn't allowed to use an alternative signal
- stack. We, however, must. */
- if ((new_action->ksa_flags & VKI_SA_ONSTACK) != 0)
- VG_(unimplemented)
- ("signals on an alternative stack (SA_ONSTACK)");
- new_action->ksa_flags |= VKI_SA_ONSTACK;
- vg_sighandler[param1] = new_action->ksa_handler;
- vg_sig_sarestart[param1]
- = (new_action->ksa_flags & VKI_SA_RESTART) ? True : False;
- new_action->ksa_flags |= VKI_SA_RESTART;
- new_action->ksa_handler = &VG_(oursignalhandler);
- }
- }
-
- KERNEL_DO_SYSCALL(tid,res);
- /* VG_(printf)("RES = %d\n", res); */
-
- /* If the client asks for the old handler, maintain our fiction
- by stuffing in the handler it thought it asked for ... */
- if (old_action) {
- if (old_action->ksa_handler == VKI_SIG_IGN ||
- old_action->ksa_handler == VKI_SIG_DFL) {
- /* No old action; we should have a NULL handler. */
- vg_assert(our_old_handler == VG_SH_NOHANDLER);
- } else {
- /* There's a handler. */
- if (param1 != VKI_SIGKILL && param1 != VKI_SIGSTOP) {
- vg_assert(old_action->ksa_handler == &VG_(oursignalhandler));
- vg_assert((old_action->ksa_flags & VKI_SA_ONSTACK) != 0);
- }
- /* Is the handler a fake one which the client doesn't know
- about? */
- if (vg_sighandler[param1] == VG_SH_FAKEHANDLER) {
- /* Yes. Pretend it was in a SIG_DFL state before. */
- old_action->ksa_handler = VKI_SIG_DFL;
- } else {
- old_action->ksa_handler = our_old_handler;
- }
- /* Since the client is not allowed to ask for an alternative
- sig stack, unset the bit for anything we pass back to
- it. */
- old_action->ksa_flags &= ~VKI_SA_ONSTACK;
- /* Restore the SA_RESTART flag to whatever we snaffled. */
- if (vg_sig_sarestart[param1])
- old_action->ksa_flags |= VKI_SA_RESTART;
- else
- old_action->ksa_flags &= ~VKI_SA_RESTART;
- }
- }
- goto good;
-
- good:
- tst->m_eax = (UInt)0;
- return;
-
- bad_signo:
- VG_(message)(Vg_UserMsg,
- "Warning: bad signal number %d in __NR_sigaction.",
- param1);
- VG_(baseBlock)[VGOFF_(m_eax)] = (UInt)(-VKI_EINVAL);
- return;
-
- bad_sigkill_or_sigstop:
- VG_(message)(Vg_UserMsg,
- "Warning: attempt to set %s handler in __NR_sigaction.",
- param1 == VKI_SIGKILL ? "SIGKILL" : "SIGSTOP" );
-
- VG_(baseBlock)[VGOFF_(m_eax)] = (UInt)(-VKI_EINVAL);
- return;
-}
-
-
-/* The kernel handles sigprocmask in the usual way, but we also need
- to inspect it, so as to spot requests to unblock signals. We then
- inspect vg_sigpending, which records the current state of signal
- delivery to the client. The problematic case is when a signal is
- delivered to the client, in which case the relevant vg_sigpending
- slot is set to VG_SIGRUNNING. This inhibits further signal
- deliveries. This mechanism implements the POSIX requirement that a
- signal is blocked in its own handler.
-
- If the handler returns normally, the slot is changed back to
- VG_SIGIDLE, so that further instances of the signal can be
- delivered. The problem occurs when the handler never returns, but
- longjmps. POSIX mandates that you then have to do an explicit
- setprocmask to re-enable the signal. That is what we try and spot
- here. Although the call is passed to the kernel, we also need to
- spot unblocked signals whose state is VG_SIGRUNNING, and change it
- back to VG_SIGIDLE.
-*/
-void VG_(do__NR_sigprocmask) ( Int how, vki_ksigset_t* set )
-{
- Int i;
- if (VG_(clo_trace_signals))
- VG_(message)(Vg_DebugMsg,
- "vg_do__NR_sigprocmask: how = %d (%s), set = %p",
- how,
- how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
- how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
- how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
- set
- );
-
- /* Sometimes this happens. I don't know what it signifies. */
- if (set == NULL)
- return;
+ if (VG_(clo_trace_signals))
+ VG_(printf)("restoring handler 0x%x for signal %d\n",
+ (Addr)(sa.ksa_handler), i );
- /* Not interested in blocking of signals. */
- if (how == VKI_SIG_BLOCK)
- return;
+ /* Set the kernel's action for this signal back to the client's.
+ SIGKILL and SIGSTOP cannot be changed, so don't insist on
+ success for those two. */
+ ret = VG_(ksigaction)(i, &sa, NULL);
+ if (i != VKI_SIGKILL && i != VKI_SIGSTOP)
+ vg_assert(ret == 0);
- /* Detect and ignore unknown action. */
- if (how != VKI_SIG_UNBLOCK && how != VKI_SIG_SETMASK) {
- VG_(message)(Vg_DebugMsg,
- "sigprocmask: unknown `how' field %d", how);
- return;
}
- for (i = 1; i <= VKI_KNSIG; i++) {
- Bool unblock_me = False;
- if (how == VKI_SIG_SETMASK) {
- if (!VG_(ksigismember)(set,i))
- unblock_me = True;
- } else { /* how == SIG_UNBLOCK */
- if (VG_(ksigismember)(set,i))
- unblock_me = True;
- }
- if (unblock_me && vg_sigpending[i] == VG_SP_SIGRUNNING) {
- vg_sigpending[i] = VG_SP_SIGIDLE;
- if (VG_(clo_verbosity) > 1)
- VG_(message)(Vg_UserMsg,
- "Warning: unblocking signal %d "
- "due to sigprocmask", i );
- }
- }
+ /* A bit of a kludge -- set the sigmask to that of the root
+ thread. */
+ vg_assert(VG_(threads)[1].status != VgTs_Empty);
+ VG_(restore_all_host_signals)( &VG_(threads)[1].sig_mask );
}
-
/*--------------------------------------------------------------------*/
/*--- end vg_signals.c ---*/
/*--------------------------------------------------------------------*/
/* Fake system calls for signal handling. */
extern void VG_(do__NR_sigaction) ( ThreadId tid );
-extern void VG_(do__NR_sigprocmask) ( Int how, vki_ksigset_t* set );
+extern void VG_(do__NR_sigprocmask) ( ThreadId tid,
+ Int how,
+ vki_ksigset_t* set,
+ vki_ksigset_t* oldset );
+extern void VG_(do_pthread_sigmask_SCSS_upd) ( ThreadId tid,
+ Int how,
+ vki_ksigset_t* set,
+ vki_ksigset_t* oldset );
+extern void VG_(send_signal_to_thread) ( ThreadId thread,
+ Int signo );
/* Modify the current thread's state once we have detected it is
returning from a signal handler. */
/* Handy utilities to block/restore all host signals. */
extern void VG_(block_all_host_signals)
( /* OUT */ vki_ksigset_t* saved_mask );
-extern void VG_(restore_host_signals)
+extern void VG_(restore_all_host_signals)
( /* IN */ vki_ksigset_t* saved_mask );
/* ---------------------------------------------------------------------
definitions, which are different in places from those that glibc
defines. Since we're operating right at the kernel interface,
glibc's view of the world is entirely irrelevant. */
+
+/* --- Signal set ops --- */
extern Int VG_(ksigfillset)( vki_ksigset_t* set );
extern Int VG_(ksigemptyset)( vki_ksigset_t* set );
+
+extern Bool VG_(kisfullsigset)( vki_ksigset_t* set );
extern Bool VG_(kisemptysigset)( vki_ksigset_t* set );
+
extern Int VG_(ksigaddset)( vki_ksigset_t* set, Int signum );
+extern Int VG_(ksigdelset)( vki_ksigset_t* set, Int signum );
+extern Int VG_(ksigismember) ( vki_ksigset_t* set, Int signum );
-extern Int VG_(ksigprocmask)( Int how, const vki_ksigset_t* set,
- vki_ksigset_t* oldset );
-extern Int VG_(ksigaction) ( Int signum,
- const vki_ksigaction* act,
- vki_ksigaction* oldact );
-extern Int VG_(ksigismember) ( vki_ksigset_t* set, Int signum );
extern void VG_(ksigaddset_from_set)( vki_ksigset_t* dst,
vki_ksigset_t* src );
extern void VG_(ksigdelset_from_set)( vki_ksigset_t* dst,
vki_ksigset_t* src );
+/* --- Mess with the kernel's sig state --- */
+extern Int VG_(ksigprocmask)( Int how, const vki_ksigset_t* set,
+ vki_ksigset_t* oldset );
+extern Int VG_(ksigaction) ( Int signum,
+ const vki_ksigaction* act,
+ vki_ksigaction* oldact );
extern Int VG_(ksignal)(Int signum, void (*sighandler)(Int));
extern Int VG_(ksigaltstack)( const vki_kstack_t* ss, vki_kstack_t* oss );
+extern Int VG_(kill)( Int pid, Int signo );
/* ---------------------------------------------------------------------
extern Bool VG_(is_kerror) ( Int res );
-#define KERNEL_DO_SYSCALL(thread_id, result_lvalue) \
- VG_(load_thread_state)(thread_id); \
- VG_(copy_baseBlock_to_m_state_static)(); \
- VG_(do_syscall)(); \
- VG_(copy_m_state_static_to_baseBlock)(); \
- VG_(save_thread_state)(thread_id); \
- result_lvalue = VG_(get_thread_state)(thread_id)->m_eax;
+#define KERNEL_DO_SYSCALL(thread_id, result_lvalue) \
+ VG_(load_thread_state)(thread_id); \
+ VG_(copy_baseBlock_to_m_state_static)(); \
+ VG_(do_syscall)(); \
+ VG_(copy_m_state_static_to_baseBlock)(); \
+ VG_(save_thread_state)(thread_id); \
+ VG_(threads)[thread_id].sh_eax = VGM_WORD_VALID; \
+ result_lvalue = VG_(threads)[thread_id].m_eax;
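+
+/* Typical use (illustrative), as in the old __NR_sigaction code:
+
+      UInt res;
+      KERNEL_DO_SYSCALL(tid, res);
+      if (VG_(is_kerror)(res)) ... handle the failure ...
+*/
+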
/* ---------------------------------------------------------------------
#define VKI_SA_ONSTACK 0x08000000
#define VKI_SA_RESTART 0x10000000
-#if 0
#define VKI_SA_NOCLDSTOP 0x00000001
+#define VKI_SA_RESETHAND 0x80000000
+#define VKI_SA_ONESHOT VKI_SA_RESETHAND
+#define VKI_SA_NODEFER 0x40000000
+#define VKI_SA_NOMASK VKI_SA_NODEFER
+#if 0
#define VKI_SA_NOCLDWAIT 0x00000002 /* not supported yet */
#define VKI_SA_SIGINFO 0x00000004
-#define VKI_SA_NODEFER 0x40000000
-#define VKI_SA_RESETHAND 0x80000000
-#define VKI_SA_NOMASK SA_NODEFER
-#define VKI_SA_ONESHOT SA_RESETHAND
#define VKI_SA_INTERRUPT 0x20000000 /* dummy -- ignored */
#define VKI_SA_RESTORER 0x04000000
#endif
#define VKI_EINTR 4 /* Interrupted system call */
#define VKI_EINVAL 22 /* Invalid argument */
#define VKI_ENOMEM 12 /* Out of memory */
+#define VKI_EFAULT 14 /* Bad address */
+#define VKI_ESRCH 3 /* No such process */
#define VKI_EWOULDBLOCK VKI_EAGAIN /* Operation would block */
#define VKI_EAGAIN 11 /* Try again */
/* Process Valgrind's command-line opts (from env var VG_OPTS). */
process_cmd_line_options();
- /* Initialise the signal handling subsystem. */
+ /* Initialise the scheduler, and copy the client's state from
+ baseBlock into VG_(threads)[1]. This has to come before signal
+ initialisations. */
+ VG_(scheduler_init)();
+
+ /* Initialise the signal handling subsystem: snapshot the host's
+ signal state into SCSS and install our own handlers and mask. */
VG_(sigstartup_actions)();
+ /* Perhaps we're profiling Valgrind? */
# ifdef VG_PROFILE
VGP_(init_profiling)();
# endif
VG_(bbs_to_go) = VG_(clo_stop_after);
+ /* Run! */
VGP_PUSHCC(VgpSched);
- VG_(scheduler_init)();
src = VG_(scheduler)();
VGP_POPCC;
case VgSrc_ExitSyscall: /* the normal way out */
vg_assert(VG_(last_run_tid) > 0
&& VG_(last_run_tid) < VG_N_THREADS);
- tst = VG_(get_thread_state)(VG_(last_run_tid));
+ tst = & VG_(threads)[VG_(last_run_tid)];
vg_assert(tst->status == VgTs_Runnable);
/* The thread's %EBX will hold the arg to exit(), so we just
do exit with that arg. */
return True;
}
+Bool VG_(kisfullsigset)( vki_ksigset_t* set )
+{
+ Int i;
+ vg_assert(set != NULL);
+ for (i = 0; i < VKI_KNSIG_WORDS; i++)
+ if (set->ws[i] != ~0x0) return False;
+ return True;
+}
+
+
Int VG_(ksigaddset)( vki_ksigset_t* set, Int signum )
{
if (set == NULL)
return 0;
}
+Int VG_(ksigdelset)( vki_ksigset_t* set, Int signum )
+{
+ if (set == NULL)
+ return -1;
+ if (signum < 1 || signum > VKI_KNSIG)
+ return -1;
+ signum--;
+ set->ws[signum / VKI_KNSIG_BPW] &= ~(1 << (signum % VKI_KNSIG_BPW));
+ return 0;
+}
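+
+/* Sanity sketch (illustrative): an add/delete round trip leaves a
+   set empty again:
+
+      vki_ksigset_t s;
+      VG_(ksigemptyset)( &s );
+      VG_(ksigaddset)( &s, VKI_SIGSEGV );
+      VG_(ksigdelset)( &s, VKI_SIGSEGV );
+      vg_assert( VG_(kisemptysigset)( &s ) );
+*/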
+
Int VG_(ksigismember) ( vki_ksigset_t* set, Int signum )
{
if (set == NULL)
= vg_do_syscall4(__NR_rt_sigaction,
signum, (UInt)act, (UInt)oldact,
VKI_KNSIG_WORDS * VKI_BYTES_PER_WORD);
+ /* VG_(printf)("res = %d\n",res); */
return VG_(is_kerror)(res) ? -1 : 0;
}
}
+Int VG_(kill)( Int pid, Int signo )
+{
+ Int res = vg_do_syscall2(__NR_kill, pid, signo);
+ return VG_(is_kerror)(res) ? -1 : 0;
+}
+
+
/* ---------------------------------------------------------------------
mmap/munmap, exit, fcntl
------------------------------------------------------------------ */
void VG_(assert_fail) ( Char* expr, Char* file, Int line, Char* fn )
{
+ static Bool entered = False;
+ if (entered)
+ VG_(exit)(2);
+ entered = True;
VG_(printf)("\n%s: %s:%d (%s): Assertion `%s' failed.\n",
"valgrind", file, line, fn, expr );
VG_(pp_sched_status)();
/* struct ThreadState is defined in vg_include.h. */
-/* Private globals. A statically allocated array of threads. NOTE:
- [0] is never used, to simplify the simulation of initialisers for
+/* Globals. A statically allocated array of threads. NOTE: [0] is
+ never used, to simplify the simulation of initialisers for
LinuxThreads. */
-static ThreadState vg_threads[VG_N_THREADS];
+ThreadState VG_(threads)[VG_N_THREADS];
/* The tid of the thread currently in VG_(baseBlock). */
static Int vg_tid_currently_in_baseBlock = VG_INVALID_THREADID;
__inline__
Bool VG_(is_valid_tid) ( ThreadId tid )
+{
+ /* tid is unsigned, hence no < 0 test. */
+ if (tid == 0) return False;
+ if (tid >= VG_N_THREADS) return False;
+ if (VG_(threads)[tid].status == VgTs_Empty) return False;
+ return True;
+}
+
+
+__inline__
+Bool VG_(is_valid_or_empty_tid) ( ThreadId tid )
{
/* tid is unsigned, hence no < 0 test. */
if (tid == 0) return False;
if (vg_tid_currently_in_baseBlock != VG_INVALID_THREADID) {
tid = vg_tid_currently_in_baseBlock;
if (VG_(baseBlock)[VGOFF_(m_esp)] <= a
- && a <= vg_threads[tid].stack_highest_word)
+ && a <= VG_(threads)[tid].stack_highest_word)
return tid;
else
tid_to_skip = tid;
}
for (tid = 1; tid < VG_N_THREADS; tid++) {
- if (vg_threads[tid].status == VgTs_Empty) continue;
+ if (VG_(threads)[tid].status == VgTs_Empty) continue;
if (tid == tid_to_skip) continue;
- if (vg_threads[tid].m_esp <= a
- && a <= vg_threads[tid].stack_highest_word)
+ if (VG_(threads)[tid].m_esp <= a
+ && a <= VG_(threads)[tid].stack_highest_word)
return tid;
}
return VG_INVALID_THREADID;
Int i;
VG_(printf)("\nsched status:\n");
for (i = 1; i < VG_N_THREADS; i++) {
- if (vg_threads[i].status == VgTs_Empty) continue;
+ if (VG_(threads)[i].status == VgTs_Empty) continue;
VG_(printf)("\nThread %d: status = ", i);
- switch (vg_threads[i].status) {
+ switch (VG_(threads)[i].status) {
case VgTs_Runnable: VG_(printf)("Runnable"); break;
case VgTs_WaitFD: VG_(printf)("WaitFD"); break;
case VgTs_WaitJoiner: VG_(printf)("WaitJoiner(%d)",
- vg_threads[i].joiner); break;
+ VG_(threads)[i].joiner); break;
case VgTs_WaitJoinee: VG_(printf)("WaitJoinee"); break;
case VgTs_Sleeping: VG_(printf)("Sleeping"); break;
case VgTs_WaitMX: VG_(printf)("WaitMX"); break;
default: VG_(printf)("???"); break;
}
VG_(printf)(", associated_mx = %p, associated_cv = %p\n",
- vg_threads[i].associated_mx,
- vg_threads[i].associated_cv );
+ VG_(threads)[i].associated_mx,
+ VG_(threads)[i].associated_cv );
VG_(pp_ExeContext)(
- VG_(get_ExeContext)( False, vg_threads[i].m_eip,
- vg_threads[i].m_ebp ));
+ VG_(get_ExeContext)( False, VG_(threads)[i].m_eip,
+ VG_(threads)[i].m_ebp ));
}
VG_(printf)("\n");
}
Int orig_size, trans_size;
/* Ensure there is space to hold a translation. */
VG_(maybe_do_lru_pass)();
- VG_(translate)( &vg_threads[tid],
+ VG_(translate)( &VG_(threads)[tid],
orig_addr, &orig_size, &trans_addr, &trans_size );
/* Copy data at trans_addr into the translation cache.
Returned pointer is to the code, not to the 4-byte
{
Int i;
for (i = 1; i < VG_N_THREADS; i++) {
- if (vg_threads[i].status == VgTs_Empty)
+ if (VG_(threads)[i].status == VgTs_Empty)
return i;
}
VG_(printf)("vg_alloc_ThreadState: no free slots available\n");
}
-ThreadState* VG_(get_thread_state_UNCHECKED) ( ThreadId tid )
-{
- vg_assert(VG_(is_valid_tid)(tid));
- return & vg_threads[tid];
-}
-
-
-ThreadState* VG_(get_thread_state) ( ThreadId tid )
-{
- vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status != VgTs_Empty);
- return & vg_threads[tid];
-}
-
-
ThreadState* VG_(get_current_thread_state) ( void )
{
- vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);
- return VG_(get_thread_state) ( vg_tid_currently_in_baseBlock );
+ vg_assert(VG_(is_valid_tid)(vg_tid_currently_in_baseBlock));
+ return & VG_(threads)[vg_tid_currently_in_baseBlock];
}
ThreadId VG_(get_current_tid) ( void )
{
- vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);
+ vg_assert(VG_(is_valid_tid)(vg_tid_currently_in_baseBlock));
return vg_tid_currently_in_baseBlock;
}
Int i;
vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
- VG_(baseBlock)[VGOFF_(m_eax)] = vg_threads[tid].m_eax;
- VG_(baseBlock)[VGOFF_(m_ebx)] = vg_threads[tid].m_ebx;
- VG_(baseBlock)[VGOFF_(m_ecx)] = vg_threads[tid].m_ecx;
- VG_(baseBlock)[VGOFF_(m_edx)] = vg_threads[tid].m_edx;
- VG_(baseBlock)[VGOFF_(m_esi)] = vg_threads[tid].m_esi;
- VG_(baseBlock)[VGOFF_(m_edi)] = vg_threads[tid].m_edi;
- VG_(baseBlock)[VGOFF_(m_ebp)] = vg_threads[tid].m_ebp;
- VG_(baseBlock)[VGOFF_(m_esp)] = vg_threads[tid].m_esp;
- VG_(baseBlock)[VGOFF_(m_eflags)] = vg_threads[tid].m_eflags;
- VG_(baseBlock)[VGOFF_(m_eip)] = vg_threads[tid].m_eip;
+ VG_(baseBlock)[VGOFF_(m_eax)] = VG_(threads)[tid].m_eax;
+ VG_(baseBlock)[VGOFF_(m_ebx)] = VG_(threads)[tid].m_ebx;
+ VG_(baseBlock)[VGOFF_(m_ecx)] = VG_(threads)[tid].m_ecx;
+ VG_(baseBlock)[VGOFF_(m_edx)] = VG_(threads)[tid].m_edx;
+ VG_(baseBlock)[VGOFF_(m_esi)] = VG_(threads)[tid].m_esi;
+ VG_(baseBlock)[VGOFF_(m_edi)] = VG_(threads)[tid].m_edi;
+ VG_(baseBlock)[VGOFF_(m_ebp)] = VG_(threads)[tid].m_ebp;
+ VG_(baseBlock)[VGOFF_(m_esp)] = VG_(threads)[tid].m_esp;
+ VG_(baseBlock)[VGOFF_(m_eflags)] = VG_(threads)[tid].m_eflags;
+ VG_(baseBlock)[VGOFF_(m_eip)] = VG_(threads)[tid].m_eip;
for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
- VG_(baseBlock)[VGOFF_(m_fpustate) + i] = vg_threads[tid].m_fpu[i];
-
- VG_(baseBlock)[VGOFF_(sh_eax)] = vg_threads[tid].sh_eax;
- VG_(baseBlock)[VGOFF_(sh_ebx)] = vg_threads[tid].sh_ebx;
- VG_(baseBlock)[VGOFF_(sh_ecx)] = vg_threads[tid].sh_ecx;
- VG_(baseBlock)[VGOFF_(sh_edx)] = vg_threads[tid].sh_edx;
- VG_(baseBlock)[VGOFF_(sh_esi)] = vg_threads[tid].sh_esi;
- VG_(baseBlock)[VGOFF_(sh_edi)] = vg_threads[tid].sh_edi;
- VG_(baseBlock)[VGOFF_(sh_ebp)] = vg_threads[tid].sh_ebp;
- VG_(baseBlock)[VGOFF_(sh_esp)] = vg_threads[tid].sh_esp;
- VG_(baseBlock)[VGOFF_(sh_eflags)] = vg_threads[tid].sh_eflags;
+ VG_(baseBlock)[VGOFF_(m_fpustate) + i] = VG_(threads)[tid].m_fpu[i];
+
+ VG_(baseBlock)[VGOFF_(sh_eax)] = VG_(threads)[tid].sh_eax;
+ VG_(baseBlock)[VGOFF_(sh_ebx)] = VG_(threads)[tid].sh_ebx;
+ VG_(baseBlock)[VGOFF_(sh_ecx)] = VG_(threads)[tid].sh_ecx;
+ VG_(baseBlock)[VGOFF_(sh_edx)] = VG_(threads)[tid].sh_edx;
+ VG_(baseBlock)[VGOFF_(sh_esi)] = VG_(threads)[tid].sh_esi;
+ VG_(baseBlock)[VGOFF_(sh_edi)] = VG_(threads)[tid].sh_edi;
+ VG_(baseBlock)[VGOFF_(sh_ebp)] = VG_(threads)[tid].sh_ebp;
+ VG_(baseBlock)[VGOFF_(sh_esp)] = VG_(threads)[tid].sh_esp;
+ VG_(baseBlock)[VGOFF_(sh_eflags)] = VG_(threads)[tid].sh_eflags;
vg_tid_currently_in_baseBlock = tid;
}
vg_assert(vg_tid_currently_in_baseBlock != VG_INVALID_THREADID);
- vg_threads[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
- vg_threads[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
- vg_threads[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
- vg_threads[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
- vg_threads[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
- vg_threads[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
- vg_threads[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
- vg_threads[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
- vg_threads[tid].m_eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
- vg_threads[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
+ VG_(threads)[tid].m_eax = VG_(baseBlock)[VGOFF_(m_eax)];
+ VG_(threads)[tid].m_ebx = VG_(baseBlock)[VGOFF_(m_ebx)];
+ VG_(threads)[tid].m_ecx = VG_(baseBlock)[VGOFF_(m_ecx)];
+ VG_(threads)[tid].m_edx = VG_(baseBlock)[VGOFF_(m_edx)];
+ VG_(threads)[tid].m_esi = VG_(baseBlock)[VGOFF_(m_esi)];
+ VG_(threads)[tid].m_edi = VG_(baseBlock)[VGOFF_(m_edi)];
+ VG_(threads)[tid].m_ebp = VG_(baseBlock)[VGOFF_(m_ebp)];
+ VG_(threads)[tid].m_esp = VG_(baseBlock)[VGOFF_(m_esp)];
+ VG_(threads)[tid].m_eflags = VG_(baseBlock)[VGOFF_(m_eflags)];
+ VG_(threads)[tid].m_eip = VG_(baseBlock)[VGOFF_(m_eip)];
for (i = 0; i < VG_SIZE_OF_FPUSTATE_W; i++)
- vg_threads[tid].m_fpu[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i];
-
- vg_threads[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
- vg_threads[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
- vg_threads[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
- vg_threads[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
- vg_threads[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
- vg_threads[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
- vg_threads[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
- vg_threads[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
- vg_threads[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
+ VG_(threads)[tid].m_fpu[i] = VG_(baseBlock)[VGOFF_(m_fpustate) + i];
+
+ VG_(threads)[tid].sh_eax = VG_(baseBlock)[VGOFF_(sh_eax)];
+ VG_(threads)[tid].sh_ebx = VG_(baseBlock)[VGOFF_(sh_ebx)];
+ VG_(threads)[tid].sh_ecx = VG_(baseBlock)[VGOFF_(sh_ecx)];
+ VG_(threads)[tid].sh_edx = VG_(baseBlock)[VGOFF_(sh_edx)];
+ VG_(threads)[tid].sh_esi = VG_(baseBlock)[VGOFF_(sh_esi)];
+ VG_(threads)[tid].sh_edi = VG_(baseBlock)[VGOFF_(sh_edi)];
+ VG_(threads)[tid].sh_ebp = VG_(baseBlock)[VGOFF_(sh_ebp)];
+ VG_(threads)[tid].sh_esp = VG_(baseBlock)[VGOFF_(sh_esp)];
+ VG_(threads)[tid].sh_eflags = VG_(baseBlock)[VGOFF_(sh_eflags)];
/* Fill it up with junk. */
VG_(baseBlock)[VGOFF_(m_eax)] = junk;
{
volatile UInt trc = 0;
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status == VgTs_Runnable);
+ vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
vg_assert(VG_(bbs_to_go) > 0);
VGP_PUSHCC(VgpRun);
}
for (i = 0 /* NB: not 1 */; i < VG_N_THREADS; i++) {
- vg_threads[i].status = VgTs_Empty;
- vg_threads[i].stack_size = 0;
- vg_threads[i].stack_base = (Addr)NULL;
- vg_threads[i].tid = i;
- VG_(ksigemptyset)(&vg_threads[i].sig_mask);
- VG_(ksigemptyset)(&vg_threads[i].sigs_waited_for);
+ VG_(threads)[i].status = VgTs_Empty;
+ VG_(threads)[i].stack_size = 0;
+ VG_(threads)[i].stack_base = (Addr)NULL;
+ VG_(threads)[i].tid = i;
+ VG_(ksigemptyset)(&VG_(threads)[i].sig_mask);
+ VG_(ksigemptyset)(&VG_(threads)[i].sigs_waited_for);
}
for (i = 0; i < VG_N_WAITING_FDS; i++)
tid_main = vg_alloc_ThreadState();
vg_assert(tid_main == 1);
- vg_threads[tid_main].status = VgTs_Runnable;
- vg_threads[tid_main].joiner = VG_INVALID_THREADID;
- vg_threads[tid_main].associated_mx = NULL;
- vg_threads[tid_main].associated_cv = NULL;
- vg_threads[tid_main].retval = NULL; /* not important */
+ VG_(threads)[tid_main].status = VgTs_Runnable;
+ VG_(threads)[tid_main].joiner = VG_INVALID_THREADID;
+ VG_(threads)[tid_main].associated_mx = NULL;
+ VG_(threads)[tid_main].associated_cv = NULL;
+ VG_(threads)[tid_main].retval = NULL; /* not important */
for (i = 0; i < VG_N_THREAD_KEYS; i++)
- vg_threads[tid_main].specifics[i] = NULL;
+ VG_(threads)[tid_main].specifics[i] = NULL;
/* Copy VG_(baseBlock) state to tid_main's slot. */
vg_tid_currently_in_baseBlock = tid_main;
VG_(save_thread_state) ( tid_main );
- vg_threads[tid_main].stack_highest_word
- = vg_threads[tid_main].m_esp /* -4 ??? */;
+ VG_(threads)[tid_main].stack_highest_word
+ = VG_(threads)[tid_main].m_esp /* -4 ??? */;
/* So now ... */
vg_assert(vg_tid_currently_in_baseBlock == VG_INVALID_THREADID);
return True; \
}
- ThreadState* tst = &vg_threads[tid];
+ ThreadState* tst = &VG_(threads)[tid];
UInt* arg = (UInt*)(tst->m_eax);
UInt req_no = arg[0];
Int i, waiters;
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status == VgTs_WaitFD);
- vg_assert(vg_threads[tid].m_eax == __NR_read
- || vg_threads[tid].m_eax == __NR_write);
+ vg_assert(VG_(threads)[tid].status == VgTs_WaitFD);
+ vg_assert(VG_(threads)[tid].m_eax == __NR_read
+ || VG_(threads)[tid].m_eax == __NR_write);
/* Excessively paranoidly ... find the fd this op was waiting
for, and mark it as not being waited on. */
for (i = 0; i < VG_N_WAITING_FDS; i++) {
if (vg_waiting_fds[i].tid == tid) {
waiters++;
- vg_assert(vg_waiting_fds[i].syscall_no == vg_threads[tid].m_eax);
+ vg_assert(vg_waiting_fds[i].syscall_no == VG_(threads)[tid].m_eax);
}
}
vg_assert(waiters == 1);
/* Easy; we don't have to do anything. */
return;
- if (vg_threads[tid].status == VgTs_WaitFD
- && (vg_threads[tid].m_eax == __NR_read
- || vg_threads[tid].m_eax == __NR_write)) {
+ if (VG_(threads)[tid].status == VgTs_WaitFD
+ && (VG_(threads)[tid].m_eax == __NR_read
+ || VG_(threads)[tid].m_eax == __NR_write)) {
/* read() or write() interrupted. Force a return with EINTR. */
cleanup_waiting_fd_table(tid);
- vg_threads[tid].m_eax = -VKI_EINTR;
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].m_eax = -VKI_EINTR;
+ VG_(threads)[tid].status = VgTs_Runnable;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
return;
}
- if (vg_threads[tid].status == VgTs_WaitFD
- && vg_threads[tid].m_eax == __NR_nanosleep) {
+ if (VG_(threads)[tid].status == VgTs_WaitFD
+ && VG_(threads)[tid].m_eax == __NR_nanosleep) {
/* We interrupted a nanosleep(). The right thing to do is to
write the unused time to nanosleep's second param and return
EINTR, but I'm too lazy for that. */
return;
}
- if (vg_threads[tid].status == VgTs_WaitFD) {
+ if (VG_(threads)[tid].status == VgTs_WaitFD) {
VG_(panic)("handle_signal_return: unknown interrupted syscall");
}
Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status == VgTs_Runnable);
+ vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
- syscall_no = vg_threads[tid].m_eax; /* syscall number */
+ syscall_no = VG_(threads)[tid].m_eax; /* syscall number */
if (syscall_no == __NR_nanosleep) {
UInt t_now, t_awaken;
struct vki_timespec* req;
- req = (struct vki_timespec*)vg_threads[tid].m_ebx; /* arg1 */
+ req = (struct vki_timespec*)VG_(threads)[tid].m_ebx; /* arg1 */
t_now = VG_(read_millisecond_timer)();
t_awaken
= t_now
+ (UInt)1000ULL * (UInt)(req->tv_sec)
+ (UInt)(req->tv_nsec) / 1000000;
- vg_threads[tid].status = VgTs_Sleeping;
- vg_threads[tid].awaken_at = t_awaken;
+ VG_(threads)[tid].status = VgTs_Sleeping;
+ VG_(threads)[tid].awaken_at = t_awaken;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf, "at %d: nanosleep for %d",
t_now, t_awaken-t_now);
immediately, in order to lodge a request with the Linux kernel.
We later poll for I/O completion using select(). */
- fd = vg_threads[tid].m_ebx /* arg1 */;
+ fd = VG_(threads)[tid].m_ebx /* arg1 */;
orig_fd_blockness = fd_is_blockful(fd);
set_fd_nonblocking(fd);
vg_assert(!fd_is_blockful(fd));
VG_(check_known_blocking_syscall)(tid, syscall_no, NULL /* PRE */);
/* This trashes the thread's %eax; we have to preserve it. */
- saved_eax = vg_threads[tid].m_eax;
+ saved_eax = VG_(threads)[tid].m_eax;
KERNEL_DO_SYSCALL(tid,res);
/* Restore original blockfulness of the fd. */
*/
VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
/* We're still runnable. */
- vg_assert(vg_threads[tid].status == VgTs_Runnable);
+ vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
} else {
/* It would have blocked. First, restore %EAX to what it was
before our speculative call. */
- vg_threads[tid].m_eax = saved_eax;
+ VG_(threads)[tid].m_eax = saved_eax;
/* Put this fd in a table of fds on which we are waiting for
completion. The arguments for select() later are constructed
from this table. */
add_waiting_fd(tid, fd, saved_eax /* which holds the syscall # */);
/* Deschedule thread until an I/O completion happens. */
- vg_threads[tid].status = VgTs_WaitFD;
+ VG_(threads)[tid].status = VgTs_WaitFD;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,"block until I/O ready on fd %d", fd);
print_sched_event(tid, msg_buf);
/* Awaken any sleeping threads whose sleep has expired. */
for (tid = 1; tid < VG_N_THREADS; tid++)
- if (vg_threads[tid].status == VgTs_Sleeping)
+ if (VG_(threads)[tid].status == VgTs_Sleeping)
break;
/* Avoid pointless calls to VG_(read_millisecond_timer). */
if (tid < VG_N_THREADS) {
t_now = VG_(read_millisecond_timer)();
for (tid = 1; tid < VG_N_THREADS; tid++) {
- if (vg_threads[tid].status != VgTs_Sleeping)
+ if (VG_(threads)[tid].status != VgTs_Sleeping)
continue;
- if (t_now >= vg_threads[tid].awaken_at) {
+ if (t_now >= VG_(threads)[tid].awaken_at) {
/* Resume this thread. Set to zero the remaining-time
(second) arg of nanosleep, since it's used up all its
time. */
- vg_assert(vg_threads[tid].m_eax == __NR_nanosleep);
- rem = (struct vki_timespec *)vg_threads[tid].m_ecx; /* arg2 */
+ vg_assert(VG_(threads)[tid].m_eax == __NR_nanosleep);
+ rem = (struct vki_timespec *)VG_(threads)[tid].m_ecx; /* arg2 */
if (rem != NULL) {
rem->tv_sec = 0;
rem->tv_nsec = 0;
}
/* Make the syscall return 0 (success). */
- vg_threads[tid].m_eax = 0;
+ VG_(threads)[tid].m_eax = 0;
/* Reschedule this thread. */
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf, "at %d: nanosleep done",
t_now);
}
/* UNBLOCK ALL SIGNALS */
- VG_(restore_host_signals)( &saved_procmask );
+ VG_(restore_all_host_signals)( &saved_procmask );
/* VG_(printf)("poll_for_io_completions: %d fs ready\n", n_ready); */
/* The thread actually has to be waiting for the I/O event it
requested before we can deliver the result! */
- if (vg_threads[tid].status != VgTs_WaitFD)
+ if (VG_(threads)[tid].status != VgTs_WaitFD)
continue;
/* Ok, actually do it! We can safely use %EAX as the syscall
number, since the call would have blocked. */
syscall_no = vg_waiting_fds[i].syscall_no;
- vg_assert(syscall_no == vg_threads[tid].m_eax);
+ vg_assert(syscall_no == VG_(threads)[tid].m_eax);
KERNEL_DO_SYSCALL(tid,res);
VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
/* Reschedule. */
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
/* Mark slot as no longer in use. */
vg_waiting_fds[i].fd = -1;
/* pp_sched_status(); */
{
Int i, now;
for (i = 1; i < VG_N_THREADS; i++) {
- if (vg_threads[i].status != VgTs_WaitCV)
+ if (VG_(threads)[i].status != VgTs_WaitCV)
continue;
- if (vg_threads[i].awaken_at == 0xFFFFFFFF /* no timeout */)
+ if (VG_(threads)[i].awaken_at == 0xFFFFFFFF /* no timeout */)
continue;
now = VG_(read_millisecond_timer)();
- if (now >= vg_threads[i].awaken_at) {
+ if (now >= VG_(threads)[i].awaken_at) {
do_pthread_cond_timedwait_TIMEOUT(i);
}
}
while (True) {
tid_next++;
if (tid_next >= VG_N_THREADS) tid_next = 1;
- if (vg_threads[tid_next].status == VgTs_WaitFD
- || vg_threads[tid_next].status == VgTs_Sleeping
- || vg_threads[tid_next].status == VgTs_WaitSIG
- || (vg_threads[tid_next].status == VgTs_WaitCV
- && vg_threads[tid_next].awaken_at != 0xFFFFFFFF))
+ if (VG_(threads)[tid_next].status == VgTs_WaitFD
+ || VG_(threads)[tid_next].status == VgTs_Sleeping
+ || VG_(threads)[tid_next].status == VgTs_WaitSIG
+ || (VG_(threads)[tid_next].status == VgTs_WaitCV
+ && VG_(threads)[tid_next].awaken_at != 0xFFFFFFFF))
n_in_bounded_wait ++;
- if (vg_threads[tid_next].status == VgTs_Runnable)
+ if (VG_(threads)[tid_next].status == VgTs_Runnable)
break; /* We can run this one. */
if (tid_next == tid)
break; /* been all the way round */
}
tid = tid_next;
- if (vg_threads[tid].status == VgTs_Runnable) {
+ if (VG_(threads)[tid].status == VgTs_Runnable) {
/* Found a suitable candidate. Fall out of this loop, so
we can advance to stage 2 of the scheduler: actually
running the thread. */
dispatch_ctr_SAVED = VG_(dispatch_ctr);
/* paranoia ... */
- vg_assert(vg_threads[tid].tid == tid);
+ vg_assert(VG_(threads)[tid].tid == tid);
/* Actually run thread tid. */
while (True) {
# if 0
if (VG_(bbs_done) > 31700000 + 0) {
dispatch_ctr_SAVED = VG_(dispatch_ctr) = 2;
- VG_(translate)(&vg_threads[tid], vg_threads[tid].m_eip,
+ VG_(translate)(&VG_(threads)[tid], VG_(threads)[tid].m_eip,
NULL,NULL,NULL);
}
- vg_assert(vg_threads[tid].m_eip != 0);
+ vg_assert(VG_(threads)[tid].m_eip != 0);
# endif
trc = run_thread_for_a_while ( tid );
# if 0
- if (0 == vg_threads[tid].m_eip) {
+ if (0 == VG_(threads)[tid].m_eip) {
VG_(printf)("tid = %d, dc = %llu\n", tid, VG_(bbs_done));
- vg_assert(0 != vg_threads[tid].m_eip);
+ vg_assert(0 != VG_(threads)[tid].m_eip);
}
# endif
/* Trivial event. Miss in the fast-cache. Do a full
lookup for it. */
trans_addr
- = VG_(search_transtab) ( vg_threads[tid].m_eip );
+ = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
if (trans_addr == (Addr)0) {
/* Not found; we need to request a translation. */
- create_translation_for( tid, vg_threads[tid].m_eip );
- trans_addr = VG_(search_transtab) ( vg_threads[tid].m_eip );
+ create_translation_for( tid, VG_(threads)[tid].m_eip );
+ trans_addr = VG_(search_transtab) ( VG_(threads)[tid].m_eip );
if (trans_addr == (Addr)0)
VG_(panic)("VG_TRC_INNER_FASTMISS: missing tt_fast entry");
}
if (trc == VG_TRC_EBP_JMP_CLIENTREQ) {
Bool done;
/* VG_(printf)("request 0x%x\n",
- *(UInt*)(vg_threads[tid].m_eax)); */
+ *(UInt*)(VG_(threads)[tid].m_eax)); */
done = maybe_do_trivial_clientreq(tid);
if (done) {
/* The request is done. We try and continue with the
same thread if still runnable. If not, go back to
Stage 1 to select a new thread to run. */
- if (vg_threads[tid].status == VgTs_Runnable)
+ if (VG_(threads)[tid].status == VgTs_Runnable)
continue; /* with this thread */
else
goto stage1;
to exit. */
# if 0
{ UInt* esp; Int i;
- esp=(UInt*)vg_threads[tid].m_esp;
+ esp=(UInt*)VG_(threads)[tid].m_esp;
VG_(printf)("\nBEFORE\n");
for (i = 10; i >= -10; i--)
VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
}
# endif
- if (vg_threads[tid].m_eax == __NR_exit)
+ if (VG_(threads)[tid].m_eax == __NR_exit)
return VgSrc_ExitSyscall;
sched_do_syscall(tid);
# if 0
{ UInt* esp; Int i;
- esp=(UInt*)vg_threads[tid].m_esp;
+ esp=(UInt*)VG_(threads)[tid].m_esp;
VG_(printf)("AFTER\n");
for (i = 10; i >= -10; i--)
VG_(printf)("%2d %p = 0x%x\n", i, &esp[i], esp[i]);
}
# endif
- if (vg_threads[tid].status == VgTs_Runnable)
+ if (VG_(threads)[tid].status == VgTs_Runnable)
continue; /* with this thread */
else
goto stage1;
*/
/* The thread's %EAX points at an arg block, the first
word of which is the request code. */
- request_code = ((UInt*)(vg_threads[tid].m_eax))[0];
+ request_code = ((UInt*)(VG_(threads)[tid].m_eax))[0];
if (0) {
VG_(sprintf)(msg_buf, "request 0x%x", request_code );
print_sched_event(tid, msg_buf);
throwing away the result. */
VG_(printf)(
"======vvvvvvvv====== LAST TRANSLATION ======vvvvvvvv======\n");
- VG_(translate)( &vg_threads[tid], vg_threads[tid].m_eip, NULL, NULL, NULL );
+ VG_(translate)( &VG_(threads)[tid],
+ VG_(threads)[tid].m_eip, NULL, NULL, NULL );
VG_(printf)("\n");
VG_(printf)(
"======^^^^^^^^====== LAST TRANSLATION ======^^^^^^^^======\n");
typedef unsigned long int pthread_t;
*/
-/* Write a value to the client's %EDX (request return value register)
- and set the shadow to indicate it is defined. */
-#define SET_EDX(zztid, zzval) \
- do { vg_threads[zztid].m_edx = (zzval); \
- vg_threads[zztid].sh_edx = VGM_WORD_VALID; \
- } while (0)
-
/* -----------------------------------------------------------
Thread CREATION, JOINAGE and CANCELLATION.
static
void cleanup_after_thread_exited ( ThreadId tid )
{
- vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status == VgTs_Empty);
+ vg_assert(VG_(is_valid_or_empty_tid)(tid));
+ vg_assert(VG_(threads)[tid].status == VgTs_Empty);
/* Mark its stack no-access */
if (VG_(clo_instrument) && tid != 1)
- VGM_(make_noaccess)( vg_threads[tid].stack_base,
- vg_threads[tid].stack_size );
+ VGM_(make_noaccess)( VG_(threads)[tid].stack_base,
+ VG_(threads)[tid].stack_size );
/* Forget about any pending signals directed specifically at this
- thread. */
- VG_(notify_signal_machinery_of_thread_exit)( tid );
-
- /* Get rid of signal handlers specifically arranged for this
- thread. */
- VG_(update_sigstate_following_WaitSIG_change)();
+ thread, and get rid of signal handlers specifically arranged for
+ this thread. */
+ VG_(handle_SCSS_change)( False /* lazy update */ );
}
Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status != VgTs_Empty);
+ vg_assert(VG_(threads)[tid].status != VgTs_Empty);
if (!VG_(is_valid_tid)(tid_cancellee)
- || vg_threads[tid_cancellee].status == VgTs_Empty) {
+ || VG_(threads)[tid_cancellee].status == VgTs_Empty) {
SET_EDX(tid, ESRCH);
return;
}
VG_(sprintf)(msg_buf, "cancelled by %d", tid);
print_sched_event(tid_cancellee, msg_buf);
}
- vg_threads[tid_cancellee].m_eax = (UInt)PTHREAD_CANCELED;
- vg_threads[tid_cancellee].m_eip = (UInt)&VG_(pthreadreturn_bogusRA);
- vg_threads[tid_cancellee].status = VgTs_Runnable;
+ VG_(threads)[tid_cancellee].m_eax = (UInt)PTHREAD_CANCELED;
+ VG_(threads)[tid_cancellee].m_eip = (UInt)&VG_(pthreadreturn_bogusRA);
+ VG_(threads)[tid_cancellee].status = VgTs_Runnable;
/* We return with success (0). */
SET_EDX(tid, 0);
VG_(sprintf)(msg_buf, "exiting with %p", retval);
print_sched_event(tid, msg_buf);
}
- vg_threads[tid].m_eax = (UInt)retval;
- vg_threads[tid].m_eip = (UInt)&VG_(pthreadreturn_bogusRA);
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].m_eax = (UInt)retval;
+ VG_(threads)[tid].m_eip = (UInt)&VG_(pthreadreturn_bogusRA);
+ VG_(threads)[tid].status = VgTs_Runnable;
}
/* Mark it as not in use. Leave the stack in place so the next
user of this slot doesn't reallocate it. */
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status != VgTs_Empty);
+ vg_assert(VG_(threads)[tid].status != VgTs_Empty);
- vg_threads[tid].retval = retval;
+ VG_(threads)[tid].retval = retval;
- if (vg_threads[tid].joiner == VG_INVALID_THREADID) {
+ if (VG_(threads)[tid].joiner == VG_INVALID_THREADID) {
/* No one has yet done a join on me */
- vg_threads[tid].status = VgTs_WaitJoiner;
+ VG_(threads)[tid].status = VgTs_WaitJoiner;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
"root fn returns, waiting for a call pthread_join(%d)",
%EAX -- in order to extract the 2nd param of its pthread_join
call. TODO: free properly the slot (also below).
*/
- jnr = vg_threads[tid].joiner;
+ jnr = VG_(threads)[tid].joiner;
vg_assert(VG_(is_valid_tid)(jnr));
- vg_assert(vg_threads[jnr].status == VgTs_WaitJoinee);
- jnr_args = (UInt*)vg_threads[jnr].m_eax;
+ vg_assert(VG_(threads)[jnr].status == VgTs_WaitJoinee);
+ jnr_args = (UInt*)VG_(threads)[jnr].m_eax;
jnr_thread_return = (void**)(jnr_args[2]);
if (jnr_thread_return != NULL)
- *jnr_thread_return = vg_threads[tid].retval;
+ *jnr_thread_return = VG_(threads)[tid].retval;
SET_EDX(jnr, 0); /* success */
- vg_threads[jnr].status = VgTs_Runnable;
- vg_threads[tid].status = VgTs_Empty; /* bye! */
+ VG_(threads)[jnr].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Empty; /* bye! */
cleanup_after_thread_exited ( tid );
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
/* jee, the joinee, is the thread specified as an arg in thread
tid's call to pthread_join. So tid is the join-er. */
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(vg_threads[tid].status == VgTs_Runnable);
+ vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
if (jee == tid) {
SET_EDX(tid, EDEADLK); /* libc constant, not a kernel one */
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
return;
}
if (jee < 0
|| jee >= VG_N_THREADS
- || vg_threads[jee].status == VgTs_Empty) {
+ || VG_(threads)[jee].status == VgTs_Empty) {
/* Invalid thread to join to. */
SET_EDX(tid, EINVAL);
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
return;
}
- if (vg_threads[jee].joiner != VG_INVALID_THREADID) {
+ if (VG_(threads)[jee].joiner != VG_INVALID_THREADID) {
/* Someone already did join on this thread */
SET_EDX(tid, EINVAL);
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
return;
}
- /* if (vg_threads[jee].detached) ... */
+ /* if (VG_(threads)[jee].detached) ... */
/* Perhaps the joinee has already finished? If so return
immediately with its return code, and free up the slot. TODO:
free it properly (also above). */
- if (vg_threads[jee].status == VgTs_WaitJoiner) {
- vg_assert(vg_threads[jee].joiner == VG_INVALID_THREADID);
+ if (VG_(threads)[jee].status == VgTs_WaitJoiner) {
+ vg_assert(VG_(threads)[jee].joiner == VG_INVALID_THREADID);
SET_EDX(tid, 0); /* success */
if (thread_return != NULL) {
- *thread_return = vg_threads[jee].retval;
+ *thread_return = VG_(threads)[jee].retval;
/* Not really right, since it makes the thread's return value
appear to be defined even if it isn't. */
if (VG_(clo_instrument))
VGM_(make_readable)( (Addr)thread_return, sizeof(void*) );
}
- vg_threads[tid].status = VgTs_Runnable;
- vg_threads[jee].status = VgTs_Empty; /* bye! */
+ VG_(threads)[tid].status = VgTs_Runnable;
+ VG_(threads)[jee].status = VgTs_Empty; /* bye! */
cleanup_after_thread_exited ( jee );
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
}
/* Ok, so we'll have to wait on jee. */
- vg_threads[jee].joiner = tid;
- vg_threads[tid].status = VgTs_WaitJoinee;
+ VG_(threads)[jee].joiner = tid;
+ VG_(threads)[tid].status = VgTs_WaitJoinee;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
"blocking on call of pthread_join(%d)", jee );
/* Paranoia ... */
vg_assert(sizeof(pthread_t) == sizeof(UInt));
- vg_assert(vg_threads[parent_tid].status != VgTs_Empty);
+ vg_assert(VG_(threads)[parent_tid].status != VgTs_Empty);
tid = vg_alloc_ThreadState();
/* If we've created the main thread's tid, we're in deep trouble :) */
vg_assert(tid != 1);
- vg_assert(VG_(is_valid_tid)(tid));
+ vg_assert(VG_(is_valid_or_empty_tid)(tid));
/* Copy the parent's CPU state into the child's, in a roundabout
way (via baseBlock). */
is inadequate. */
new_stk_szb = VG_PTHREAD_STACK_MIN;
- if (new_stk_szb > vg_threads[tid].stack_size) {
+ if (new_stk_szb > VG_(threads)[tid].stack_size) {
/* Again, for good measure :) We definitely don't want to be
allocating a stack for the main thread. */
vg_assert(tid != 1);
/* for now, we don't handle the case of anything other than
assigning it for the first time. */
- vg_assert(vg_threads[tid].stack_size == 0);
- vg_assert(vg_threads[tid].stack_base == (Addr)NULL);
+ vg_assert(VG_(threads)[tid].stack_size == 0);
+ vg_assert(VG_(threads)[tid].stack_base == (Addr)NULL);
new_stack = (Addr)VG_(get_memory_from_mmap)( new_stk_szb );
- vg_threads[tid].stack_base = new_stack;
- vg_threads[tid].stack_size = new_stk_szb;
- vg_threads[tid].stack_highest_word
+ VG_(threads)[tid].stack_base = new_stack;
+ VG_(threads)[tid].stack_size = new_stk_szb;
+ VG_(threads)[tid].stack_highest_word
= new_stack + new_stk_szb
- VG_AR_CLIENT_STACKBASE_REDZONE_SZB; /* -4 ??? */;
}
- vg_threads[tid].m_esp
- = vg_threads[tid].stack_base
- + vg_threads[tid].stack_size
+ VG_(threads)[tid].m_esp
+ = VG_(threads)[tid].stack_base
+ + VG_(threads)[tid].stack_size
- VG_AR_CLIENT_STACKBASE_REDZONE_SZB;
if (VG_(clo_instrument))
- VGM_(make_noaccess)( vg_threads[tid].m_esp,
+ VGM_(make_noaccess)( VG_(threads)[tid].m_esp,
VG_AR_CLIENT_STACKBASE_REDZONE_SZB );
/* push arg */
- vg_threads[tid].m_esp -= 4;
- * (UInt*)(vg_threads[tid].m_esp) = (UInt)arg;
+ VG_(threads)[tid].m_esp -= 4;
+ * (UInt*)(VG_(threads)[tid].m_esp) = (UInt)arg;
/* push (magical) return address */
- vg_threads[tid].m_esp -= 4;
- * (UInt*)(vg_threads[tid].m_esp) = (UInt)VG_(pthreadreturn_bogusRA);
+ VG_(threads)[tid].m_esp -= 4;
+ * (UInt*)(VG_(threads)[tid].m_esp) = (UInt)VG_(pthreadreturn_bogusRA);
if (VG_(clo_instrument))
- VGM_(make_readable)( vg_threads[tid].m_esp, 2 * 4 );
+ VGM_(make_readable)( VG_(threads)[tid].m_esp, 2 * 4 );
/* this is where we start */
- vg_threads[tid].m_eip = (UInt)start_routine;
+ VG_(threads)[tid].m_eip = (UInt)start_routine;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
if (VG_(clo_instrument))
VGM_(make_readable)( (Addr)thread, sizeof(pthread_t) );
- vg_threads[tid].associated_mx = NULL;
- vg_threads[tid].associated_cv = NULL;
- vg_threads[tid].joiner = VG_INVALID_THREADID;
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].associated_mx = NULL;
+ VG_(threads)[tid].associated_cv = NULL;
+ VG_(threads)[tid].joiner = VG_INVALID_THREADID;
+ VG_(threads)[tid].status = VgTs_Runnable;
for (i = 0; i < VG_N_THREAD_KEYS; i++)
- vg_threads[tid].specifics[i] = NULL;
+ VG_(threads)[tid].specifics[i] = NULL;
- /* We inherit our parent's signal mask. (?!) */
- vg_threads[tid].sig_mask = vg_threads[parent_tid].sig_mask;
- VG_(ksigemptyset)(&vg_threads[i].sigs_waited_for);
+ /* We inherit our parent's signal mask. */
+ VG_(threads)[tid].sig_mask = VG_(threads)[parent_tid].sig_mask;
+ VG_(ksigemptyset)(&VG_(threads)[tid].sigs_waited_for);
/* return zero */
SET_EDX(parent_tid, 0); /* success */
/* Find some arbitrary thread waiting on this mutex, and make it
runnable. If none are waiting, mark the mutex as not held. */
for (i = 1; i < VG_N_THREADS; i++) {
- if (vg_threads[i].status == VgTs_Empty)
+ if (VG_(threads)[i].status == VgTs_Empty)
continue;
- if (vg_threads[i].status == VgTs_WaitMX
- && vg_threads[i].associated_mx == mutex)
+ if (VG_(threads)[i].status == VgTs_WaitMX
+ && VG_(threads)[i].associated_mx == mutex)
break;
}
/* Notionally transfer the hold to thread i, whose
pthread_mutex_lock() call now returns with 0 (success). */
/* The .count is already == 1. */
- vg_assert(vg_threads[i].associated_mx == mutex);
+ vg_assert(VG_(threads)[i].associated_mx == mutex);
mutex->__m_owner = (_pthread_descr)i;
- vg_threads[i].status = VgTs_Runnable;
- vg_threads[i].associated_mx = NULL;
+ VG_(threads)[i].status = VgTs_Runnable;
+ VG_(threads)[i].associated_mx = NULL;
/* m_edx already holds pth_mx_lock() success (0) */
if (VG_(clo_trace_pthread_level) >= 1) {
/* Paranoia ... */
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
/* POSIX doesn't mandate this, but for sanity ... */
if (mutex == NULL) {
/* caller is polling; so return immediately. */
SET_EDX(tid, EBUSY);
} else {
- vg_threads[tid].status = VgTs_WaitMX;
- vg_threads[tid].associated_mx = mutex;
+ VG_(threads)[tid].status = VgTs_WaitMX;
+ VG_(threads)[tid].associated_mx = mutex;
SET_EDX(tid, 0); /* pth_mx_lock success value */
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
/* We get it! [for the first time]. */
mutex->__m_count = 1;
mutex->__m_owner = (_pthread_descr)tid;
- vg_assert(vg_threads[tid].associated_mx == NULL);
+ vg_assert(VG_(threads)[tid].associated_mx == NULL);
/* return 0 (success). */
SET_EDX(tid, 0);
}
/* Paranoia ... */
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (mutex == NULL) {
SET_EDX(tid, EINVAL);
pthread_cond_t* cv;
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_WaitCV
- && vg_threads[tid].awaken_at != 0xFFFFFFFF);
- mx = vg_threads[tid].associated_mx;
+ && VG_(threads)[tid].status == VgTs_WaitCV
+ && VG_(threads)[tid].awaken_at != 0xFFFFFFFF);
+ mx = VG_(threads)[tid].associated_mx;
vg_assert(mx != NULL);
- cv = vg_threads[tid].associated_cv;
+ cv = VG_(threads)[tid].associated_cv;
vg_assert(cv != NULL);
if (mx->__m_owner == VG_INVALID_THREADID) {
/* Currently unheld; hand it out to thread tid. */
vg_assert(mx->__m_count == 0);
- vg_threads[tid].status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
- vg_threads[tid].associated_cv = NULL;
- vg_threads[tid].associated_mx = NULL;
+ VG_(threads)[tid].associated_cv = NULL;
+ VG_(threads)[tid].associated_mx = NULL;
mx->__m_owner = (_pthread_descr)tid;
mx->__m_count = 1;
} else {
/* Currently held. Make thread tid be blocked on it. */
vg_assert(mx->__m_count > 0);
- vg_threads[tid].status = VgTs_WaitMX;
+ VG_(threads)[tid].status = VgTs_WaitMX;
SET_EDX(tid, ETIMEDOUT); /* pthread_cond_wait return value */
- vg_threads[tid].associated_cv = NULL;
- vg_threads[tid].associated_mx = mx;
+ VG_(threads)[tid].associated_cv = NULL;
+ VG_(threads)[tid].associated_mx = mx;
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf,
"pthread_cond_timedwai cv %p: TIMEOUT -> BLOCK for mx %p",
/* Find a thread waiting on this CV. */
for (i = 1; i < VG_N_THREADS; i++) {
- if (vg_threads[i].status == VgTs_Empty)
+ if (VG_(threads)[i].status == VgTs_Empty)
continue;
- if (vg_threads[i].status == VgTs_WaitCV
- && vg_threads[i].associated_cv == cond)
+ if (VG_(threads)[i].status == VgTs_WaitCV
+ && VG_(threads)[i].associated_cv == cond)
break;
}
vg_assert(i <= VG_N_THREADS);
return;
}
- mx = vg_threads[i].associated_mx;
+ mx = VG_(threads)[i].associated_mx;
vg_assert(mx != NULL);
if (mx->__m_owner == VG_INVALID_THREADID) {
/* Currently unheld; hand it out to thread i. */
vg_assert(mx->__m_count == 0);
- vg_threads[i].status = VgTs_Runnable;
- vg_threads[i].associated_cv = NULL;
- vg_threads[i].associated_mx = NULL;
+ VG_(threads)[i].status = VgTs_Runnable;
+ VG_(threads)[i].associated_cv = NULL;
+ VG_(threads)[i].associated_mx = NULL;
mx->__m_owner = (_pthread_descr)i;
mx->__m_count = 1;
/* .m_edx already holds pth_cond_wait success value (0) */
} else {
/* Currently held. Make thread i be blocked on it. */
vg_assert(mx->__m_count > 0);
- vg_threads[i].status = VgTs_WaitMX;
- vg_threads[i].associated_cv = NULL;
- vg_threads[i].associated_mx = mx;
+ VG_(threads)[i].status = VgTs_WaitMX;
+ VG_(threads)[i].associated_cv = NULL;
+ VG_(threads)[i].associated_mx = mx;
SET_EDX(i, 0); /* pth_cond_wait success value */
if (VG_(clo_trace_pthread_level) >= 1) {
/* Paranoia ... */
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (mutex == NULL || cond == NULL) {
SET_EDX(tid, EINVAL);
}
/* Queue ourselves on the condition. */
- vg_threads[tid].status = VgTs_WaitCV;
- vg_threads[tid].associated_cv = cond;
- vg_threads[tid].associated_mx = mutex;
- vg_threads[tid].awaken_at = ms_end;
+ VG_(threads)[tid].status = VgTs_WaitCV;
+ VG_(threads)[tid].associated_cv = cond;
+ VG_(threads)[tid].associated_mx = mutex;
+ VG_(threads)[tid].awaken_at = ms_end;
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf,
/* Paranoia ... */
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (cond == NULL) {
SET_EDX(tid, EINVAL);
vg_assert(sizeof(pthread_key_t) == sizeof(ThreadKey));
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
for (i = 0; i < VG_N_THREAD_KEYS; i++)
if (!vg_thread_keys[i].inuse)
}
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
SET_EDX(tid, EINVAL);
functions correctly. */
# if 1
for (tid = 1; tid < VG_N_THREADS; tid++) {
- if (vg_threads[tid].status != VgTs_Empty)
- vg_threads[tid].specifics[key] = NULL;
+ if (VG_(threads)[tid].status != VgTs_Empty)
+ VG_(threads)[tid].specifics[key] = NULL;
}
# endif
}
}
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
SET_EDX(tid, (UInt)NULL);
return;
}
- SET_EDX(tid, (UInt)vg_threads[tid].specifics[key]);
+ SET_EDX(tid, (UInt)VG_(threads)[tid].specifics[key]);
}
}
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (!is_valid_key(key)) {
SET_EDX(tid, EINVAL);
return;
}
- vg_threads[tid].specifics[key] = pointer;
+ VG_(threads)[tid].specifics[key] = pointer;
SET_EDX(tid, 0);
}
------------------------------------------------ */
/* See comment in vg_libpthread.c:pthread_sigmask() regarding
- deliberate confusion of types sigset_t and vki_sigset_t. Also re
- meaning of the mashed_how value. Return 0 for OK and 1 for some
- kind of addressing error, which the vg_libpthread.c routine turns
- into return values 0 and EFAULT respectively. */
+ deliberate confusion of types sigset_t and vki_sigset_t. Return 0
+ for OK and 1 for some kind of addressing error, which the
+ vg_libpthread.c routine turns into return values 0 and EFAULT
+ respectively. */
static
void do_pthread_sigmask ( ThreadId tid,
- Int mashed_how,
+ Int vki_how,
vki_ksigset_t* newmask,
vki_ksigset_t* oldmask )
{
Char msg_buf[100];
if (VG_(clo_trace_pthread_level) >= 1) {
VG_(sprintf)(msg_buf,
- "pthread_sigmask m_how %d, newmask %p, oldmask %p",
- mashed_how, newmask, oldmask );
+ "pthread_sigmask vki_how %d, newmask %p, oldmask %p",
+ vki_how, newmask, oldmask );
print_pthread_event(tid, msg_buf);
}
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
if (VG_(clo_instrument)) {
/* TODO check newmask/oldmask are addressable/defined */
}
- if (oldmask != NULL) {
- *oldmask = vg_threads[tid].sig_mask;
- if (VG_(clo_instrument)) {
- VGM_(make_readable)( (Addr)oldmask, sizeof(vki_ksigset_t) );
- }
- }
-
- switch (mashed_how) {
- case 1: /* SIG_SETMASK */
- vg_threads[tid].sig_mask = *newmask;
- break;
- case 2: /* SIG_BLOCK */
- VG_(ksigaddset_from_set)( & vg_threads[tid].sig_mask, newmask);
- break;
- case 3: /* SIG_UNBLOCK */
- VG_(ksigdelset_from_set)( & vg_threads[tid].sig_mask, newmask);
- break;
- default:
- VG_(panic)("do_pthread_sigmask: invalid mashed_how");
- /*NOTREACHED*/
- break;
- }
+ VG_(do_pthread_sigmask_SCSS_upd) ( tid, vki_how, newmask, oldmask );
+ /* Success. */
SET_EDX(tid, 0);
}
vki_ksigset_t* set,
Int* sig )
{
- Char msg_buf[100];
+ vki_ksigset_t irrelevant_sigmask;
+ Char msg_buf[100];
+
if (VG_(clo_trace_signals) || VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
"suspend due to sigwait(): set %p, sig %p",
}
vg_assert(VG_(is_valid_tid)(tid)
- && vg_threads[tid].status == VgTs_Runnable);
+ && VG_(threads)[tid].status == VgTs_Runnable);
+
+ /* Change SCSS */
+ VG_(threads)[tid].sigs_waited_for = *set;
+ VG_(threads)[tid].status = VgTs_WaitSIG;
- vg_threads[tid].sigs_waited_for = *set;
- vg_threads[tid].status = VgTs_WaitSIG;
- VG_(update_sigstate_following_WaitSIG_change)();
+ VG_(block_all_host_signals)( &irrelevant_sigmask );
+ VG_(handle_SCSS_change)( False /* lazy update */ );
+}
+
+
+static
+void do_pthread_kill ( ThreadId tid, /* me */
+ ThreadId thread, /* thread to signal */
+ Int sig )
+{
+ Char msg_buf[100];
+
+ if (VG_(clo_trace_signals) || VG_(clo_trace_pthread_level) >= 1) {
+ VG_(sprintf)(msg_buf,
+ "pthread_kill thread %d, signo %d",
+ thread, sig );
+ print_pthread_event(tid, msg_buf);
+ }
+
+ vg_assert(VG_(is_valid_tid)(tid)
+ && VG_(threads)[tid].status == VgTs_Runnable);
+
+ if (!VG_(is_valid_tid)(thread)) {
+ SET_EDX(tid, -VKI_ESRCH);
+ return;
+ }
+
+ if (sig < 1 || sig > VKI_KNSIG) {
+ SET_EDX(tid, -VKI_EINVAL);
+ return;
+ }
+
+ VG_(send_signal_to_thread)( thread, sig );
+ SET_EDX(tid, 0);
}
static
void do_nontrivial_clientreq ( ThreadId tid )
{
- UInt* arg = (UInt*)(vg_threads[tid].m_eax);
+ UInt* arg = (UInt*)(VG_(threads)[tid].m_eax);
UInt req_no = arg[0];
switch (req_no) {
(Int*)(arg[2]) );
break;
+ case VG_USERREQ__PTHREAD_KILL:
+ do_pthread_kill ( tid, arg[1], arg[2] );
+ break;
+
+
case VG_USERREQ__MAKE_NOACCESS:
case VG_USERREQ__MAKE_WRITABLE:
case VG_USERREQ__MAKE_READABLE:
case VG_USERREQ__DO_LEAK_CHECK:
SET_EDX(
tid,
- VG_(handle_client_request) ( &vg_threads[tid], arg )
+ VG_(handle_client_request) ( &VG_(threads)[tid], arg )
);
break;
/* VG_(printf)("scheduler_sanity\n"); */
for (i = 1; i < VG_N_THREADS; i++) {
- mx = vg_threads[i].associated_mx;
- cv = vg_threads[i].associated_cv;
- if (vg_threads[i].status == VgTs_WaitMX) {
+ mx = VG_(threads)[i].associated_mx;
+ cv = VG_(threads)[i].associated_cv;
+ if (VG_(threads)[i].status == VgTs_WaitMX) {
/* If we're waiting on a MX: (1) the mx is not null, (2, 3)
it's actually held by someone, since otherwise this thread
is deadlocked, (4) the mutex's owner is not us, since
/* 3 */ vg_assert(VG_(is_valid_tid)((ThreadId)mx->__m_owner));
/* 4 */ vg_assert(i != (ThreadId)mx->__m_owner);
} else
- if (vg_threads[i].status == VgTs_WaitCV) {
+ if (VG_(threads)[i].status == VgTs_WaitCV) {
vg_assert(cv != NULL);
vg_assert(mx != NULL);
} else {
/* vg_assert(mx == NULL); */
}
- if (vg_threads[i].status != VgTs_Empty) {
+ if (VG_(threads)[i].status != VgTs_Empty) {
Int
- stack_used = (Addr)vg_threads[i].stack_highest_word
- - (Addr)vg_threads[i].m_esp;
+ stack_used = (Addr)VG_(threads)[i].stack_highest_word
+ - (Addr)VG_(threads)[i].m_esp;
if (i > 1 /* not the root thread */
&& stack_used
>= (VG_PTHREAD_STACK_MIN - 1000 /* paranoia */)) {
VG_(exit)(1);
}
- if (vg_threads[i].status == VgTs_WaitSIG) {
+ if (VG_(threads)[i].status == VgTs_WaitSIG) {
vg_assert( ! VG_(kisemptysigset)(
- & vg_threads[i].sigs_waited_for) );
+ & VG_(threads)[i].sigs_waited_for) );
} else {
vg_assert( VG_(kisemptysigset)(
- & vg_threads[i].sigs_waited_for) );
+ & VG_(threads)[i].sigs_waited_for) );
}
}
#include "vg_unsafe.h"
#include "valgrind.h" /* for VALGRIND_MAGIC_SEQUENCE */
+/* Define to give more sanity checking for signals. */
+#define DEBUG_SIGNALS
+
+
+/* ---------------------------------------------------------------------
+ Forwards decls.
+ ------------------------------------------------------------------ */
+
+static void vg_oursignalhandler ( Int sigNo );
+
+
+/* ---------------------------------------------------------------------
+ HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
+ ------------------------------------------------------------------ */
+
/* ---------------------------------------------------------------------
Signal state for this process.
------------------------------------------------------------------ */
+
/* Base-ment of these arrays [VKI_KNSIG]:
   valid signal numbers are 1 .. VKI_KNSIG inclusive,
   and entry [0] is not used.
*/
-/* For each signal, the current action. Either:
-
- -- VG_SH_NOHANDLER if the client hasn't asked to handle the signal,
- and we havent surreptitiously installed any handler ourselves.
- -- VG_SH_FAKEHANDLER if the client hasn't asked to handle the signal
- directly, but has so indirectly via a sigwait() request. In this
- case we may need to install our own handler to catch signals which
- the sigwait-mask for some thread will accept, but for which the
- client hasn't actually installed a handler. These "fake" handlers
- are invisible to the client, so we need to be able to distinguish
- this case so that we can fake a suitable response if the client
- should enquire about the state of this signal using sigaction.
+/* -----------------------------------------------------
+ Static client signal state (SCSS). This is the state
+ that the client thinks it has the kernel in.
+ SCSS records verbatim the client's settings. These
+ are mashed around only when SKSS is calculated from it.
+ -------------------------------------------------- */
- -- Otherwise, the client has installed a signal handler, and this
- is the pointer to it.
+typedef
+ struct {
+ void* scss_handler; /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
+ client's handler */
+ UInt scss_flags;
+ vki_ksigset_t scss_mask;
+ void* scss_restorer; /* god knows; we ignore it. */
+ }
+ SCSS_Per_Signal;
- Invariant: we never expect to receive a signal for which the
- vg_sighandler[] entry is VG_SH_NOHANDLER. If it is VG_SH_FAKEHANDLER
- we know that we should look for a thread in VgTs_WaitSIG state to
- release. Otherwise, we find a thread capable of handling this
- signal and run the specified handler on it.
+typedef
+ struct {
+ SCSS_Per_Signal scss_per_sig[1+VKI_KNSIG];
+ /* Additional elements to SCSS not stored here:
+ - for each thread, the thread's blocking mask
+ - for each thread in WaitSIG, the set of waited-on sigs
+ */
+ }
+ SCSS;
+
+static SCSS vg_scss;
+
+
+/* -----------------------------------------------------
+ Static kernel signal state (SKSS). This is the state
+ that we have the kernel in. It is computed from SCSS.
+ -------------------------------------------------- */
+
+/* Let's do:
+ sigprocmask assigns to all thread masks
+ so that at least everything is always consistent
+ Flags:
+ SA_NOCLDSTOP -- passed to kernel
+ SA_ONESHOT or SA_RESETHAND -- ignored (our handlers persist)
+ SA_RESTART -- we observe this but set our handlers always to restart
+ SA_NOMASK or SA_NODEFER -- ignored (our handler blocks all signals anyway)
+ SA_ONSTACK -- currently not supported; abort if set.
*/
-#define VG_SH_NOHANDLER ((void*)0)
-#define VG_SH_FAKEHANDLER ((void*)1)
-void* vg_sighandler[1+VKI_KNSIG];
+typedef
+ struct {
+ void* skss_handler; /* VKI_SIG_DFL or VKI_SIG_IGN
+ or ptr to our handler */
+ UInt skss_flags;
+ /* There is no skss_mask, since we know that we will always ask
+ for all signals to be blocked in our one-and-only
+ sighandler. */
+ /* Also there is no skss_restorer. */
+ }
+ SKSS_Per_Signal;
-/* For each signal, either:
- -- VG_SP_SIGIDLE if not pending and not running
- -- Handler address if pending AND real handler
- -- VG_SH_FAKEHANDLER if pending for sigwait
- -- VG_SP_SIGRUNNING if the handler is running and hasn't (returned or
- unblocked the signal using sigprocmask following a longjmp out
- of the handler).
- */
-#define VG_SP_SIGIDLE ((void*)0)
-#define VG_SP_SIGRUNNING ((void*)2)
+typedef
+ struct {
+ SKSS_Per_Signal skss_per_sig[1+VKI_KNSIG];
+ vki_ksigset_t skss_sigmask; /* process' blocked signal mask */
+ }
+ SKSS;
+
+static SKSS vg_skss;
+
+
+/* -----------------------------------------------------
+ Dynamic client signal state (DCSS). This holds transient
+ information about state of client signals.
+ -------------------------------------------------- */
+
+typedef
+ struct {
+ /* True iff a signal has been received but not yet passed to
+ client. */
+ Bool dcss_sigpending[1+VKI_KNSIG];
+ /* If sigpending[] is True, has meaning:
+ VG_INVALID_THREADID -- to be passed to any suitable thread
+ other -- to be passed only to the specified thread. */
+ ThreadId dcss_destthread[1+VKI_KNSIG];
+ }
+ DCSS;
+
+static DCSS vg_dcss;
+
+
+/* ---------------------------------------------------------------------
+ Compute the SKSS required by the current SCSS.
+ ------------------------------------------------------------------ */
+
+static
+void pp_SKSS ( void )
+{
+ Int sig;
+ VG_(printf)("\n\nSKSS:\n");
+ for (sig = 1; sig <= VKI_KNSIG; sig++) {
+ VG_(printf)("sig %d: handler 0x%x, flags 0x%x\n", sig,
+ vg_skss.skss_per_sig[sig].skss_handler,
+ vg_skss.skss_per_sig[sig].skss_flags );
+
+ }
+ VG_(printf)("Global sigmask (63 .. 0) = 0x%x 0x%x\n",
+ vg_skss.skss_sigmask.ws[1],
+ vg_skss.skss_sigmask.ws[0] );
+}
+
+static __inline__
+Bool is_WaitSIGd_by_any_thread ( Int sig )
+{
+ ThreadId tid;
+ for (tid = 1; tid < VG_N_THREADS; tid++) {
+ if (VG_(threads)[tid].status != VgTs_WaitSIG)
+ continue;
+ if (VG_(ksigismember)( &VG_(threads)[tid].sigs_waited_for, sig ))
+ return True;
+ }
+ return False;
+}
+
+static __inline__
+Bool is_blocked_by_all_threads ( Int sig )
+{
+ ThreadId tid;
+ for (tid = 1; tid < VG_N_THREADS; tid++) {
+ if (VG_(threads)[tid].status == VgTs_Empty)
+ continue;
+ if (! VG_(ksigismember)( &VG_(threads)[tid].sig_mask, sig ))
+ return False;
+ }
+ return True;
+}
+
+
+/* This is the core, clever bit. Computation is as follows:
+ For each signal
+ handler = if client has a handler, then our handler
+ else if is WaitSIG'd by any thread, then our handler
+ else if client is DFL, then DFL
+ else (client must be IGN) IGN
+
+ blocked = if is blocked by all threads and not WaitSIG'd by
+ any thread
+ then BLOCKED
+ else UNBLOCKED
+*/
static
-void* vg_sigpending[1+VKI_KNSIG];
+void calculate_SKSS_from_SCSS ( SKSS* dst )
+{
+ Int sig;
+ void* skss_handler;
+ void* scss_handler;
+ Bool iz_WaitSIGd_by_any_thread;
+ Bool iz_blocked_by_all_threads;
+ Bool skss_blocked;
+ UInt scss_flags;
+ UInt skss_flags;
+
+ VG_(ksigemptyset)( &dst->skss_sigmask );
+
+ for (sig = 1; sig <= VKI_KNSIG; sig++) {
+
+ /* Calculate kernel handler and blockedness for sig, as per rules
+ in above comment. */
+
+ iz_WaitSIGd_by_any_thread = is_WaitSIGd_by_any_thread(sig);
+ iz_blocked_by_all_threads = is_blocked_by_all_threads(sig);
+
+ scss_handler = vg_scss.scss_per_sig[sig].scss_handler;
+ scss_flags = vg_scss.scss_per_sig[sig].scss_flags;
+
+ /* Restorer */
+ /*
+ Doesn't seem like we can spin this one.
+ if (vg_scss.scss_per_sig[sig].scss_restorer != NULL)
+ VG_(unimplemented)
+ ("sigactions with non-NULL .sa_restorer field");
+ */
+
+ /* Handler */
+
+ if (scss_handler != VKI_SIG_DFL && scss_handler != VKI_SIG_IGN) {
+ skss_handler = &vg_oursignalhandler;
+ } else
+ if (iz_WaitSIGd_by_any_thread) {
+ skss_handler = &vg_oursignalhandler;
+ } else
+ if (scss_handler == VKI_SIG_DFL) {
+ skss_handler = VKI_SIG_DFL;
+ }
+ else {
+ vg_assert(scss_handler == VKI_SIG_IGN);
+ skss_handler = VKI_SIG_IGN;
+ }
+
+ /* Blockfulness */
+
+ skss_blocked
+ = iz_blocked_by_all_threads && !iz_WaitSIGd_by_any_thread;
+
+ /* Flags */
+
+ skss_flags = 0;
+ /* SA_NOCLDSTOP: pass to kernel */
+ if (scss_flags & VKI_SA_NOCLDSTOP)
+ skss_flags |= VKI_SA_NOCLDSTOP;
+ /* SA_ONESHOT: ignore client setting */
+ /*
+ if (!(scss_flags & VKI_SA_ONESHOT))
+ VG_(unimplemented)
+ ("sigactions without SA_ONESHOT");
+ vg_assert(scss_flags & VKI_SA_ONESHOT);
+ skss_flags |= VKI_SA_ONESHOT;
+ */
+ /* SA_RESTART: ignore client setting and set for us */
+ skss_flags |= VKI_SA_RESTART;
+ /* SA_NOMASK: not allowed */
+ /*
+ .. well, ignore it.
+ if (scss_flags & VKI_SA_NOMASK)
+ VG_(unimplemented)
+ ("sigactions with SA_NOMASK");
+ vg_assert(!(scss_flags & VKI_SA_NOMASK));
+ */
+ /* SA_ONSTACK: not allowed */
+ if (scss_flags & VKI_SA_ONSTACK)
+ VG_(unimplemented)
+ ("signals on an alternative stack (SA_ONSTACK)");
+ vg_assert(!(scss_flags & VKI_SA_ONSTACK));
+ /* ... but WE ask for on-stack ourselves ... */
+ skss_flags |= VKI_SA_ONSTACK;
+
+ /* Create SKSS entry for this signal. */
+
+ if (skss_blocked
+ && sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
+ VG_(ksigaddset)( &dst->skss_sigmask, sig );
+
+ dst->skss_per_sig[sig].skss_handler = skss_handler;
+ dst->skss_per_sig[sig].skss_flags = skss_flags;
+ }
+
+ /* Sanity checks. */
+ vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler
+ == VKI_SIG_DFL);
+ vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler
+ == VKI_SIG_DFL);
+ vg_assert(!VG_(ksigismember)( &dst->skss_sigmask, VKI_SIGKILL ));
+ vg_assert(!VG_(ksigismember)( &dst->skss_sigmask, VKI_SIGSTOP ));
+
+ if (0)
+ pp_SKSS();
+}
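To make the rules above concrete, a worked example (illustrative only;
the signal choices are arbitrary and not taken from the patch):

   /* Suppose the client installed a real handler for SIGUSR1, every
      thread blocks SIGUSR2, nothing is sigwait()d, and SIGTERM is at
      SIG_DFL everywhere.  calculate_SKSS_from_SCSS then produces:

         sig      scss_handler    WaitSIG'd  blocked-by-all  skss result
         SIGUSR1  client handler  no         no              vg_oursignalhandler, unblocked
         SIGUSR2  VKI_SIG_DFL     no         yes             VKI_SIG_DFL, set in skss_sigmask
         SIGTERM  VKI_SIG_DFL     no         no              VKI_SIG_DFL, unblocked
   */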
+
+
+/* ---------------------------------------------------------------------
+ After a possible SCSS change, update SKSS and the kernel itself.
+ ------------------------------------------------------------------ */
+
+/* IMPORTANT NOTE: to avoid race conditions, we must always enter here
+ with ALL KERNEL SIGNALS BLOCKED !
+*/
+void VG_(handle_SCSS_change) ( Bool force_update )
+{
+ Int res, sig;
+ SKSS skss_old;
+ vki_ksigaction ksa, ksa_old;
+
+# ifdef DEBUG_SIGNALS
+ vki_ksigset_t test_sigmask;
+ res = VG_(ksigprocmask)( VKI_SIG_SETMASK /*irrelevant*/,
+ NULL, &test_sigmask );
+ vg_assert(res == 0);
+ /* The kernel never reports SIGKILL or SIGSTOP as masked; that is
+ correct behaviour.  So, purely for the purposes of this
+ assertion, we fake them in here. */
+ VG_(ksigaddset)( &test_sigmask, VKI_SIGKILL );
+ VG_(ksigaddset)( &test_sigmask, VKI_SIGSTOP );
+ vg_assert(VG_(kisfullsigset)( &test_sigmask ));
+# endif
+
+ /* Remember old SKSS and calculate new one. */
+ skss_old = vg_skss;
+ calculate_SKSS_from_SCSS ( &vg_skss );
+
+ /* Compare the new SKSS entries vs the old ones, and update kernel
+ where they differ. */
+ for (sig = 1; sig <= VKI_KNSIG; sig++) {
+
+ /* Trying to do anything with SIGKILL is pointless; just ignore
+ it. */
+ if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
+ continue;
+
+ /* Aside: take the opportunity to clean up DCSS: forget about any
+ pending signals directed at dead threads. */
+ if (vg_dcss.dcss_sigpending[sig]
+ && vg_dcss.dcss_destthread[sig] != VG_INVALID_THREADID) {
+ ThreadId tid = vg_dcss.dcss_destthread[sig];
+ vg_assert(VG_(is_valid_or_empty_tid)(tid));
+ if (VG_(threads)[tid].status == VgTs_Empty) {
+ vg_dcss.dcss_sigpending[sig] = False;
+ vg_dcss.dcss_destthread[sig] = VG_INVALID_THREADID;
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "discarding pending signal %d due to thread %d exiting",
+ sig, tid );
+ }
+ }
+ /* End of the Aside. Now the Main Business. */
-/* For each signal, the thread id to which the signal should be
- delivered. This is only meaningful if the corresponding
- vg_sigpending entry actually points to a handler, ie, the signal
- is pending.
+ if (!force_update) {
+ if ((skss_old.skss_per_sig[sig].skss_handler
+ == vg_skss.skss_per_sig[sig].skss_handler)
+ && (skss_old.skss_per_sig[sig].skss_flags
+ == vg_skss.skss_per_sig[sig].skss_flags))
+ /* no difference */
+ continue;
+ }
+
+ ksa.ksa_handler = vg_skss.skss_per_sig[sig].skss_handler;
+ ksa.ksa_flags = vg_skss.skss_per_sig[sig].skss_flags;
+ vg_assert(ksa.ksa_flags & VKI_SA_ONSTACK);
+ VG_(ksigfillset)( &ksa.ksa_mask );
+ VG_(ksigdelset)( &ksa.ksa_mask, VKI_SIGKILL );
+ VG_(ksigdelset)( &ksa.ksa_mask, VKI_SIGSTOP );
+ ksa.ksa_restorer = NULL;
+
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "setting ksig %d to: hdlr 0x%x, flags 0x%x, "
+ "mask(63..0) 0x%x 0x%x",
+ sig, ksa.ksa_handler,
+ ksa.ksa_flags,
+ ksa.ksa_mask.ws[1],
+ ksa.ksa_mask.ws[0]
+ );
+
+ res = VG_(ksigaction)( sig, &ksa, &ksa_old );
+ vg_assert(res == 0);
+
+ /* Since we got the old sigaction more or less for free, might
+ as well extract the maximum sanity-check value from it. */
+ if (!force_update) {
+ vg_assert(ksa_old.ksa_handler
+ == skss_old.skss_per_sig[sig].skss_handler);
+ vg_assert(ksa_old.ksa_flags
+ == skss_old.skss_per_sig[sig].skss_flags);
+ vg_assert(ksa_old.ksa_restorer
+ == NULL);
+ VG_(ksigaddset)( &ksa_old.ksa_mask, VKI_SIGKILL );
+ VG_(ksigaddset)( &ksa_old.ksa_mask, VKI_SIGSTOP );
+ vg_assert(VG_(kisfullsigset)( &ksa_old.ksa_mask ));
+ }
+ }
+
+ /* Just set the new sigmask, even if it's no different from the
+ old, since we have to do this anyway, to unblock the host
+ signals. */
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "setting kmask(63..0) to 0x%x 0x%x",
+ vg_skss.skss_sigmask.ws[1],
+ vg_skss.skss_sigmask.ws[0]
+ );
+
+ VG_(restore_all_host_signals)( &vg_skss.skss_sigmask );
+}
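Both call sites in this patch observe the IMPORTANT NOTE above; the
required discipline for any SCSS update, sketched here (not new code,
just the pattern already used by do_sigwait and VG_(do__NR_sigaction)):

   vki_ksigset_t irrelevant_sigmask;
   VG_(block_all_host_signals)( &irrelevant_sigmask );  /* ALL blocked */
   /* ... modify vg_scss and/or thread sig_mask / sigs_waited_for ... */
   VG_(handle_SCSS_change)( False /* lazy update */ );
   /* handle_SCSS_change() ends by installing the freshly computed
      vg_skss.skss_sigmask, which unblocks the host signals again. */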
+
+
+/* ---------------------------------------------------------------------
+ Update/query SCSS in accordance with client requests.
+ ------------------------------------------------------------------ */
+
+void VG_(do__NR_sigaction) ( ThreadId tid )
+{
+ Int signo;
+ vki_ksigaction* new_act;
+ vki_ksigaction* old_act;
+ vki_ksigset_t irrelevant_sigmask;
+
+ vg_assert(VG_(is_valid_tid)(tid));
+ signo = VG_(threads)[tid].m_ebx; /* int sigNo */
+ new_act = (vki_ksigaction*)(VG_(threads)[tid].m_ecx);
+ old_act = (vki_ksigaction*)(VG_(threads)[tid].m_edx);
+
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugExtraMsg,
+ "__NR_sigaction: tid %d, sigNo %d, "
+ "new 0x%x, old 0x%x, new flags 0x%x",
+ tid, signo, (UInt)new_act, (UInt)old_act,
+ (UInt)(new_act ? new_act->ksa_flags : 0) );
+
+ /* Rule out various error conditions. The aim is to ensure that if
+ when the call is passed to the kernel it will definitely
+ succeed. */
+
+ /* Reject out-of-range signal numbers. */
+ if (signo < 1 || signo > VKI_KNSIG) goto bad_signo;
+
+ /* Reject attempts to set a handler (or set ignore) for SIGKILL
+ or SIGSTOP. */
+ if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
+ && new_act
+ && new_act->ksa_handler != VKI_SIG_DFL)
+ goto bad_sigkill_or_sigstop;
+
+ /* If the client supplied non-NULL old_act, copy the relevant SCSS
+ entry into it. */
+ if (old_act) {
+ old_act->ksa_handler = vg_scss.scss_per_sig[signo].scss_handler;
+ old_act->ksa_flags = vg_scss.scss_per_sig[signo].scss_flags;
+ old_act->ksa_mask = vg_scss.scss_per_sig[signo].scss_mask;
+ old_act->ksa_restorer = vg_scss.scss_per_sig[signo].scss_restorer;
+ }
+
+ /* And now copy new SCSS entry from new_act. */
+ if (new_act) {
+ vg_scss.scss_per_sig[signo].scss_handler = new_act->ksa_handler;
+ vg_scss.scss_per_sig[signo].scss_flags = new_act->ksa_flags;
+ vg_scss.scss_per_sig[signo].scss_mask = new_act->ksa_mask;
+ vg_scss.scss_per_sig[signo].scss_restorer = new_act->ksa_restorer;
+ }
+
+ /* All happy bunnies ... */
+ if (new_act) {
+ VG_(block_all_host_signals)( &irrelevant_sigmask );
+ VG_(handle_SCSS_change)( False /* lazy update */ );
+ }
+ SET_EAX(tid, 0);
+ return;
+
+ bad_signo:
+ VG_(message)(Vg_UserMsg,
+ "Warning: bad signal number %d in __NR_sigaction.",
+ signo);
+ SET_EAX(tid, -VKI_EINVAL);
+ return;
+
+ bad_sigkill_or_sigstop:
+ VG_(message)(Vg_UserMsg,
+ "Warning: attempt to set %s handler in __NR_sigaction.",
+ signo == VKI_SIGKILL ? "SIGKILL" : "SIGSTOP" );
+
+ SET_EAX(tid, -VKI_EINVAL);
+ return;
+}
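
Aside: the essential contract of routing __NR_sigaction through SCSS
is that the client reads back exactly what it previously installed,
regardless of how the kernel-side (SKSS) entry has been mangled. A
minimal sketch of that contract, for one illustrative signal:

   struct entry { void (*handler)(int); int flags; };

   static struct entry scss_entry;      /* what the client asked for */

   static void client_sigaction ( const struct entry* new_e,
                                  struct entry* old_e )
   {
      if (old_e) *old_e = scss_entry;   /* report the client's own view */
      if (new_e) scss_entry = *new_e;   /* record it; the kernel side is
                                           synced lazily, afterwards */
   }
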
- In this case, the value VG_INVALID_THREADID indicates the signal is
- not directed at a specific thread and so should be delivered to any
- thread whose signal mask (ThreadState.sig_mask) field allows it.
- Any other value indicates that the signal should be delivered only
- to that specific thread, as some point in time when the thread has
- not blocked the signal. It remains pending until then. */
static
-ThreadId vg_sig_threadid[1+VKI_KNSIG];
+void do_sigprocmask_bitops ( Int vki_how,
+ vki_ksigset_t* orig_set,
+ vki_ksigset_t* modifier )
+{
+ switch (vki_how) {
+ case VKI_SIG_BLOCK:
+ VG_(ksigaddset_from_set)( orig_set, modifier );
+ break;
+ case VKI_SIG_UNBLOCK:
+ VG_(ksigdelset_from_set)( orig_set, modifier );
+ break;
+ case VKI_SIG_SETMASK:
+ *orig_set = *modifier;
+ break;
+ default:
+ VG_(panic)("do_sigprocmask_bitops");
+ break;
+ }
+}
+
+
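
Aside: the three `how' operations written out with POSIX sigset
primitives, assuming Linux's NSIG; do_sigprocmask_bitops does the same
job on vki_ksigset_t via VG_(ksigaddset_from_set) and friends.

   #include <signal.h>

   static void apply_how ( int how, sigset_t* cur, const sigset_t* mod )
   {
      int s;
      for (s = 1; s < NSIG; s++) {
         if (sigismember(mod, s)) {
            if (how == SIG_UNBLOCK) sigdelset(cur, s); /* cur &= ~mod */
            else                    sigaddset(cur, s); /* BLOCK, SETMASK */
         } else {
            if (how == SIG_SETMASK) sigdelset(cur, s); /* cur = mod */
         }
      }
   }
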
+/* Handle blocking mask set/get uniformly for single threads and for
+ the process as a whole. If tid==VG_INVALID_THREADID, this is
+ really __NR_sigprocmask, in which case the requested change is
+ applied to the masks of all live threads, and "oldset", if
+ non-NULL, receives the mask of the root thread (1). Otherwise,
+ tid denotes a valid thread, and we just set/get its mask.
-/* For each signal that the client installed a handler for (ie, for
- those for which the vg_sighandler entry is non-VG_SH_NOHANDLER and
- non-VG_SH_FAKEHANDLER), record whether or not the client asked for
- syscalls to be restartable (SA_RESTART) if interrupted by this
- signal. We need to consult this when a signal returns, if it
- should happen that the signal which we delivered has interrupted a
- system call. */
-static
-Bool vg_sig_sarestart[1+VKI_KNSIG];
+ Note that the thread signal masks are an implicit part of SCSS,
+ which is why this routine is allowed to mess with them.
+*/
+static
+void do_setmask ( ThreadId tid,
+ Int how,
+ vki_ksigset_t* newset,
+ vki_ksigset_t* oldset )
+{
+ vki_ksigset_t irrelevant_sigmask;
+
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "do_setmask: tid = %d (0 means ALL), how = %d (%s), set = %p",
+ tid,
+ how,
+ how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
+ how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
+ how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
+ newset
+ );
+
+ if (tid == VG_INVALID_THREADID) {
+ /* Behave as if __NR_sigprocmask. */
+ if (oldset) {
+ /* A bit fragile. Should do better here really. */
+ vg_assert(VG_(threads)[1].status != VgTs_Empty);
+ *oldset = VG_(threads)[1].sig_mask;
+ }
+ if (newset) {
+ ThreadId tidd;
+ for (tidd = 1; tidd < VG_N_THREADS; tidd++) {
+ if (VG_(threads)[tidd].status == VgTs_Empty)
+ continue;
+ do_sigprocmask_bitops (
+ how, &VG_(threads)[tidd].sig_mask, newset );
+ }
+ }
+ } else {
+ /* Just do this thread. */
+ vg_assert(VG_(is_valid_tid)(tid));
+ if (oldset)
+ *oldset = VG_(threads)[tid].sig_mask;
+ if (newset)
+ do_sigprocmask_bitops (
+ how, &VG_(threads)[tid].sig_mask, newset );
+ }
+
+ if (newset) {
+ VG_(block_all_host_signals)( &irrelevant_sigmask );
+ VG_(handle_SCSS_change)( False /* lazy update */ );
+ }
+}
+
+
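
Aside: the tid == VG_INVALID_THREADID convention, sketched with a
hypothetical mask_of[]/alive[] pair standing in for VG_(threads);
apply_how is the helper from the previous sketch.

   #include <signal.h>

   enum { MAX_THREADS = 64, ROOT_TID = 1 };

   static sigset_t mask_of[MAX_THREADS];  /* per-thread blocking masks */
   static int      alive[MAX_THREADS];

   static void apply_how ( int how, sigset_t* cur, const sigset_t* mod );

   static void set_mask ( int tid /* 0 == all threads */, int how,
                          const sigset_t* set, sigset_t* oldset )
   {
      int t;
      if (tid == 0) {
         /* __NR_sigprocmask: report the root thread's mask; apply the
            change to every live thread. */
         if (oldset) *oldset = mask_of[ROOT_TID];
         if (set)
            for (t = 1; t < MAX_THREADS; t++)
               if (alive[t]) apply_how(how, &mask_of[t], set);
      } else {
         /* pthread_sigmask: just this one thread. */
         if (oldset) *oldset = mask_of[tid];
         if (set) apply_how(how, &mask_of[tid], set);
      }
   }
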
+void VG_(do__NR_sigprocmask) ( ThreadId tid,
+ Int how,
+ vki_ksigset_t* set,
+ vki_ksigset_t* oldset )
+{
+ if (how == VKI_SIG_BLOCK || how == VKI_SIG_UNBLOCK
+ || how == VKI_SIG_SETMASK) {
+ vg_assert(VG_(is_valid_tid)(tid));
+ do_setmask ( VG_INVALID_THREADID, how, set, oldset );
+ /* Syscall returns 0 (success) to its thread. */
+ SET_EAX(tid, 0);
+ } else {
+ VG_(message)(Vg_DebugMsg,
+ "sigprocmask: unknown `how' field %d", how);
+ SET_EAX(tid, -VKI_EINVAL);
+ }
+}
+
+
+void VG_(do_pthread_sigmask_SCSS_upd) ( ThreadId tid,
+ Int how,
+ vki_ksigset_t* set,
+ vki_ksigset_t* oldset )
+{
+ /* Assume that how has been validated by caller. */
+ vg_assert(how == VKI_SIG_BLOCK || how == VKI_SIG_UNBLOCK
+ || how == VKI_SIG_SETMASK);
+ vg_assert(VG_(is_valid_tid)(tid));
+ do_setmask ( tid, how, set, oldset );
+ /* The request return code is set in do_pthread_sigmask. */
+}
+
+
+void VG_(send_signal_to_thread) ( ThreadId thread, Int sig )
+{
+ Int res;
+ vg_assert(VG_(is_valid_tid)(thread));
+ vg_assert(sig >= 1 && sig <= VKI_KNSIG);
+
+ switch ((UInt)(vg_scss.scss_per_sig[sig].scss_handler)) {
+
+ case VKI_SIG_IGN:
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "send_signal %d to_thread %d: IGN, ignored", sig, thread );
+ break;
+
+ case VKI_SIG_DFL:
+ /* This is the tricky case. Since we don't handle default
+ actions, the simple thing is to send someone round to the
+ front door and signal there. Then the kernel will do
+ whatever it does with the default action. */
+ res = VG_(kill)( VG_(getpid)(), sig );
+ vg_assert(res == 0);
+ break;
+
+ default:
+ if (!vg_dcss.dcss_sigpending[sig]) {
+ vg_dcss.dcss_sigpending[sig] = True;
+ vg_dcss.dcss_destthread[sig] = thread;
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "send_signal %d to_thread %d: now pending", sig, thread );
+ } else {
+ if (vg_dcss.dcss_destthread[sig] == thread) {
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "send_signal %d to_thread %d: already pending ... "
+ "discarded", sig, thread );
+ } else {
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "send_signal %d to_thread %d: was pending for %d, "
+ "now pending for %d",
+ sig, thread, vg_dcss.dcss_destthread[sig], thread );
+ vg_dcss.dcss_destthread[sig] = thread;
+ }
+ }
+ }
+}
+
+
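
Aside: the SIG_DFL case in isolation. Default actions are not
simulated at all; the signal is simply re-raised at the real process,
and the kernel then does whatever the default is (terminate, dump
core, stop, ...).

   #include <signal.h>
   #include <unistd.h>

   static void do_default_action ( int sig )
   {
      kill( getpid(), sig );   /* kernel performs the default action */
   }
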
+/* ---------------------------------------------------------------------
+ LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
+ ------------------------------------------------------------------ */
+
/* ---------------------------------------------------------------------
Handy utilities to block/restore all host signals.
------------------------------------------------------------------ */
}
/* Restore the blocking mask using the supplied saved one. */
-void VG_(restore_host_signals) ( /* IN */ vki_ksigset_t* saved_mask )
+void VG_(restore_all_host_signals) ( /* IN */ vki_ksigset_t* saved_mask )
{
Int ret;
ret = VG_(ksigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
VgSigFrame* frame;
ThreadState* tst;
- tst = VG_(get_thread_state)(tid);
+ vg_assert(VG_(is_valid_tid)(tid));
+ tst = & VG_(threads)[tid];
esp = tst->m_esp;
esp -= sizeof(VgSigFrame);
/* Set the thread so it will next run the handler. */
tst->m_esp = esp;
- tst->m_eip = (Addr)vg_sigpending[sigNo];
+ tst->m_eip = (Addr)vg_scss.scss_per_sig[sigNo].scss_handler;
/* This thread needs to be marked runnable, but we leave that for
the caller to do. */
VgSigFrame* frame;
ThreadState* tst;
- tst = VG_(get_thread_state)(tid);
+ vg_assert(VG_(is_valid_tid)(tid));
+ tst = & VG_(threads)[tid];
/* Correctly reestablish the frame base address. */
esp = tst->m_esp;
before the signal was delivered. */
sigNo = vg_pop_signal_frame(tid);
- /* You would have thought that the following assertion made sense
- here:
-
- vg_assert(vg_sigpending[sigNo] == VG_SP_SIGRUNNING);
-
- Alas, you would be wrong. If a sigprocmask has been intercepted
- and it unblocks this signal, then vg_sigpending[sigNo] will
- either be VG_SIGIDLE, or (worse) another instance of it will
- already have arrived, so that the stored value is that of the
- handler.
-
- Note that these anomalies can only occur when a signal handler
- unblocks its own signal inside itself AND THEN RETURNS anyway
- (which seems a bizarre thing to do).
-
- Ho Hum. This seems like a race condition which surely isn't
- handled correctly. */
-
vg_assert(sigNo >= 1 && sigNo <= VKI_KNSIG);
- vg_sigpending[sigNo] = VG_SP_SIGIDLE;
/* Unlock and return. */
- VG_(restore_host_signals)( &saved_procmask );
+ VG_(restore_all_host_signals)( &saved_procmask );
/* The scheduler can now resume this thread, or perhaps some other.
Tell the scheduler whether any syscall interrupted by this
signal should be restarted, if possible, or not. */
- return vg_sig_sarestart[sigNo];
+ return
+ (vg_scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
+ ? True
+ : False;
}
{
vki_ksigset_t saved_procmask;
Int sigNo;
- Bool found;
+ Bool found, scss_changed;
ThreadState* tst;
ThreadId tid;
- /* A cheap check. We don't need to have exclusive access
- to the queue, because in the worst case, vg_oursignalhandler
- will add signals, causing us to return, thinking there
- are no signals to deliver, when in fact there are some.
- A subsequent call here will handle the signal(s) we missed.
- */
+ /* A cheap check. We don't need to have exclusive access to the
+ pending array, because in the worst case, vg_oursignalhandler
+ will add signals, causing us to return, thinking there are no
+ signals to deliver, when in fact there are some. A subsequent
+ call here will handle the signal(s) we missed. */
found = False;
for (sigNo = 1; sigNo <= VKI_KNSIG; sigNo++)
- if (vg_sigpending[sigNo] != VG_SP_SIGIDLE
- && vg_sigpending[sigNo] != VG_SP_SIGRUNNING)
+ if (vg_dcss.dcss_sigpending[sigNo])
found = True;
if (!found) return False;
blocking all the host's signals. That means vg_oursignalhandler
can't run whilst we are messing with stuff.
*/
+ scss_changed = False;
VG_(block_all_host_signals)( &saved_procmask );
/* Look for signals to deliver ... */
for (sigNo = 1; sigNo <= VKI_KNSIG; sigNo++) {
- if (vg_sigpending[sigNo] == VG_SP_SIGIDLE
- || vg_sigpending[sigNo] == VG_SP_SIGRUNNING) continue;
+
+ if (!vg_dcss.dcss_sigpending[sigNo])
+ continue;
+
/* sigNo is pending. Try to find a suitable thread to deliver
it to. */
-
/* First off, are any threads in sigwait() for the signal?
If so just give to one of them and have done. */
for (tid = 1; tid < VG_N_THREADS; tid++) {
- tst = VG_(get_thread_state_UNCHECKED)(tid);
+ tst = & VG_(threads)[tid];
if (tst->status != VgTs_WaitSIG)
continue;
if (VG_(ksigismember)(&(tst->sigs_waited_for), sigNo))
}
if (tid < VG_N_THREADS) {
UInt* sigwait_args;
- tst = VG_(get_thread_state)(tid);
+ tst = & VG_(threads)[tid];
if (VG_(clo_trace_signals) || VG_(clo_trace_sched))
VG_(message)(Vg_DebugMsg,
"releasing thread %d from sigwait() due to signal %d",
if (NULL != (UInt*)(sigwait_args[2])) {
*(Int*)(sigwait_args[2]) = sigNo;
if (VG_(clo_instrument))
- VGM_(make_readable)( (Addr)(sigwait_args[2]), sizeof(UInt));
+ VGM_(make_readable)( (Addr)(sigwait_args[2]),
+ sizeof(UInt));
}
- tst->m_edx = 0;
- tst->sh_edx = VGM_WORD_VALID;
+ SET_EDX(tid, 0);
tst->status = VgTs_Runnable;
VG_(ksigemptyset)(&tst->sigs_waited_for);
- VG_(update_sigstate_following_WaitSIG_change)();
- vg_sigpending[sigNo] = VG_SP_SIGIDLE;
+ scss_changed = True;
+ vg_dcss.dcss_sigpending[sigNo] = False;
+ vg_dcss.dcss_destthread[sigNo] = VG_INVALID_THREADID;
+ /*paranoia*/
continue; /* for (sigNo = 1; ...) loop */
}
/* Well, nobody appears to be sigwaiting for it. So we really
- are delivering the signal in the usual way, and so the
- handler better be valid. */
- vg_assert(vg_sigpending[sigNo] != VG_SP_SIGIDLE);
- vg_assert(vg_sigpending[sigNo] != VG_SH_FAKEHANDLER);
- vg_assert(vg_sigpending[sigNo] != VG_SP_SIGRUNNING);
+ are delivering the signal in the usual way; in that case the
+ client must have a real handler for this signal. */
+ vg_assert(vg_dcss.dcss_sigpending[sigNo]);
+ vg_assert(vg_scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN
+ && vg_scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);
- tid = vg_sig_threadid[sigNo];
+ tid = vg_dcss.dcss_destthread[sigNo];
vg_assert(tid == VG_INVALID_THREADID
|| VG_(is_valid_tid)(tid));
if (tid != VG_INVALID_THREADID) {
/* directed to a specific thread; ensure it actually still
exists ... */
- tst = VG_(get_thread_state_UNCHECKED)(tid);
+ tst = & VG_(threads)[tid];
if (tst->status == VgTs_Empty) {
/* dead, for whatever reason; ignore this signal */
if (VG_(clo_trace_signals))
VG_(message)(Vg_DebugMsg,
"discarding signal %d for nonexistent thread %d",
sigNo, tid );
- vg_sigpending[sigNo] = VG_SP_SIGIDLE;
+ vg_dcss.dcss_sigpending[sigNo] = False;
+ vg_dcss.dcss_destthread[sigNo] = VG_INVALID_THREADID;
continue; /* for (sigNo = 1; ...) loop */
}
} else {
/* not directed to a specific thread, so search for a
suitable candidate */
for (tid = 1; tid < VG_N_THREADS; tid++) {
- tst = VG_(get_thread_state_UNCHECKED)(tid);
+ tst = & VG_(threads)[tid];
if (tst->status != VgTs_Empty
&& !VG_(ksigismember)(&(tst->sig_mask), sigNo))
break;
signal handler with the frame on top of the client's stack,
as it expects. */
vg_assert(VG_(is_valid_tid)(tid));
- vg_assert(VG_(get_thread_state)(tid)->status != VgTs_Empty);
vg_push_signal_frame ( tid, sigNo );
- VG_(get_thread_state)(tid)->status = VgTs_Runnable;
+ VG_(threads)[tid].status = VgTs_Runnable;
/* Signify that the signal has been delivered. */
- vg_sigpending[sigNo] = VG_SP_SIGRUNNING;
+ vg_dcss.dcss_sigpending[sigNo] = False;
+ vg_dcss.dcss_destthread[sigNo] = VG_INVALID_THREADID;
+
+ if (vg_scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONESHOT) {
+ /* Do the ONESHOT thing. */
+ vg_scss.scss_per_sig[sigNo].scss_handler = VKI_SIG_DFL;
+ scss_changed = True;
+ }
}
/* Unlock and return. */
- VG_(restore_host_signals)( &saved_procmask );
- return True;
-}
-
-
-/* A thread is about to exit. Forget about any signals which are
- still pending for it. */
-void VG_(notify_signal_machinery_of_thread_exit) ( ThreadId tid )
-{
- Int sigNo;
- for (sigNo = 1; sigNo <= VKI_KNSIG; sigNo++) {
- if (vg_sigpending[sigNo] == VG_SP_SIGIDLE
- || vg_sigpending[sigNo] == VG_SP_SIGRUNNING)
- continue;
- if (vg_sig_threadid[sigNo] == tid) {
- /* sigNo is pending for tid, which is just about to disappear.
- So forget about the pending signal. */
- vg_sig_threadid[sigNo] = VG_INVALID_THREADID;
- vg_sigpending[sigNo] = VG_SP_SIGIDLE;
- if (VG_(clo_trace_signals))
- VG_(message)(Vg_DebugMsg,
- "discarding pending signal %d due to thread %d exiting",
- sigNo, tid );
- }
+ if (scss_changed) {
+ /* handle_SCSS_change computes a new kernel blocking mask and
+ applies that. */
+ VG_(handle_SCSS_change)( False /* lazy update */ );
+ } else {
+ /* No SCSS change, so just restore the existing blocking
+ mask. */
+ VG_(restore_all_host_signals)( &saved_procmask );
}
+
+ return True;
}
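
Aside: the recipient-selection policy of VG_(deliver_signals), reduced
to its decision order; the struct and arrays below are hypothetical,
just enough to show the three cases.

   #include <stdbool.h>

   enum { MAX_THREADS = 64, INVALID_TID = 0 };

   struct thr { bool alive; bool sigwaiting; bool blocks_it; };

   static int choose_recipient ( struct thr t[], int dest /* 0 == any */ )
   {
      int tid;
      /* 1. A thread sigwait()ing on the signal takes it directly;
            no signal frame is pushed. */
      for (tid = 1; tid < MAX_THREADS; tid++)
         if (t[tid].alive && t[tid].sigwaiting)
            return tid;
      /* 2. A directed signal goes to its target thread, or is
            discarded if that thread has exited. */
      if (dest != INVALID_TID)
         return t[dest].alive ? dest : INVALID_TID;
      /* 3. Otherwise, any live thread which has not blocked it. */
      for (tid = 1; tid < MAX_THREADS; tid++)
         if (t[tid].alive && !t[tid].blocks_it)
            return tid;
      return INVALID_TID;   /* nobody suitable; stays pending */
   }
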
to have mutual exclusion when adding stuff to the queue. */
static
-void VG_(oursignalhandler) ( Int sigNo )
+void vg_oursignalhandler ( Int sigNo )
{
+ ThreadId tid;
Int dummy_local;
+ Bool sane;
vki_ksigset_t saved_procmask;
/*
VG_(block_all_host_signals)( &saved_procmask );
- if (vg_sighandler[sigNo] == VG_SH_NOHANDLER) {
+ /* This is a sanity check. A signal has arrived either because
+ the client set a handler for it, or because some thread is
+ sigwait()ing on it. Establish that at least one of these is the
+ case. */
+ sane = False;
+ if (vg_scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL
+ && vg_scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN) {
+ sane = True;
+ } else {
+ for (tid = 1; tid < VG_N_THREADS; tid++) {
+ if (VG_(threads)[tid].status != VgTs_WaitSIG)
+ continue;
+ if (VG_(ksigismember)(&VG_(threads)[tid].sigs_waited_for, sigNo))
+ sane = True;
+ }
+ }
+ if (!sane) {
if (VG_(clo_trace_signals)) {
VG_(add_to_msg)("unexpected!");
VG_(end_msg)();
that matters. */
VG_(panic)("vg_oursignalhandler: unexpected signal");
}
+ /* End of the sanity check. */
/* Decide what to do with it. */
- if (vg_sigpending[sigNo] == VG_SP_SIGRUNNING) {
- /* Already running; ignore it. */
- if (VG_(clo_trace_signals)) {
- VG_(add_to_msg)("already running; discarded" );
- VG_(end_msg)();
- }
- }
- else
- if (vg_sigpending[sigNo] != VG_SP_SIGRUNNING
- && vg_sigpending[sigNo] != VG_SP_SIGIDLE) {
- /* Not running and not idle == pending; ignore it. */
+ if (vg_dcss.dcss_sigpending[sigNo]) {
+ /* pending; ignore it. */
if (VG_(clo_trace_signals)) {
VG_(add_to_msg)("already pending; discarded" );
VG_(end_msg)();
}
- }
- else {
+ } else {
/* Ok, we'd better deliver it to the client. */
- vg_assert(vg_sigpending[sigNo] == VG_SP_SIGIDLE);
/* Queue it up for delivery at some point in the future. */
- vg_assert(vg_sighandler[sigNo] != VG_SH_NOHANDLER);
- vg_sigpending[sigNo] = vg_sighandler[sigNo];
- vg_sig_threadid[sigNo] = VG_INVALID_THREADID;
+ vg_dcss.dcss_sigpending[sigNo] = True;
+ vg_dcss.dcss_destthread[sigNo] = VG_INVALID_THREADID;
if (VG_(clo_trace_signals)) {
VG_(add_to_msg)("queued" );
VG_(end_msg)();
/* We've finished messing with the queue, so re-enable host
signals. */
- VG_(restore_host_signals)( &saved_procmask );
+ VG_(restore_all_host_signals)( &saved_procmask );
- if ((sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS
- || sigNo == VKI_SIGFPE || sigNo == VKI_SIGILL)) {
+ if (sigNo == VKI_SIGSEGV || sigNo == VKI_SIGBUS
+ || sigNo == VKI_SIGFPE || sigNo == VKI_SIGILL) {
/* Can't continue; must longjmp back to the scheduler and thus
enter the sighandler immediately. */
VG_(longjmpd_on_signal) = sigNo;
}
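
Aside: the four faulting signals cannot be queued -- execution cannot
continue past the faulting instruction -- so the handler must hand
control back to the scheduler immediately. A generic sketch of the
longjmp mechanism involved:

   #include <setjmp.h>
   #include <signal.h>

   static sigjmp_buf back_to_scheduler;
   static volatile sig_atomic_t sync_sig;   /* which signal fired */

   static void on_sync_signal ( int sig )
   {
      sync_sig = sig;
      siglongjmp( back_to_scheduler, 1 );   /* does not return */
   }

   /* Scheduler side: if (sigsetjmp(back_to_scheduler, 1) != 0),
      deliver sync_sig to the current thread straight away. */
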
-/* Copy the process' real signal state to the sim state. Whilst
- doing this, block all real signals.
+/* At startup, copy the process' real signal state to the SCSS.
+ Whilst doing this, block all real signals. Then calculate SKSS and
+ set the kernel to that. Also initialise DCSS.
*/
void VG_(sigstartup_actions) ( void )
{
saved_procmask remembers the previous mask. */
VG_(block_all_host_signals)( &saved_procmask );
+ /* Copy per-signal settings to SCSS. */
+ for (i = 1; i <= VKI_KNSIG; i++) {
+
+ /* Get the old host action */
+ ret = VG_(ksigaction)(i, NULL, &sa);
+ vg_assert(ret == 0);
+
+ if (VG_(clo_trace_signals))
+ VG_(printf)("snaffling handler 0x%x for signal %d\n",
+ (Addr)(sa.ksa_handler), i );
+
+ vg_scss.scss_per_sig[i].scss_handler = sa.ksa_handler;
+ vg_scss.scss_per_sig[i].scss_flags = sa.ksa_flags;
+ vg_scss.scss_per_sig[i].scss_mask = sa.ksa_mask;
+ vg_scss.scss_per_sig[i].scss_restorer = sa.ksa_restorer;
+ }
+
+ /* Copy the process' signal mask into the root thread. */
+ vg_assert(VG_(threads)[1].status == VgTs_Runnable);
+ VG_(threads)[1].sig_mask = saved_procmask;
+
+ /* Initialise DCSS. */
+ for (i = 1; i <= VKI_KNSIG; i++) {
+ vg_dcss.dcss_sigpending[i] = False;
+ vg_dcss.dcss_destthread[i] = VG_INVALID_THREADID;
+ }
+
/* Register an alternative stack for our own signal handler to run
on. */
altstack_info.ss_sp = &(VG_(sigstack)[0]);
"vg_sigstartup_actions: sigstack installed ok");
}
- /* Set initial state for the signal simulation. */
- for (i = 1; i <= VKI_KNSIG; i++) {
- vg_sighandler[i] = VG_SH_NOHANDLER;
- vg_sigpending[i] = VG_SP_SIGIDLE;
- vg_sig_sarestart[i] = True; /* An easy default */
- vg_sig_threadid[i] = VG_INVALID_THREADID;
- }
-
- for (i = 1; i <= VKI_KNSIG; i++) {
-
- /* Get the old host action */
- ret = VG_(ksigaction)(i, NULL, &sa);
- vg_assert(ret == 0);
-
- /* If there's already a handler set, record it, then route the
- signal through to our handler. */
- if (sa.ksa_handler != VKI_SIG_IGN
- && sa.ksa_handler != VKI_SIG_DFL) {
- if (VG_(clo_trace_signals))
- VG_(printf)("snaffling handler 0x%x for signal %d\n",
- (Addr)(sa.ksa_handler), i );
- if ((sa.ksa_flags & VKI_SA_ONSTACK) != 0)
- VG_(unimplemented)
- ("signals on an alternative stack (SA_ONSTACK)");
-
- vg_sighandler[i] = sa.ksa_handler;
- sa.ksa_handler = &VG_(oursignalhandler);
- /* Save the restart status, then set it to restartable. */
- vg_sig_sarestart[i]
- = (sa.ksa_flags & VKI_SA_RESTART) ? True : False;
- sa.ksa_flags |= VKI_SA_RESTART;
-
- ret = VG_(ksigaction)(i, &sa, NULL);
- vg_assert(ret == 0);
- }
- }
-
/* DEBUGGING HACK */
/* VG_(ksignal)(VKI_SIGUSR1, &VG_(oursignalhandler)); */
- /* Finally, restore the blocking mask. */
- VG_(restore_host_signals)( &saved_procmask );
+ /* Calculate SKSS and apply it. This also sets the initial kernel
+ mask we need to run with. */
+ VG_(handle_SCSS_change)( True /* forced update */ );
}
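
Aside: the startup snapshot relies on the standard `query' form of
sigaction -- passing a NULL new action hands back the current one
without installing anything.

   #include <signal.h>
   #include <stdio.h>

   static void snapshot_handlers ( void )
   {
      int sig;
      for (sig = 1; sig < NSIG; sig++) {
         struct sigaction old;
         if (sigaction(sig, NULL, &old) != 0)
            continue;              /* not a queryable signal number */
         printf("signal %2d: handler %p, flags 0x%x\n",
                sig, (void*)old.sa_handler, (unsigned)old.sa_flags);
      }
   }
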
VG_(block_all_host_signals)( &saved_procmask );
- /* copy the sim signal actions to the real ones. */
- /* Hmm, this isn't accurate. Doesn't properly restore the
- SA_RESTART flag nor SA_ONSTACK. */
+ /* Copy per-signal settings from SCSS. */
for (i = 1; i <= VKI_KNSIG; i++) {
- if (i == VKI_SIGKILL || i == VKI_SIGSTOP) continue;
- if (vg_sighandler[i] == VG_SH_NOHANDLER
- || vg_sighandler[i] == VG_SH_FAKEHANDLER) continue;
- ret = VG_(ksigaction)(i, NULL, &sa);
- vg_assert(ret == 0);
- sa.ksa_handler = vg_sighandler[i];
- ret = VG_(ksigaction)(i, &sa, NULL);
- }
-
- VG_(restore_host_signals)( &saved_procmask );
-}
-
-
-void VG_(update_sigstate_following_WaitSIG_change) ( void )
-{
- ThreadId tid;
- Int sig;
- vki_ksigset_t global_waitsigs;
- ThreadState* tst;
- VG_(ksigemptyset)( &global_waitsigs );
+ sa.ksa_handler = vg_scss.scss_per_sig[i].scss_handler;
+ sa.ksa_flags = vg_scss.scss_per_sig[i].scss_flags;
+ sa.ksa_mask = vg_scss.scss_per_sig[i].scss_mask;
+ sa.ksa_restorer = vg_scss.scss_per_sig[i].scss_restorer;
- /* Calculate the new set of signals which are being sigwait()d for
- by at least one thread. */
- for (tid = 1; tid < VG_N_THREADS; tid++) {
- tst = VG_(get_thread_state_UNCHECKED)(tid);
- if (tst->status != VgTs_WaitSIG)
- continue;
- vg_assert(! VG_(kisemptysigset)(
- & tst->sigs_waited_for ));
- VG_(ksigaddset_from_set)( & global_waitsigs,
- & tst->sigs_waited_for );
- }
-
- /* Now adjust vg_sighandler accordingly.
-
- For each signal s: (lapses into pseudo-Haskell ...)
-
- if s `elem` global_waitsigs[s]
- -- at least one thread is sigwait()ing for s. That means that at
- least _some_ kind of handler is needed.
- case vg_sighandler[s] of
- VG_SH_NOHANDLER -> install our own handler and set waitsigs[s]
- to VG_SH_FAKEHANDLER
- VG_SH_FAKEHANDLER -> there's already a handler. Do nothing.
- real_handler -> the client had a handler here anyway, so
- just leave it alone, ie, do nothing.
-
- if s `notElem` global_waitsigs[s]
- -- we're not sigwait()ing for s (any longer).
- case vg_sighandler[s] of
- VG_SH_FAKEHANDLER -> there is a handler installed, but ONLY for
- the purposes of handling sigwait(). So set it back to
- VG_SH_NOHANDLER and tell the kernel that we want to do the
- default action for s from now on, ie, we wish to deregister
- OUR handle.
- VG_SH_NOHANDLER -> there was no handler anyway. Do nothing.
- real_handler -> the client had a handler here anyway, so
- just leave it alone, ie, do nothing.
-
- */
-
- for (sig = 1; sig <= VKI_KNSIG; sig++) {
- if (VG_(ksigismember)( & global_waitsigs, sig )) {
- if (vg_sighandler[sig] == VG_SH_NOHANDLER
- /* && existing kernel handler is SIG_DFL */) {
- /* add handler */
- /* We really only ought to do this if the existing kernel
- handler is SIG_DFL. That's because when queried by the
- client's sigaction, that's what we claim it is if a fake
- handler has been installed. Or (perhaps better)
- remember the kernel's setting.
- */
- VG_(ksignal)( sig, &VG_(oursignalhandler) );
- vg_sighandler[sig] = VG_SH_FAKEHANDLER;
- if (VG_(clo_trace_signals)) {
- VG_(message)(Vg_DebugMsg,
- "adding fake handler for signal %d "
- "following WaitSIG change", sig );
- }
- }
- } else {
- if (vg_sighandler[sig] == VG_SH_FAKEHANDLER) {
- /* remove handler */
- VG_(ksignal)( sig, VKI_SIG_DFL);
- vg_sighandler[sig] = VG_SH_NOHANDLER;
- if (VG_(clo_trace_signals)) {
- VG_(message)(Vg_DebugMsg,
- "removing fake handler for signal %d "
- "following WaitSIG change", sig );
- }
- }
- }
- }
-}
-
-/* ---------------------------------------------------------------------
- Handle signal-related syscalls from the simulatee.
- ------------------------------------------------------------------ */
-
-/* Do more error checking? */
-void VG_(do__NR_sigaction) ( ThreadId tid )
-{
- UInt res;
- void* our_old_handler;
- vki_ksigaction* new_action;
- vki_ksigaction* old_action;
- ThreadState* tst = VG_(get_thread_state)( tid );
- UInt param1 = tst->m_ebx; /* int sigNo */
- UInt param2 = tst->m_ecx; /* k_sigaction* new_action */
- UInt param3 = tst->m_edx; /* k_sigaction* old_action */
- new_action = (vki_ksigaction*)param2;
- old_action = (vki_ksigaction*)param3;
-
- if (VG_(clo_trace_signals))
- VG_(message)(Vg_DebugExtraMsg,
- "__NR_sigaction: sigNo %d, "
- "new 0x%x, old 0x%x, new flags 0x%x",
- param1,(UInt)new_action,(UInt)old_action,
- (UInt)(new_action ? new_action->ksa_flags : 0) );
- /* VG_(ppSigProcMask)(); */
-
- /* Rule out various error conditions. The aim is to ensure that if
- the call is passed to the kernel it will definitely succeed. */
-
- /* Reject out-of-range signal numbers. */
- if (param1 < 1 || param1 > VKI_KNSIG) goto bad_signo;
-
- /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
- if ( (param1 == VKI_SIGKILL || param1 == VKI_SIGSTOP)
- && new_action
- && new_action->ksa_handler != VKI_SIG_DFL)
- goto bad_sigkill_or_sigstop;
-
- our_old_handler = vg_sighandler[param1];
- /* VG_(printf)("old handler = 0x%x\n", our_old_handler); */
- /* If a new handler has been specified, mess with its handler. */
- if (new_action) {
- if (new_action->ksa_handler == VKI_SIG_IGN ||
- new_action->ksa_handler == VKI_SIG_DFL) {
- vg_sighandler[param1] = VG_SH_NOHANDLER;
- vg_sigpending[param1] = VG_SP_SIGIDLE;
- /* Dangerous! Could lose signals like this. */
- } else {
- /* VG_(printf)("new handler = 0x%x\n", new_action->ksa_handler); */
- /* The client isn't allowed to use an alternative signal
- stack. We, however, must. */
- if ((new_action->ksa_flags & VKI_SA_ONSTACK) != 0)
- VG_(unimplemented)
- ("signals on an alternative stack (SA_ONSTACK)");
- new_action->ksa_flags |= VKI_SA_ONSTACK;
- vg_sighandler[param1] = new_action->ksa_handler;
- vg_sig_sarestart[param1]
- = (new_action->ksa_flags & VKI_SA_RESTART) ? True : False;
- new_action->ksa_flags |= VKI_SA_RESTART;
- new_action->ksa_handler = &VG_(oursignalhandler);
- }
- }
-
- KERNEL_DO_SYSCALL(tid,res);
- /* VG_(printf)("RES = %d\n", res); */
-
- /* If the client asks for the old handler, maintain our fiction
- by stuffing in the handler it thought it asked for ... */
- if (old_action) {
- if (old_action->ksa_handler == VKI_SIG_IGN ||
- old_action->ksa_handler == VKI_SIG_DFL) {
- /* No old action; we should have a NULL handler. */
- vg_assert(our_old_handler == VG_SH_NOHANDLER);
- } else {
- /* There's a handler. */
- if (param1 != VKI_SIGKILL && param1 != VKI_SIGSTOP) {
- vg_assert(old_action->ksa_handler == &VG_(oursignalhandler));
- vg_assert((old_action->ksa_flags & VKI_SA_ONSTACK) != 0);
- }
- /* Is the handler a fake one which the client doesn't know
- about? */
- if (vg_sighandler[param1] == VG_SH_FAKEHANDLER) {
- /* Yes. Pretend it was in a SIG_DFL state before. */
- old_action->ksa_handler = VKI_SIG_DFL;
- } else {
- old_action->ksa_handler = our_old_handler;
- }
- /* Since the client is not allowed to ask for an alternative
- sig stack, unset the bit for anything we pass back to
- it. */
- old_action->ksa_flags &= ~VKI_SA_ONSTACK;
- /* Restore the SA_RESTART flag to whatever we snaffled. */
- if (vg_sig_sarestart[param1])
- old_action->ksa_flags |= VKI_SA_RESTART;
- else
- old_action->ksa_flags &= ~VKI_SA_RESTART;
- }
- }
- goto good;
-
- good:
- tst->m_eax = (UInt)0;
- return;
-
- bad_signo:
- VG_(message)(Vg_UserMsg,
- "Warning: bad signal number %d in __NR_sigaction.",
- param1);
- VG_(baseBlock)[VGOFF_(m_eax)] = (UInt)(-VKI_EINVAL);
- return;
-
- bad_sigkill_or_sigstop:
- VG_(message)(Vg_UserMsg,
- "Warning: attempt to set %s handler in __NR_sigaction.",
- param1 == VKI_SIGKILL ? "SIGKILL" : "SIGSTOP" );
-
- VG_(baseBlock)[VGOFF_(m_eax)] = (UInt)(-VKI_EINVAL);
- return;
-}
-
-
-/* The kernel handles sigprocmask in the usual way, but we also need
- to inspect it, so as to spot requests to unblock signals. We then
- inspect vg_sigpending, which records the current state of signal
- delivery to the client. The problematic case is when a signal is
- delivered to the client, in which case the relevant vg_sigpending
- slot is set to VG_SIGRUNNING. This inhibits further signal
- deliveries. This mechanism implements the POSIX requirement that a
- signal is blocked in its own handler.
-
- If the handler returns normally, the slot is changed back to
- VG_SIGIDLE, so that further instances of the signal can be
- delivered. The problem occurs when the handler never returns, but
- longjmps. POSIX mandates that you then have to do an explicit
- setprocmask to re-enable the signal. That is what we try and spot
- here. Although the call is passed to the kernel, we also need to
- spot unblocked signals whose state is VG_SIGRUNNING, and change it
- back to VG_SIGIDLE.
-*/
-void VG_(do__NR_sigprocmask) ( Int how, vki_ksigset_t* set )
-{
- Int i;
- if (VG_(clo_trace_signals))
- VG_(message)(Vg_DebugMsg,
- "vg_do__NR_sigprocmask: how = %d (%s), set = %p",
- how,
- how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
- how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
- how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
- set
- );
-
- /* Sometimes this happens. I don't know what it signifies. */
- if (set == NULL)
- return;
+ if (VG_(clo_trace_signals))
+ VG_(printf)("restoring handler 0x%x for signal %d\n",
+ (Addr)(sa.ksa_handler), i );
- /* Not interested in blocking of signals. */
- if (how == VKI_SIG_BLOCK)
- return;
+ /* Install it as the real host action. */
+ ret = VG_(ksigaction)(i, &sa, NULL);
+ vg_assert(ret == 0);
- /* Detect and ignore unknown action. */
- if (how != VKI_SIG_UNBLOCK && how != VKI_SIG_SETMASK) {
- VG_(message)(Vg_DebugMsg,
- "sigprocmask: unknown `how' field %d", how);
- return;
}
- for (i = 1; i <= VKI_KNSIG; i++) {
- Bool unblock_me = False;
- if (how == VKI_SIG_SETMASK) {
- if (!VG_(ksigismember)(set,i))
- unblock_me = True;
- } else { /* how == SIG_UNBLOCK */
- if (VG_(ksigismember)(set,i))
- unblock_me = True;
- }
- if (unblock_me && vg_sigpending[i] == VG_SP_SIGRUNNING) {
- vg_sigpending[i] = VG_SP_SIGIDLE;
- if (VG_(clo_verbosity) > 1)
- VG_(message)(Vg_UserMsg,
- "Warning: unblocking signal %d "
- "due to sigprocmask", i );
- }
- }
+ /* A bit of a kludge -- set the sigmask to that of the root
+ thread. */
+ vg_assert(VG_(threads)[1].status != VgTs_Empty);
+ VG_(restore_all_host_signals)( &VG_(threads)[1].sig_mask );
}
-
/*--------------------------------------------------------------------*/
/*--- end vg_signals.c ---*/
/*--------------------------------------------------------------------*/
void VG_(perform_assumed_nonblocking_syscall) ( ThreadId tid )
{
- Bool sane_before_call = True;
- Bool sane_after_call = True;
- ThreadState* tst = VG_(get_thread_state)( tid );
- UInt syscallno = tst->m_eax;
- UInt arg1 = tst->m_ebx;
- UInt arg2 = tst->m_ecx;
- UInt arg3 = tst->m_edx;
- UInt arg4 = tst->m_esi;
- UInt arg5 = tst->m_edi;
-
+ ThreadState* tst;
+ Bool sane_before_call, sane_after_call;
+ UInt syscallno, arg1, arg2, arg3, arg4, arg5;
/* Do not make this unsigned! */
Int res;
VGP_PUSHCC(VgpSyscall);
+ vg_assert(VG_(is_valid_tid)(tid));
+ sane_before_call = True;
+ sane_after_call = True;
+ tst = & VG_(threads)[tid];
+ syscallno = tst->m_eax;
+ arg1 = tst->m_ebx;
+ arg2 = tst->m_ecx;
+ arg3 = tst->m_edx;
+ arg4 = tst->m_esi;
+ arg5 = tst->m_edi;
+
/* Since buggy syscall wrappers sometimes break this, we may as well
check ourselves. */
if (! VG_(first_and_last_secondaries_look_plausible)())
if (arg3 != (UInt)NULL)
must_be_writable( tst, "sigprocmask(oldset)",
arg3, sizeof(vki_ksigset_t));
+# if SIGNAL_SIMULATION
+ VG_(do__NR_sigprocmask) ( tid,
+ arg1 /*how*/,
+ (vki_ksigset_t*) arg2,
+ (vki_ksigset_t*) arg3 );
+ res = tst->m_eax;
+# else
KERNEL_DO_SYSCALL(tid,res);
+# endif
if (!VG_(is_kerror)(res) && res == 0 && arg3 != (UInt)NULL)
make_readable( arg3, sizeof(vki_ksigset_t));
-# if SIGNAL_SIMULATION
- /* For the reason why both the kernel and Valgrind process
- sigprocmask, see the detailed comment at
- vg_do__NR_sigprocmask(). */
- VG_(do__NR_sigprocmask) ( arg1 /*how*/, (vki_ksigset_t*) arg2 );
-# endif
break;
default:
Int syscallno,
Int* /*IN*/ res )
{
- Bool sane_before_post = True;
- Bool sane_after_post = True;
- ThreadState* tst = VG_(get_thread_state)( tid );
- UInt arg1 = tst->m_ebx;
- UInt arg2 = tst->m_ecx;
- UInt arg3 = tst->m_edx;
+ ThreadState* tst;
+ Bool sane_before_post, sane_after_post;
+ UInt arg1, arg2, arg3;
+
+ VGP_PUSHCC(VgpSyscall);
+
+ vg_assert(VG_(is_valid_tid)(tid));
+ sane_before_post = True;
+ sane_after_post = True;
+ tst = & VG_(threads)[tid];
+ arg1 = tst->m_ebx;
+ arg2 = tst->m_ecx;
+ arg3 = tst->m_edx;
/*
- UInt arg4 = tst->m_esi;
- UInt arg5 = tst->m_edi;
+ arg4 = tst->m_esi;
+ arg5 = tst->m_edi;
*/
- VGP_PUSHCC(VgpSyscall);
if (res != NULL
&& ! VG_(first_and_last_secondaries_look_plausible)())