From: Julian Seward
Date: Tue, 28 May 2002 11:00:01 +0000 (+0000)
Subject: Fix various problems with new thread start / stop / detach stuff.
X-Git-Tag: svn/VALGRIND_1_0_3~139
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=6b9ba9848ab9fc3e1c764b787ad49dcf5e4bf3cb;p=thirdparty%2Fvalgrind.git

Fix various problems with new thread start / stop / detach stuff.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@327
---
diff --git a/coregrind/arch/x86-linux/vg_libpthread.c b/coregrind/arch/x86-linux/vg_libpthread.c
index 52111060ec..b541d8b155 100644
--- a/coregrind/arch/x86-linux/vg_libpthread.c
+++ b/coregrind/arch/x86-linux/vg_libpthread.c
@@ -198,17 +198,17 @@ void vgPlain_unimp ( char* what )
 int pthread_attr_init(pthread_attr_t *attr)
 {
-   static int moans = N_MOANS;
-   if (moans-- > 0)
-      ignored("pthread_attr_init");
+   /* Just initialise the fields which we might look at. */
+   attr->__detachstate = PTHREAD_CREATE_JOINABLE;
    return 0;
 }
 
 int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
 {
-   static int moans = N_MOANS;
-   if (moans-- > 0)
-      ignored("pthread_attr_setdetachstate");
+   if (detachstate != PTHREAD_CREATE_JOINABLE
+       && detachstate != PTHREAD_CREATE_DETACHED)
+      return EINVAL;
+   attr->__detachstate = detachstate;
    return 0;
 }
@@ -286,7 +286,7 @@ void thread_exit_wrapper ( void* ret_val )
    /* Decide on my final disposition. */
    VALGRIND_MAGIC_SEQUENCE(detached, (-1) /* default */,
                            VG_USERREQ__SET_OR_GET_DETACH,
-                           2 /* get */, 0, 0, 0);
+                           2 /* get */, pthread_self(), 0, 0);
    assert(detached == 0 || detached == 1);
    if (detached) {
@@ -336,14 +336,19 @@ void thread_wrapper ( NewThreadInfo* info )
    root_fn = info->root_fn;
    arg     = info->arg;
 
-   if (attr)
-      kludged("pthread_create -- ignoring attributes");
-
    /* Free up the arg block that pthread_create malloced. */
    VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
                            VG_USERREQ__FREE, info, 0, 0, 0);
    assert(res == 0);
 
+   /* Minimally observe the attributes supplied. */
+   if (attr) {
+      assert(attr->__detachstate == PTHREAD_CREATE_DETACHED
+             || attr->__detachstate == PTHREAD_CREATE_JOINABLE);
+      if (attr->__detachstate == PTHREAD_CREATE_DETACHED)
+         pthread_detach(pthread_self());
+   }
+
    /* The root function might not return.  But if it does we simply
       move along to thread_exit_wrapper.  All other ways out for the
       thread (cancellation, or calling pthread_exit) lead there
@@ -446,11 +451,22 @@ int pthread_detach(pthread_t th)
 {
    int res;
    ensure_valgrind("pthread_detach");
-   VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+   /* First we enquire as to the current detach state. */
+   VALGRIND_MAGIC_SEQUENCE(res, (-2) /* default */,
                            VG_USERREQ__SET_OR_GET_DETACH,
-                           1 /* set */, 0, 0, 0);
-   assert(res == 0);
-   return 0;
+                           2 /* get */, th, 0, 0);
+   if (res == -1)   /* not found */
+      return ESRCH;
+   if (res == 1)    /* already detached */
+      return EINVAL;
+   if (res == 0) {
+      VALGRIND_MAGIC_SEQUENCE(res, (-2) /* default */,
+                              VG_USERREQ__SET_OR_GET_DETACH,
+                              1 /* set */, th, 0, 0);
+      assert(res == 0);
+      return 0;
+   }
+   barf("pthread_detach");
 }
diff --git a/coregrind/vg_include.h b/coregrind/vg_include.h
index e81fe77eb9..3b8358fd94 100644
--- a/coregrind/vg_include.h
+++ b/coregrind/vg_include.h
@@ -130,7 +130,7 @@
    scheduler algorithms is surely O(N) in the number of threads, since
    that's simple, at least.  And (in practice) we hope that most
    programs do not need many threads. */
-#define VG_N_THREADS 50
+#define VG_N_THREADS 20
 
 /* Maximum number of pthread keys available.  Again, we start low until
   the need for a higher number presents itself. */
diff --git a/coregrind/vg_scheduler.c b/coregrind/vg_scheduler.c
index 7dbb89bd1f..a2a5573bd4 100644
--- a/coregrind/vg_scheduler.c
+++ b/coregrind/vg_scheduler.c
@@ -1672,7 +1672,6 @@ void do_pthread_yield ( ThreadId tid )
 {
    Char msg_buf[100];
    vg_assert(VG_(is_valid_tid)(tid));
-
    if (VG_(clo_trace_sched)) {
       VG_(sprintf)(msg_buf, "yield");
       print_sched_event(tid, msg_buf);
@@ -1684,7 +1683,12 @@
 static
 void do__testcancel ( ThreadId tid )
 {
+   Char msg_buf[100];
    vg_assert(VG_(is_valid_tid)(tid));
+   if (VG_(clo_trace_sched)) {
+      VG_(sprintf)(msg_buf, "testcancel");
+      print_sched_event(tid, msg_buf);
+   }
    if (/* is there a cancellation pending on this thread? */
        VG_(threads)[tid].cancel_pend != NULL
        && /* is this thread accepting cancellations? */
@@ -1702,7 +1706,15 @@
 static
 void do__set_cancelstate ( ThreadId tid, Int state )
 {
    Bool old_st;
+   Char msg_buf[100];
    vg_assert(VG_(is_valid_tid)(tid));
+   if (VG_(clo_trace_sched)) {
+      VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
+                   state==PTHREAD_CANCEL_ENABLE
+                     ? "ENABLE"
+                     : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
+      print_sched_event(tid, msg_buf);
+   }
    old_st = VG_(threads)[tid].cancel_st;
    if (state == PTHREAD_CANCEL_ENABLE) {
       VG_(threads)[tid].cancel_st = True;
@@ -1721,7 +1733,15 @@
 static
 void do__set_canceltype ( ThreadId tid, Int type )
 {
    Bool old_ty;
+   Char msg_buf[100];
    vg_assert(VG_(is_valid_tid)(tid));
+   if (VG_(clo_trace_sched)) {
+      VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
+                   type==PTHREAD_CANCEL_ASYNCHRONOUS
+                     ? "ASYNCHRONOUS"
+                     : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
+      print_sched_event(tid, msg_buf);
+   }
    old_ty = VG_(threads)[tid].cancel_ty;
    if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
       VG_(threads)[tid].cancel_ty = False;
@@ -1736,20 +1756,54 @@ void do__set_canceltype ( ThreadId tid, Int type )
 }
 
+/* Set or get the detach state for thread det. */
 static
-void do__set_or_get_detach ( ThreadId tid, Int what )
+void do__set_or_get_detach ( ThreadId tid,
+                             Int what, ThreadId det )
 {
+   ThreadId i;
+   Char msg_buf[100];
+   /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
+                  tid, what, det); */
    vg_assert(VG_(is_valid_tid)(tid));
+   if (VG_(clo_trace_sched)) {
+      VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
+                   what==0 ? "not-detached" : (
+                   what==1 ? "detached" : (
+                   what==2 ? "fetch old value" : "???")),
+                   det );
+      print_sched_event(tid, msg_buf);
+   }
+
+   if (!VG_(is_valid_tid)(det)) {
+      SET_EDX(tid, -1);
+      return;
+   }
+
    switch (what) {
       case 2: /* get */
-         SET_EDX(tid, VG_(threads)[tid].detached ? 1 : 0);
+         SET_EDX(tid, VG_(threads)[det].detached ? 1 : 0);
          return;
-      case 1: /* set detached */
-         VG_(threads)[tid].detached = True;
+      case 1: /* set detached.  If someone is in a join-wait for det,
+                 do not detach. */
+         for (i = 1; i < VG_N_THREADS; i++) {
+            if (VG_(threads)[i].status == VgTs_WaitJoinee
+                && VG_(threads)[i].joiner_jee_tid == det) {
+               SET_EDX(tid, 0);
+               if (VG_(clo_trace_sched)) {
+                  VG_(sprintf)(msg_buf,
+                     "tid %d not detached because %d in join-wait for it %d",
+                     det, i);
+                  print_sched_event(tid, msg_buf);
+               }
+               return;
+            }
+         }
+         VG_(threads)[det].detached = True;
          SET_EDX(tid, 0);
         return;
       case 0: /* set not detached */
-         VG_(threads)[tid].detached = False;
+         VG_(threads)[det].detached = False;
         SET_EDX(tid, 0);
         return;
      default:
@@ -1768,13 +1822,21 @@ void do__set_cancelpend ( ThreadId tid,
    vg_assert(VG_(is_valid_tid)(tid));
    vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
 
-   vg_assert(VG_(is_valid_tid)(cee));
+   if (!VG_(is_valid_tid)(cee)) {
+      if (VG_(clo_trace_sched)) {
+         VG_(sprintf)(msg_buf,
+            "set_cancelpend for invalid tid %d", cee);
+         print_sched_event(tid, msg_buf);
+      }
+      SET_EDX(tid, -VKI_ESRCH);
+      return;
+   }
 
    VG_(threads)[cee].cancel_pend = cancelpend_hdlr;
 
    if (VG_(clo_trace_sched)) {
       VG_(sprintf)(msg_buf,
-         "set cancel pending (hdlr = %p, canceller tid = %d)",
+         "set_cancelpend (hdlr = %p, set by tid %d)",
          cancelpend_hdlr, tid);
       print_sched_event(cee, msg_buf);
    }
@@ -1861,7 +1923,7 @@ void do__wait_joiner ( ThreadId tid, void* retval )
    vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
    if (VG_(clo_trace_sched)) {
       VG_(sprintf)(msg_buf,
-         "WAIT_JOINER(%p) (non-detached thread exit)", retval);
+         "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
       print_sched_event(tid, msg_buf);
    }
    VG_(threads)[tid].status = VgTs_WaitJoiner;
@@ -1880,9 +1942,8 @@ void do__quit ( ThreadId tid )
    vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
    VG_(threads)[tid].status = VgTs_Empty; /* bye! */
    cleanup_after_thread_exited ( tid );
-
    if (VG_(clo_trace_sched)) {
-      VG_(sprintf)(msg_buf, "QUIT (detached thread exit)");
+      VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
       print_sched_event(tid, msg_buf);
    }
    /* Return value is irrelevant; this thread will not get
@@ -2821,6 +2882,35 @@ void do_nontrivial_clientreq ( ThreadId tid )
             run. */
          break;
 
+      case VG_USERREQ__SET_CANCELSTATE:
+         do__set_cancelstate ( tid, arg[1] );
+         break;
+
+      case VG_USERREQ__SET_CANCELTYPE:
+         do__set_canceltype ( tid, arg[1] );
+         break;
+
+      case VG_USERREQ__SET_OR_GET_DETACH:
+         do__set_or_get_detach ( tid, arg[1], arg[2] );
+         break;
+
+      case VG_USERREQ__SET_CANCELPEND:
+         do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
+         break;
+
+      case VG_USERREQ__WAIT_JOINER:
+         do__wait_joiner ( tid, (void*)arg[1] );
+         break;
+
+      case VG_USERREQ__QUIT:
+         do__quit ( tid );
+         break;
+
+      case VG_USERREQ__APPLY_IN_NEW_THREAD:
+         do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
+                                   (void*)arg[2] );
+         break;
+
       case VG_USERREQ__MAKE_NOACCESS:
       case VG_USERREQ__MAKE_WRITABLE:
       case VG_USERREQ__MAKE_READABLE:
"ENABLE" + : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???")); + print_sched_event(tid, msg_buf); + } old_st = VG_(threads)[tid].cancel_st; if (state == PTHREAD_CANCEL_ENABLE) { VG_(threads)[tid].cancel_st = True; @@ -1721,7 +1733,15 @@ static void do__set_canceltype ( ThreadId tid, Int type ) { Bool old_ty; + Char msg_buf[100]; vg_assert(VG_(is_valid_tid)(tid)); + if (VG_(clo_trace_sched)) { + VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type, + type==PTHREAD_CANCEL_ASYNCHRONOUS + ? "ASYNCHRONOUS" + : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???")); + print_sched_event(tid, msg_buf); + } old_ty = VG_(threads)[tid].cancel_ty; if (type == PTHREAD_CANCEL_ASYNCHRONOUS) { VG_(threads)[tid].cancel_ty = False; @@ -1736,20 +1756,54 @@ void do__set_canceltype ( ThreadId tid, Int type ) } +/* Set or get the detach state for thread det. */ static -void do__set_or_get_detach ( ThreadId tid, Int what ) +void do__set_or_get_detach ( ThreadId tid, + Int what, ThreadId det ) { + ThreadId i; + Char msg_buf[100]; + /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n", + tid, what, det); */ vg_assert(VG_(is_valid_tid)(tid)); + if (VG_(clo_trace_sched)) { + VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what, + what==0 ? "not-detached" : ( + what==1 ? "detached" : ( + what==2 ? "fetch old value" : "???")), + det ); + print_sched_event(tid, msg_buf); + } + + if (!VG_(is_valid_tid)(det)) { + SET_EDX(tid, -1); + return; + } + switch (what) { case 2: /* get */ - SET_EDX(tid, VG_(threads)[tid].detached ? 1 : 0); + SET_EDX(tid, VG_(threads)[det].detached ? 1 : 0); return; - case 1: /* set detached */ - VG_(threads)[tid].detached = True; + case 1: /* set detached. If someone is in a join-wait for det, + do not detach. */ + for (i = 1; i < VG_N_THREADS; i++) { + if (VG_(threads)[i].status == VgTs_WaitJoinee + && VG_(threads)[i].joiner_jee_tid == det) { + SET_EDX(tid, 0); + if (VG_(clo_trace_sched)) { + VG_(sprintf)(msg_buf, + "tid %d not detached because %d in join-wait for it %d", + det, i); + print_sched_event(tid, msg_buf); + } + return; + } + } + VG_(threads)[det].detached = True; SET_EDX(tid, 0); return; case 0: /* set not detached */ - VG_(threads)[tid].detached = False; + VG_(threads)[det].detached = False; SET_EDX(tid, 0); return; default: @@ -1768,13 +1822,21 @@ void do__set_cancelpend ( ThreadId tid, vg_assert(VG_(is_valid_tid)(tid)); vg_assert(VG_(threads)[tid].status == VgTs_Runnable); - vg_assert(VG_(is_valid_tid)(cee)); + if (!VG_(is_valid_tid)(cee)) { + if (VG_(clo_trace_sched)) { + VG_(sprintf)(msg_buf, + "set_cancelpend for invalid tid %d", cee); + print_sched_event(tid, msg_buf); + } + SET_EDX(tid, -VKI_ESRCH); + return; + } VG_(threads)[cee].cancel_pend = cancelpend_hdlr; if (VG_(clo_trace_sched)) { VG_(sprintf)(msg_buf, - "set cancel pending (hdlr = %p, canceller tid = %d)", + "set_cancelpend (hdlr = %p, set by tid %d)", cancelpend_hdlr, tid); print_sched_event(cee, msg_buf); } @@ -1861,7 +1923,7 @@ void do__wait_joiner ( ThreadId tid, void* retval ) vg_assert(VG_(threads)[tid].status == VgTs_Runnable); if (VG_(clo_trace_sched)) { VG_(sprintf)(msg_buf, - "WAIT_JOINER(%p) (non-detached thread exit)", retval); + "do__wait_joiner(retval = %p) (non-detached thread exit)", retval); print_sched_event(tid, msg_buf); } VG_(threads)[tid].status = VgTs_WaitJoiner; @@ -1880,9 +1942,8 @@ void do__quit ( ThreadId tid ) vg_assert(VG_(threads)[tid].status == VgTs_Runnable); VG_(threads)[tid].status = VgTs_Empty; /* bye! 
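
For context, here is a minimal stand-alone pthreads program (illustrative only, not part of this commit; the file name detach_demo.c and the printed text are made up) that exercises the detach paths touched above: PTHREAD_CREATE_DETACHED supplied through an attribute object, which thread_wrapper() now honours instead of ignoring; rejection of a bogus detach state with EINVAL; and pthread_detach() on an already-running joinable thread.

/* detach_demo.c -- hypothetical example, not part of this commit.
   Build: gcc -o detach_demo detach_demo.c -lpthread
   Running it under valgrind exercises the vg_libpthread.c paths above. */
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void* worker ( void* arg )
{
   printf("worker '%s' running\n", (const char*)arg);
   return NULL;
}

int main ( void )
{
   pthread_t      t1, t2;
   pthread_attr_t attr;
   int            res;

   /* Thread detached from birth, via the attribute object. */
   assert( pthread_attr_init(&attr) == 0 );
   /* A bogus detach state must be refused with EINVAL. */
   assert( pthread_attr_setdetachstate(&attr, 12345) == EINVAL );
   assert( pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0 );
   assert( pthread_create(&t1, &attr, worker, "detached at create") == 0 );

   /* Joinable thread, detached after the fact.  A second detach of the
      same thread is expected to fail (EINVAL, or ESRCH if the thread
      has already exited and been cleaned up). */
   assert( pthread_create(&t2, NULL, worker, "detached later") == 0 );
   res = pthread_detach(t2);
   assert( res == 0 );
   res = pthread_detach(t2);
   printf("second pthread_detach -> %d (EINVAL is %d)\n", res, EINVAL);

   sleep(1);   /* crude: give the detached threads time to finish */
   return 0;
}

The change that enables this is that the VG_USERREQ__SET_OR_GET_DETACH client request now carries the target ThreadId in arg[2], so pthread_detach() can query and set the detach state of a thread other than the caller, and thread_exit_wrapper() can ask about its own disposition via pthread_self().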