int pthread_attr_init(pthread_attr_t *attr)
{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("pthread_attr_init");
+ /* Just initialise the fields which we might look at. */
+ attr->__detachstate = PTHREAD_CREATE_JOINABLE;
return 0;
}
int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("pthread_attr_setdetachstate");
+ if (detachstate != PTHREAD_CREATE_JOINABLE
+ && detachstate != PTHREAD_CREATE_DETACHED)
+ return EINVAL;
+ attr->__detachstate = detachstate;
return 0;
}
/* Decide on my final disposition. */
VALGRIND_MAGIC_SEQUENCE(detached, (-1) /* default */,
VG_USERREQ__SET_OR_GET_DETACH,
- 2 /* get */, 0, 0, 0);
+ 2 /* get */, pthread_self(), 0, 0);
assert(detached == 0 || detached == 1);
if (detached) {
root_fn = info->root_fn;
arg = info->arg;
- if (attr)
- kludged("pthread_create -- ignoring attributes");
-
/* Free up the arg block that pthread_create malloced. */
VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
VG_USERREQ__FREE, info, 0, 0, 0);
assert(res == 0);
+ /* Minimally observe the attributes supplied. */
+ if (attr) {
+ assert(attr->__detachstate == PTHREAD_CREATE_DETACHED
+ || attr->__detachstate == PTHREAD_CREATE_JOINABLE);
+ if (attr->__detachstate == PTHREAD_CREATE_DETACHED)
+ pthread_detach(pthread_self());
+ }
+
/* The root function might not return. But if it does we simply
move along to thread_exit_wrapper. All other ways out for the
thread (cancellation, or calling pthread_exit) lead there
{
int res;
ensure_valgrind("pthread_detach");
- VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ /* First we enquire as to the current detach state. */
+ VALGRIND_MAGIC_SEQUENCE(res, (-2) /* default */,
VG_USERREQ__SET_OR_GET_DETACH,
- 1 /* set */, 0, 0, 0);
- assert(res == 0);
- return 0;
+ 2 /* get */, th, 0, 0);
+ if (res == -1) /* not found */
+ return ESRCH;
+ if (res == 1) /* already detached */
+ return EINVAL;
+ if (res == 0) {
+ VALGRIND_MAGIC_SEQUENCE(res, (-2) /* default */,
+ VG_USERREQ__SET_OR_GET_DETACH,
+ 1 /* set */, th, 0, 0);
+ assert(res == 0);
+ return 0;
+ }
+ barf("pthread_detach");
}
scheduler algorithms is surely O(N) in the number of threads, since
that's simple, at least. And (in practice) we hope that most
programs do not need many threads. */
-#define VG_N_THREADS 50
+#define VG_N_THREADS 20
/* Maximum number of pthread keys available. Again, we start low until
the need for a higher number presents itself. */
int pthread_attr_init(pthread_attr_t *attr)
{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("pthread_attr_init");
+ /* Just initialise the fields which we might look at. */
+ attr->__detachstate = PTHREAD_CREATE_JOINABLE;
return 0;
}
int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("pthread_attr_setdetachstate");
+ if (detachstate != PTHREAD_CREATE_JOINABLE
+ && detachstate != PTHREAD_CREATE_DETACHED)
+ return EINVAL;
+ attr->__detachstate = detachstate;
return 0;
}
/* Decide on my final disposition. */
VALGRIND_MAGIC_SEQUENCE(detached, (-1) /* default */,
VG_USERREQ__SET_OR_GET_DETACH,
- 2 /* get */, 0, 0, 0);
+ 2 /* get */, pthread_self(), 0, 0);
assert(detached == 0 || detached == 1);
if (detached) {
root_fn = info->root_fn;
arg = info->arg;
- if (attr)
- kludged("pthread_create -- ignoring attributes");
-
/* Free up the arg block that pthread_create malloced. */
VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
VG_USERREQ__FREE, info, 0, 0, 0);
assert(res == 0);
+ /* Minimally observe the attributes supplied. */
+ if (attr) {
+ assert(attr->__detachstate == PTHREAD_CREATE_DETACHED
+ || attr->__detachstate == PTHREAD_CREATE_JOINABLE);
+ if (attr->__detachstate == PTHREAD_CREATE_DETACHED)
+ pthread_detach(pthread_self());
+ }
+
/* The root function might not return. But if it does we simply
move along to thread_exit_wrapper. All other ways out for the
thread (cancellation, or calling pthread_exit) lead there
{
int res;
ensure_valgrind("pthread_detach");
- VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ /* First we enquire as to the current detach state. */
+ VALGRIND_MAGIC_SEQUENCE(res, (-2) /* default */,
VG_USERREQ__SET_OR_GET_DETACH,
- 1 /* set */, 0, 0, 0);
- assert(res == 0);
- return 0;
+ 2 /* get */, th, 0, 0);
+ if (res == -1) /* not found */
+ return ESRCH;
+ if (res == 1) /* already detached */
+ return EINVAL;
+ if (res == 0) {
+ VALGRIND_MAGIC_SEQUENCE(res, (-2) /* default */,
+ VG_USERREQ__SET_OR_GET_DETACH,
+ 1 /* set */, th, 0, 0);
+ assert(res == 0);
+ return 0;
+ }
+ barf("pthread_detach");
}
{
Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
-
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf, "yield");
print_sched_event(tid, msg_buf);
static
void do__testcancel ( ThreadId tid )
{
+ Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf, "testcancel");
+ print_sched_event(tid, msg_buf);
+ }
if (/* is there a cancellation pending on this thread? */
VG_(threads)[tid].cancel_pend != NULL
&& /* is this thread accepting cancellations? */
void do__set_cancelstate ( ThreadId tid, Int state )
{
Bool old_st;
+ Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
+ state==PTHREAD_CANCEL_ENABLE
+ ? "ENABLE"
+ : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
+ print_sched_event(tid, msg_buf);
+ }
old_st = VG_(threads)[tid].cancel_st;
if (state == PTHREAD_CANCEL_ENABLE) {
VG_(threads)[tid].cancel_st = True;
void do__set_canceltype ( ThreadId tid, Int type )
{
Bool old_ty;
+ Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
+ type==PTHREAD_CANCEL_ASYNCHRONOUS
+ ? "ASYNCHRONOUS"
+ : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
+ print_sched_event(tid, msg_buf);
+ }
old_ty = VG_(threads)[tid].cancel_ty;
if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
VG_(threads)[tid].cancel_ty = False;
}
+/* Set or get the detach state for thread det. */
static
-void do__set_or_get_detach ( ThreadId tid, Int what )
+void do__set_or_get_detach ( ThreadId tid,
+ Int what, ThreadId det )
{
+ ThreadId i;
+ Char msg_buf[100];
+ /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
+ tid, what, det); */
vg_assert(VG_(is_valid_tid)(tid));
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
+ what==0 ? "not-detached" : (
+ what==1 ? "detached" : (
+ what==2 ? "fetch old value" : "???")),
+ det );
+ print_sched_event(tid, msg_buf);
+ }
+
+ if (!VG_(is_valid_tid)(det)) {
+ SET_EDX(tid, -1);
+ return;
+ }
+
switch (what) {
case 2: /* get */
- SET_EDX(tid, VG_(threads)[tid].detached ? 1 : 0);
+ SET_EDX(tid, VG_(threads)[det].detached ? 1 : 0);
return;
- case 1: /* set detached */
- VG_(threads)[tid].detached = True;
+ case 1: /* set detached. If someone is in a join-wait for det,
+ do not detach. */
+ for (i = 1; i < VG_N_THREADS; i++) {
+ if (VG_(threads)[i].status == VgTs_WaitJoinee
+ && VG_(threads)[i].joiner_jee_tid == det) {
+ SET_EDX(tid, 0);
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf,
+                  "tid %d not detached because %d in join-wait for it",
+                  det, i);
+ print_sched_event(tid, msg_buf);
+ }
+ return;
+ }
+ }
+ VG_(threads)[det].detached = True;
SET_EDX(tid, 0);
return;
case 0: /* set not detached */
- VG_(threads)[tid].detached = False;
+ VG_(threads)[det].detached = False;
SET_EDX(tid, 0);
return;
default:
vg_assert(VG_(is_valid_tid)(tid));
vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
- vg_assert(VG_(is_valid_tid)(cee));
+ if (!VG_(is_valid_tid)(cee)) {
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf,
+ "set_cancelpend for invalid tid %d", cee);
+ print_sched_event(tid, msg_buf);
+ }
+ SET_EDX(tid, -VKI_ESRCH);
+ return;
+ }
VG_(threads)[cee].cancel_pend = cancelpend_hdlr;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
- "set cancel pending (hdlr = %p, canceller tid = %d)",
+ "set_cancelpend (hdlr = %p, set by tid %d)",
cancelpend_hdlr, tid);
print_sched_event(cee, msg_buf);
}
vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
- "WAIT_JOINER(%p) (non-detached thread exit)", retval);
+ "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
print_sched_event(tid, msg_buf);
}
VG_(threads)[tid].status = VgTs_WaitJoiner;
vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
VG_(threads)[tid].status = VgTs_Empty; /* bye! */
cleanup_after_thread_exited ( tid );
-
if (VG_(clo_trace_sched)) {
- VG_(sprintf)(msg_buf, "QUIT (detached thread exit)");
+ VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
print_sched_event(tid, msg_buf);
}
/* Return value is irrelevant; this thread will not get
run. */
break;
+ case VG_USERREQ__SET_CANCELSTATE:
+ do__set_cancelstate ( tid, arg[1] );
+ break;
+
+ case VG_USERREQ__SET_CANCELTYPE:
+ do__set_canceltype ( tid, arg[1] );
+ break;
+
+ case VG_USERREQ__SET_OR_GET_DETACH:
+ do__set_or_get_detach ( tid, arg[1], arg[2] );
+ break;
+
+ case VG_USERREQ__SET_CANCELPEND:
+ do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
+ break;
+
+ case VG_USERREQ__WAIT_JOINER:
+ do__wait_joiner ( tid, (void*)arg[1] );
+ break;
+
+ case VG_USERREQ__QUIT:
+ do__quit ( tid );
+ break;
+
+ case VG_USERREQ__APPLY_IN_NEW_THREAD:
+ do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
+ (void*)arg[2] );
+ break;
+
case VG_USERREQ__MAKE_NOACCESS:
case VG_USERREQ__MAKE_WRITABLE:
case VG_USERREQ__MAKE_READABLE:
scheduler algorithms is surely O(N) in the number of threads, since
that's simple, at least. And (in practice) we hope that most
programs do not need many threads. */
-#define VG_N_THREADS 50
+#define VG_N_THREADS 20
/* Maximum number of pthread keys available. Again, we start low until
the need for a higher number presents itself. */
int pthread_attr_init(pthread_attr_t *attr)
{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("pthread_attr_init");
+ /* Just initialise the fields which we might look at. */
+ attr->__detachstate = PTHREAD_CREATE_JOINABLE;
return 0;
}
int pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
- static int moans = N_MOANS;
- if (moans-- > 0)
- ignored("pthread_attr_setdetachstate");
+ if (detachstate != PTHREAD_CREATE_JOINABLE
+ && detachstate != PTHREAD_CREATE_DETACHED)
+ return EINVAL;
+ attr->__detachstate = detachstate;
return 0;
}
/* Decide on my final disposition. */
VALGRIND_MAGIC_SEQUENCE(detached, (-1) /* default */,
VG_USERREQ__SET_OR_GET_DETACH,
- 2 /* get */, 0, 0, 0);
+ 2 /* get */, pthread_self(), 0, 0);
assert(detached == 0 || detached == 1);
if (detached) {
root_fn = info->root_fn;
arg = info->arg;
- if (attr)
- kludged("pthread_create -- ignoring attributes");
-
/* Free up the arg block that pthread_create malloced. */
VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
VG_USERREQ__FREE, info, 0, 0, 0);
assert(res == 0);
+ /* Minimally observe the attributes supplied. */
+ if (attr) {
+ assert(attr->__detachstate == PTHREAD_CREATE_DETACHED
+ || attr->__detachstate == PTHREAD_CREATE_JOINABLE);
+ if (attr->__detachstate == PTHREAD_CREATE_DETACHED)
+ pthread_detach(pthread_self());
+ }
+
/* The root function might not return. But if it does we simply
move along to thread_exit_wrapper. All other ways out for the
thread (cancellation, or calling pthread_exit) lead there
{
int res;
ensure_valgrind("pthread_detach");
- VALGRIND_MAGIC_SEQUENCE(res, (-1) /* default */,
+ /* First we enquire as to the current detach state. */
+ VALGRIND_MAGIC_SEQUENCE(res, (-2) /* default */,
VG_USERREQ__SET_OR_GET_DETACH,
- 1 /* set */, 0, 0, 0);
- assert(res == 0);
- return 0;
+ 2 /* get */, th, 0, 0);
+ if (res == -1) /* not found */
+ return ESRCH;
+ if (res == 1) /* already detached */
+ return EINVAL;
+ if (res == 0) {
+ VALGRIND_MAGIC_SEQUENCE(res, (-2) /* default */,
+ VG_USERREQ__SET_OR_GET_DETACH,
+ 1 /* set */, th, 0, 0);
+ assert(res == 0);
+ return 0;
+ }
+ barf("pthread_detach");
}
{
Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
-
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf, "yield");
print_sched_event(tid, msg_buf);
static
void do__testcancel ( ThreadId tid )
{
+ Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf, "testcancel");
+ print_sched_event(tid, msg_buf);
+ }
if (/* is there a cancellation pending on this thread? */
VG_(threads)[tid].cancel_pend != NULL
&& /* is this thread accepting cancellations? */
void do__set_cancelstate ( ThreadId tid, Int state )
{
Bool old_st;
+ Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf, "set_cancelstate to %d (%s)", state,
+ state==PTHREAD_CANCEL_ENABLE
+ ? "ENABLE"
+ : (state==PTHREAD_CANCEL_DISABLE ? "DISABLE" : "???"));
+ print_sched_event(tid, msg_buf);
+ }
old_st = VG_(threads)[tid].cancel_st;
if (state == PTHREAD_CANCEL_ENABLE) {
VG_(threads)[tid].cancel_st = True;
void do__set_canceltype ( ThreadId tid, Int type )
{
Bool old_ty;
+ Char msg_buf[100];
vg_assert(VG_(is_valid_tid)(tid));
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf, "set_canceltype to %d (%s)", type,
+ type==PTHREAD_CANCEL_ASYNCHRONOUS
+ ? "ASYNCHRONOUS"
+ : (type==PTHREAD_CANCEL_DEFERRED ? "DEFERRED" : "???"));
+ print_sched_event(tid, msg_buf);
+ }
old_ty = VG_(threads)[tid].cancel_ty;
if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
VG_(threads)[tid].cancel_ty = False;
}
+/* Set or get the detach state for thread det. */
static
-void do__set_or_get_detach ( ThreadId tid, Int what )
+void do__set_or_get_detach ( ThreadId tid,
+ Int what, ThreadId det )
{
+ ThreadId i;
+ Char msg_buf[100];
+ /* VG_(printf)("do__set_or_get_detach tid %d what %d det %d\n",
+ tid, what, det); */
vg_assert(VG_(is_valid_tid)(tid));
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf, "set_or_get_detach %d (%s) for tid %d", what,
+ what==0 ? "not-detached" : (
+ what==1 ? "detached" : (
+ what==2 ? "fetch old value" : "???")),
+ det );
+ print_sched_event(tid, msg_buf);
+ }
+
+ if (!VG_(is_valid_tid)(det)) {
+ SET_EDX(tid, -1);
+ return;
+ }
+
switch (what) {
case 2: /* get */
- SET_EDX(tid, VG_(threads)[tid].detached ? 1 : 0);
+ SET_EDX(tid, VG_(threads)[det].detached ? 1 : 0);
return;
- case 1: /* set detached */
- VG_(threads)[tid].detached = True;
+ case 1: /* set detached. If someone is in a join-wait for det,
+ do not detach. */
+ for (i = 1; i < VG_N_THREADS; i++) {
+ if (VG_(threads)[i].status == VgTs_WaitJoinee
+ && VG_(threads)[i].joiner_jee_tid == det) {
+ SET_EDX(tid, 0);
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf,
+                  "tid %d not detached because %d in join-wait for it",
+                  det, i);
+ print_sched_event(tid, msg_buf);
+ }
+ return;
+ }
+ }
+ VG_(threads)[det].detached = True;
SET_EDX(tid, 0);
return;
case 0: /* set not detached */
- VG_(threads)[tid].detached = False;
+ VG_(threads)[det].detached = False;
SET_EDX(tid, 0);
return;
default:
vg_assert(VG_(is_valid_tid)(tid));
vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
- vg_assert(VG_(is_valid_tid)(cee));
+ if (!VG_(is_valid_tid)(cee)) {
+ if (VG_(clo_trace_sched)) {
+ VG_(sprintf)(msg_buf,
+ "set_cancelpend for invalid tid %d", cee);
+ print_sched_event(tid, msg_buf);
+ }
+ SET_EDX(tid, -VKI_ESRCH);
+ return;
+ }
VG_(threads)[cee].cancel_pend = cancelpend_hdlr;
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
- "set cancel pending (hdlr = %p, canceller tid = %d)",
+ "set_cancelpend (hdlr = %p, set by tid %d)",
cancelpend_hdlr, tid);
print_sched_event(cee, msg_buf);
}
vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
if (VG_(clo_trace_sched)) {
VG_(sprintf)(msg_buf,
- "WAIT_JOINER(%p) (non-detached thread exit)", retval);
+ "do__wait_joiner(retval = %p) (non-detached thread exit)", retval);
print_sched_event(tid, msg_buf);
}
VG_(threads)[tid].status = VgTs_WaitJoiner;
vg_assert(VG_(threads)[tid].status == VgTs_Runnable);
VG_(threads)[tid].status = VgTs_Empty; /* bye! */
cleanup_after_thread_exited ( tid );
-
if (VG_(clo_trace_sched)) {
- VG_(sprintf)(msg_buf, "QUIT (detached thread exit)");
+ VG_(sprintf)(msg_buf, "do__quit (detached thread exit)");
print_sched_event(tid, msg_buf);
}
/* Return value is irrelevant; this thread will not get
run. */
break;
+ case VG_USERREQ__SET_CANCELSTATE:
+ do__set_cancelstate ( tid, arg[1] );
+ break;
+
+ case VG_USERREQ__SET_CANCELTYPE:
+ do__set_canceltype ( tid, arg[1] );
+ break;
+
+ case VG_USERREQ__SET_OR_GET_DETACH:
+ do__set_or_get_detach ( tid, arg[1], arg[2] );
+ break;
+
+ case VG_USERREQ__SET_CANCELPEND:
+ do__set_cancelpend ( tid, arg[1], (void(*)(void*))arg[2] );
+ break;
+
+ case VG_USERREQ__WAIT_JOINER:
+ do__wait_joiner ( tid, (void*)arg[1] );
+ break;
+
+ case VG_USERREQ__QUIT:
+ do__quit ( tid );
+ break;
+
+ case VG_USERREQ__APPLY_IN_NEW_THREAD:
+ do__apply_in_new_thread ( tid, (void*(*)(void*))arg[1],
+ (void*)arg[2] );
+ break;
+
case VG_USERREQ__MAKE_NOACCESS:
case VG_USERREQ__MAKE_WRITABLE:
case VG_USERREQ__MAKE_READABLE: