Mozilla work.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@107
}
}
+/* Warn on stderr (fd 2) that the named pthread call was KLUDGED,
+   i.e. faked with a made-up result, when the pthread trace level is
+   >= 1.  Companion to ignored() — presumably the same shape; see its
+   uses below (TODO confirm: ignored() body is not in this hunk). */
+static void kludged ( char* msg )
+{
+ if (get_pt_trace_level() >= 1) {
+ char* ig = "vg_libpthread.so: KLUDGED call to: ";
+ write(2, ig, strlen(ig));
+ write(2, msg, strlen(msg));
+ ig = "\n";
+ write(2, ig, strlen(ig));
+ }
+}
+
/* ---------------------------------------------------------------------
Pass pthread_ calls to Valgrind's request mechanism.
return 0;
}
+/* Not implemented: the request is ignored and success (0) returned. */
+int pthread_attr_setinheritsched(pthread_attr_t *attr, int inherit)
+{
+ ignored("pthread_attr_setinheritsched");
+ return 0;
+}
+
+/* This is completely bogus. */
+int pthread_attr_getschedparam(const pthread_attr_t *attr,
+ struct sched_param *param)
+{
+ kludged("pthread_attr_getschedparam");
+ if (param) param->__sched_priority = 0; /* who knows */
+ return 0;
+}
+
+/* Not implemented: the supplied params are ignored, success returned. */
+int pthread_attr_setschedparam(pthread_attr_t *attr,
+ const struct sched_param *param)
+{
+ ignored("pthread_attr_setschedparam");
+ return 0;
+}
+/* Nothing to release; claim success. */
+int pthread_attr_destroy(pthread_attr_t *attr)
+{
+ ignored("pthread_attr_destroy");
+ return 0;
+}
/* ---------------------------------------------------
THREADs
}
}
+/* Client-side trylock: hand the mutex to the Valgrind scheduler via
+   a TRYLOCK user request; the scheduler replies 0 or an errno (EBUSY
+   when already held — see do_pthread_mutex_lock).  Outside Valgrind,
+   moan at most 3 times and claim the lock was acquired.
+   NOTE(review): once `moans` is exhausted the else branch also runs
+   outside Valgrind; the magic sequence then yields the default, 0. */
+int pthread_mutex_trylock(pthread_mutex_t *mutex)
+{
+ int res;
+ static int moans = 3;
+ if (!(RUNNING_ON_VALGRIND) && moans-- > 0) {
+ char* str = "pthread_mutex_trylock-NOT-INSIDE-VALGRIND\n";
+ write(2, str, strlen(str));
+ return 0;
+ } else {
+ VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
+ VG_USERREQ__PTHREAD_MUTEX_TRYLOCK,
+ mutex, 0, 0, 0);
+ return res;
+ }
+}
+
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
int res;
int *policy,
struct sched_param *param)
{
+ kludged("pthread_getschedparam");
if (policy) *policy = SCHED_OTHER;
if (param) param->__sched_priority = 0; /* who knows */
return 0;
#define VG_USERREQ__PTHREAD_JOIN 0x3002
#define VG_USERREQ__PTHREAD_GET_THREADID 0x3003
#define VG_USERREQ__PTHREAD_MUTEX_LOCK 0x3004
-#define VG_USERREQ__PTHREAD_MUTEX_UNLOCK 0x3005
-#define VG_USERREQ__PTHREAD_CANCEL 0x3006
-#define VG_USERREQ__PTHREAD_EXIT 0x3007
-#define VG_USERREQ__PTHREAD_COND_WAIT 0x3008
-#define VG_USERREQ__PTHREAD_COND_SIGNAL 0x3009
-#define VG_USERREQ__PTHREAD_COND_BROADCAST 0x300A
+/* TRYLOCK is inserted at 0x3005; every later request shifts up by
+   one.  Both copies of this table in the tree must stay in sync. */
+#define VG_USERREQ__PTHREAD_MUTEX_TRYLOCK 0x3005
+#define VG_USERREQ__PTHREAD_MUTEX_UNLOCK 0x3006
+#define VG_USERREQ__PTHREAD_CANCEL 0x3007
+#define VG_USERREQ__PTHREAD_EXIT 0x3008
+#define VG_USERREQ__PTHREAD_COND_WAIT 0x3009
+#define VG_USERREQ__PTHREAD_COND_SIGNAL 0x300A
+#define VG_USERREQ__PTHREAD_COND_BROADCAST 0x300B
/* Cosmetic ... */
#define VG_USERREQ__GET_PTHREAD_TRACE_LEVEL 0x3101
}
}
+/* Warn on stderr (fd 2) that the named pthread call was KLUDGED,
+   i.e. faked with a made-up result, when the pthread trace level is
+   >= 1.  Companion to ignored() — presumably the same shape; see its
+   uses below (TODO confirm: ignored() body is not in this hunk). */
+static void kludged ( char* msg )
+{
+ if (get_pt_trace_level() >= 1) {
+ char* ig = "vg_libpthread.so: KLUDGED call to: ";
+ write(2, ig, strlen(ig));
+ write(2, msg, strlen(msg));
+ ig = "\n";
+ write(2, ig, strlen(ig));
+ }
+}
+
/* ---------------------------------------------------------------------
Pass pthread_ calls to Valgrind's request mechanism.
return 0;
}
+/* Not implemented: the request is ignored and success (0) returned. */
+int pthread_attr_setinheritsched(pthread_attr_t *attr, int inherit)
+{
+ ignored("pthread_attr_setinheritsched");
+ return 0;
+}
+
+/* This is completely bogus. */
+int pthread_attr_getschedparam(const pthread_attr_t *attr,
+ struct sched_param *param)
+{
+ kludged("pthread_attr_getschedparam");
+ if (param) param->__sched_priority = 0; /* who knows */
+ return 0;
+}
+
+/* Not implemented: the supplied params are ignored, success returned. */
+int pthread_attr_setschedparam(pthread_attr_t *attr,
+ const struct sched_param *param)
+{
+ ignored("pthread_attr_setschedparam");
+ return 0;
+}
+/* Nothing to release; claim success. */
+int pthread_attr_destroy(pthread_attr_t *attr)
+{
+ ignored("pthread_attr_destroy");
+ return 0;
+}
/* ---------------------------------------------------
THREADs
}
}
+/* Client-side trylock: hand the mutex to the Valgrind scheduler via
+   a TRYLOCK user request; the scheduler replies 0 or an errno (EBUSY
+   when already held — see do_pthread_mutex_lock).  Outside Valgrind,
+   moan at most 3 times and claim the lock was acquired.
+   NOTE(review): once `moans` is exhausted the else branch also runs
+   outside Valgrind; the magic sequence then yields the default, 0. */
+int pthread_mutex_trylock(pthread_mutex_t *mutex)
+{
+ int res;
+ static int moans = 3;
+ if (!(RUNNING_ON_VALGRIND) && moans-- > 0) {
+ char* str = "pthread_mutex_trylock-NOT-INSIDE-VALGRIND\n";
+ write(2, str, strlen(str));
+ return 0;
+ } else {
+ VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
+ VG_USERREQ__PTHREAD_MUTEX_TRYLOCK,
+ mutex, 0, 0, 0);
+ return res;
+ }
+}
+
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
int res;
int *policy,
struct sched_param *param)
{
+ kludged("pthread_getschedparam");
if (policy) *policy = SCHED_OTHER;
if (param) param->__sched_priority = 0; /* who knows */
return 0;
static
-void do_pthread_mutex_lock( ThreadId tid, pthread_mutex_t *mutex )
+void do_pthread_mutex_lock( ThreadId tid,
+ Bool is_trylock,
+ pthread_mutex_t *mutex )
{
- Char msg_buf[100];
+ Char msg_buf[100];
+ /* Name of the entry point we are acting for, used to tag trace
+    messages.  BUGFIX: the two strings were swapped — trylock
+    requests were reported as "pthread_mutex_lock" and vice versa. */
+ Char* caller
+ = is_trylock ? "pthread_mutex_trylock"
+ : "pthread_mutex_lock ";
if (VG_(clo_trace_pthread_level) >= 2) {
- VG_(sprintf)(msg_buf, "pthread_mutex_lock mx %p ...", mutex );
+ VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
print_pthread_event(tid, msg_buf);
}
tid, mutex, mutex->__m_count);
return;
} else {
- vg_threads[tid].m_edx = EDEADLK;
+ if (is_trylock)
+ vg_threads[tid].m_edx = EBUSY;
+ else
+ vg_threads[tid].m_edx = EDEADLK;
return;
}
} else {
/* Someone else has it; we have to wait. Mark ourselves
thusly. */
/* GUARD: __m_count > 0 && __m_owner is valid */
- vg_threads[tid].status = VgTs_WaitMX;
- vg_threads[tid].associated_mx = mutex;
- /* No assignment to %EDX, since we're blocking. */
- if (VG_(clo_trace_pthread_level) >= 1) {
- VG_(sprintf)(msg_buf, "pthread_mutex_lock mx %p: BLOCK",
- mutex );
- print_pthread_event(tid, msg_buf);
- }
+ if (is_trylock) {
+ /* caller is polling; so return immediately. */
+ vg_threads[tid].m_edx = EBUSY;
+ } else {
+ vg_threads[tid].status = VgTs_WaitMX;
+ vg_threads[tid].associated_mx = mutex;
+ /* No assignment to %EDX, since we're blocking. */
+ if (VG_(clo_trace_pthread_level) >= 1) {
+ VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
+ caller, mutex );
+ print_pthread_event(tid, msg_buf);
+ }
+ }
return;
}
break;
case VG_USERREQ__PTHREAD_MUTEX_LOCK:
- do_pthread_mutex_lock( tid, (pthread_mutex_t *)(arg[1]) );
+ do_pthread_mutex_lock( tid, False, (pthread_mutex_t *)(arg[1]) );
+ break;
+
+ case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
+ do_pthread_mutex_lock( tid, True, (pthread_mutex_t *)(arg[1]) );
break;
case VG_USERREQ__PTHREAD_MUTEX_UNLOCK:
#define VG_USERREQ__PTHREAD_JOIN 0x3002
#define VG_USERREQ__PTHREAD_GET_THREADID 0x3003
#define VG_USERREQ__PTHREAD_MUTEX_LOCK 0x3004
-#define VG_USERREQ__PTHREAD_MUTEX_UNLOCK 0x3005
-#define VG_USERREQ__PTHREAD_CANCEL 0x3006
-#define VG_USERREQ__PTHREAD_EXIT 0x3007
-#define VG_USERREQ__PTHREAD_COND_WAIT 0x3008
-#define VG_USERREQ__PTHREAD_COND_SIGNAL 0x3009
-#define VG_USERREQ__PTHREAD_COND_BROADCAST 0x300A
+/* TRYLOCK is inserted at 0x3005; every later request shifts up by
+   one.  Both copies of this table in the tree must stay in sync. */
+#define VG_USERREQ__PTHREAD_MUTEX_TRYLOCK 0x3005
+#define VG_USERREQ__PTHREAD_MUTEX_UNLOCK 0x3006
+#define VG_USERREQ__PTHREAD_CANCEL 0x3007
+#define VG_USERREQ__PTHREAD_EXIT 0x3008
+#define VG_USERREQ__PTHREAD_COND_WAIT 0x3009
+#define VG_USERREQ__PTHREAD_COND_SIGNAL 0x300A
+#define VG_USERREQ__PTHREAD_COND_BROADCAST 0x300B
/* Cosmetic ... */
#define VG_USERREQ__GET_PTHREAD_TRACE_LEVEL 0x3101
}
}
+/* Warn on stderr (fd 2) that the named pthread call was KLUDGED,
+   i.e. faked with a made-up result, when the pthread trace level is
+   >= 1.  Companion to ignored() — presumably the same shape; see its
+   uses below (TODO confirm: ignored() body is not in this hunk). */
+static void kludged ( char* msg )
+{
+ if (get_pt_trace_level() >= 1) {
+ char* ig = "vg_libpthread.so: KLUDGED call to: ";
+ write(2, ig, strlen(ig));
+ write(2, msg, strlen(msg));
+ ig = "\n";
+ write(2, ig, strlen(ig));
+ }
+}
+
/* ---------------------------------------------------------------------
Pass pthread_ calls to Valgrind's request mechanism.
return 0;
}
+/* Not implemented: the request is ignored and success (0) returned. */
+int pthread_attr_setinheritsched(pthread_attr_t *attr, int inherit)
+{
+ ignored("pthread_attr_setinheritsched");
+ return 0;
+}
+
+/* This is completely bogus. */
+int pthread_attr_getschedparam(const pthread_attr_t *attr,
+ struct sched_param *param)
+{
+ kludged("pthread_attr_getschedparam");
+ if (param) param->__sched_priority = 0; /* who knows */
+ return 0;
+}
+
+/* Not implemented: the supplied params are ignored, success returned. */
+int pthread_attr_setschedparam(pthread_attr_t *attr,
+ const struct sched_param *param)
+{
+ ignored("pthread_attr_setschedparam");
+ return 0;
+}
+/* Nothing to release; claim success. */
+int pthread_attr_destroy(pthread_attr_t *attr)
+{
+ ignored("pthread_attr_destroy");
+ return 0;
+}
/* ---------------------------------------------------
THREADs
}
}
+/* Client-side trylock: hand the mutex to the Valgrind scheduler via
+   a TRYLOCK user request; the scheduler replies 0 or an errno (EBUSY
+   when already held — see do_pthread_mutex_lock).  Outside Valgrind,
+   moan at most 3 times and claim the lock was acquired.
+   NOTE(review): once `moans` is exhausted the else branch also runs
+   outside Valgrind; the magic sequence then yields the default, 0. */
+int pthread_mutex_trylock(pthread_mutex_t *mutex)
+{
+ int res;
+ static int moans = 3;
+ if (!(RUNNING_ON_VALGRIND) && moans-- > 0) {
+ char* str = "pthread_mutex_trylock-NOT-INSIDE-VALGRIND\n";
+ write(2, str, strlen(str));
+ return 0;
+ } else {
+ VALGRIND_MAGIC_SEQUENCE(res, 0 /* default */,
+ VG_USERREQ__PTHREAD_MUTEX_TRYLOCK,
+ mutex, 0, 0, 0);
+ return res;
+ }
+}
+
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
int res;
int *policy,
struct sched_param *param)
{
+ kludged("pthread_getschedparam");
if (policy) *policy = SCHED_OTHER;
if (param) param->__sched_priority = 0; /* who knows */
return 0;
static
-void do_pthread_mutex_lock( ThreadId tid, pthread_mutex_t *mutex )
+void do_pthread_mutex_lock( ThreadId tid,
+ Bool is_trylock,
+ pthread_mutex_t *mutex )
{
- Char msg_buf[100];
+ Char msg_buf[100];
+ /* Name of the entry point we are acting for, used to tag trace
+    messages.  BUGFIX: the two strings were swapped — trylock
+    requests were reported as "pthread_mutex_lock" and vice versa. */
+ Char* caller
+ = is_trylock ? "pthread_mutex_trylock"
+ : "pthread_mutex_lock ";
if (VG_(clo_trace_pthread_level) >= 2) {
- VG_(sprintf)(msg_buf, "pthread_mutex_lock mx %p ...", mutex );
+ VG_(sprintf)(msg_buf, "%s mx %p ...", caller, mutex );
print_pthread_event(tid, msg_buf);
}
tid, mutex, mutex->__m_count);
return;
} else {
- vg_threads[tid].m_edx = EDEADLK;
+ if (is_trylock)
+ vg_threads[tid].m_edx = EBUSY;
+ else
+ vg_threads[tid].m_edx = EDEADLK;
return;
}
} else {
/* Someone else has it; we have to wait. Mark ourselves
thusly. */
/* GUARD: __m_count > 0 && __m_owner is valid */
- vg_threads[tid].status = VgTs_WaitMX;
- vg_threads[tid].associated_mx = mutex;
- /* No assignment to %EDX, since we're blocking. */
- if (VG_(clo_trace_pthread_level) >= 1) {
- VG_(sprintf)(msg_buf, "pthread_mutex_lock mx %p: BLOCK",
- mutex );
- print_pthread_event(tid, msg_buf);
- }
+ if (is_trylock) {
+ /* caller is polling; so return immediately. */
+ vg_threads[tid].m_edx = EBUSY;
+ } else {
+ vg_threads[tid].status = VgTs_WaitMX;
+ vg_threads[tid].associated_mx = mutex;
+ /* No assignment to %EDX, since we're blocking. */
+ if (VG_(clo_trace_pthread_level) >= 1) {
+ VG_(sprintf)(msg_buf, "%s mx %p: BLOCK",
+ caller, mutex );
+ print_pthread_event(tid, msg_buf);
+ }
+ }
return;
}
break;
case VG_USERREQ__PTHREAD_MUTEX_LOCK:
- do_pthread_mutex_lock( tid, (pthread_mutex_t *)(arg[1]) );
+ do_pthread_mutex_lock( tid, False, (pthread_mutex_t *)(arg[1]) );
+ break;
+
+ case VG_USERREQ__PTHREAD_MUTEX_TRYLOCK:
+ do_pthread_mutex_lock( tid, True, (pthread_mutex_t *)(arg[1]) );
break;
case VG_USERREQ__PTHREAD_MUTEX_UNLOCK: