without triggering Valgrind's redirection mechanism.
- Discuss on the Valgrind mailing list the modification of tests/vg_regtest
  such that it ignores files ending in ~ or #.
+- Continue the discussion on the Valgrind mailing list about docbook and
+ 'make dist'.
+- Continue the discussion on the Valgrind mailing list about -Wformat.
+- Explain on the Valgrind mailing list the difference between a bus lock
+ and acquire / release labels.
- Find out why a race is reported on std::string::string(std::string const&)
(stc test case 16).
- Add support for objects that are shared over threads and that use reference
- Performance testing and tuning.
- testing on PPC and AIX (current implementation is only tested on X86 and
AMD64).
-- [AMD64] Find out why removing 'write(1, "", 0)' in drd_intercepts.c triggers
- a crash on AMD64. Is this an exp-drd or a VEX bug ?
+- Find out why there are sometimes races reported on exp-drd/test/matinv.
+- [Fedora 8] Find out why pth_broadcast sometimes hangs on Fedora 8. Is this an
+ exp-drd, pth_broadcast, kernel or glibc bug ?
- On x86 and amd64 platforms, add support for implicit locking arising from
the use of the LOCK instruction prefix.
- Convert the array in drd_thread.c with thread information into an OSet.
(works fine on i386). This is a bug in Valgrind's debug info reader
-- VG_(find_seginfo)() returns NULL for BSS symbols on x86_64. Not yet in
the KDE bug tracking system.
-- No error message is printed for tc20_verifywrap when a locked mutex is
- deallocated (mutex was allocated on the stack).
thread_new_segment(PtThreadIdToDrdThreadId(arg[1]));
break;
+ case VG_USERREQ__DRD_TRACE_ADDR:
+ drd_trace_addr(arg[1]);
+ break;
+
case VG_USERREQ__SET_PTHREADID:
thread_set_pthreadid(thread_get_running_tid(), arg[1]);
break;
/* To ask the drd tool to start a new segment in the specified thread. */
VG_USERREQ__DRD_START_NEW_SEGMENT,
/* args: POSIX thread ID. */
+ /* To ask the drd tool to trace all accesses to the specified address. */
+ VG_USERREQ__DRD_TRACE_ADDR,
+ /* args: Addr. */
/* Tell the core the pthread_t of the running thread */
VG_USERREQ__SET_PTHREADID,
VALGRIND_GET_ORIG_FN(fn);
VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__PRE_PTHREAD_MUTEX_LOCK,
mutex, sizeof(*mutex), mutex_type(mutex), 0, 0);
-#if 1
- // The only purpose of the system call below is to make drd work on AMD64
- // systems. Without this system call, clients crash (SIGSEGV) in
- // std::locale::locale().
- write(1, "", 0);
-#endif
CALL_FN_W_W(ret, fn, mutex);
VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__POST_PTHREAD_MUTEX_LOCK,
mutex, ret == 0, 0, 0, 0);
#include "drd_error.h"
#include "drd_malloc_wrappers.h"
#include "drd_mutex.h"
+#include "drd_rwlock.h"
#include "drd_segment.h"
#include "drd_semaphore.h"
#include "drd_suppression.h"
Bool trace_csw = False;
Bool trace_danger_set = False;
Bool trace_mutex = False;
+ Bool trace_rwlock = False;
Bool trace_segment = False;
Bool trace_semaphore = False;
Bool trace_suppression = False;
else VG_BOOL_CLO(arg, "--trace-fork-join", drd_trace_fork_join)
else VG_BOOL_CLO(arg, "--trace-mem", drd_trace_mem)
else VG_BOOL_CLO(arg, "--trace-mutex", trace_mutex)
+ else VG_BOOL_CLO(arg, "--trace-rwlock", trace_rwlock)
else VG_BOOL_CLO(arg, "--trace-segment", trace_segment)
else VG_BOOL_CLO(arg, "--trace-semaphore", trace_semaphore)
else VG_BOOL_CLO(arg, "--trace-suppression", trace_suppression)
thread_trace_danger_set(trace_danger_set);
if (trace_mutex)
mutex_set_trace(trace_mutex);
+ if (trace_rwlock)
+ rwlock_set_trace(trace_rwlock);
if (trace_segment)
sg_set_trace(trace_segment);
if (trace_semaphore)
#if 1
if (drd_trace_mem || (addr == drd_trace_address))
{
- VG_(message)(Vg_UserMsg, "load 0x%lx size %ld %s (vg %d / drd %d)",
+ char vc[80];
+ vc_snprint(vc, sizeof(vc), thread_get_vc(thread_get_running_tid()));
+ VG_(message)(Vg_UserMsg, "load 0x%lx size %ld %s (vg %d / drd %d / vc %s)",
addr,
size,
thread_get_name(thread_get_running_tid()),
VG_(get_running_tid)(),
- thread_get_running_tid());
+ thread_get_running_tid(),
+ vc);
VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
VG_(clo_backtrace_size));
tl_assert(DrdThreadIdToVgThreadId(thread_get_running_tid())
#if 1
if (drd_trace_mem || (addr == drd_trace_address))
{
- VG_(message)(Vg_UserMsg, "store 0x%lx size %ld %s (vg %d / drd %d / off %d)",
+ char vc[80];
+ vc_snprint(vc, sizeof(vc), thread_get_vc(thread_get_running_tid()));
+ VG_(message)(Vg_UserMsg, "store 0x%lx size %ld %s (vg %d / drd %d / off %d / vc %s)",
addr,
size,
thread_get_name(thread_get_running_tid()),
VG_(get_running_tid)(),
thread_get_running_tid(),
- addr - thread_get_stack_min(thread_get_running_tid()));
+ addr - thread_get_stack_min(thread_get_running_tid()),
+ vc);
VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
VG_(clo_backtrace_size));
tl_assert(DrdThreadIdToVgThreadId(thread_get_running_tid())
barrier_thread_delete(drd_joinee);
}
+void drd_trace_addr(const Addr addr)
+{
+ drd_trace_address = addr;
+}
+
/* Called after a thread has performed its last memory access. */
static void drd_thread_finished(ThreadId tid)
{
#include "priv_drd_clientreq.h"
#include "pub_tool_errormgr.h" // VG_(maybe_record_error)()
#include "pub_tool_libcassert.h" // tl_assert()
+#include "pub_tool_libcbase.h" // VG_(strlen)
#include "pub_tool_libcprint.h" // VG_(message)()
#include "pub_tool_machine.h" // VG_(get_IP)()
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()
if (clientobj_present(mutex, mutex + size))
{
- GenericErrInfo GEI;
- VG_(maybe_record_error)(VG_(get_running_tid)(),
- GenericErr,
- VG_(get_IP)(VG_(get_running_tid)()),
- "Not a mutex",
- &GEI);
- return 0;
+ GenericErrInfo GEI;
+ VG_(maybe_record_error)(VG_(get_running_tid)(),
+ GenericErr,
+ VG_(get_IP)(VG_(get_running_tid)()),
+ "Not a mutex",
+ &GEI);
+ return 0;
}
p = &clientobj_add(mutex, mutex + size, ClientMutex)->mutex;
}
if (! p || ! took_lock)
- return;
+ return;
if (p->recursion_count == 0)
{
+ const DrdThreadId last_owner = p->owner;
+
+ if (last_owner != drd_tid && last_owner != DRD_INVALID_THREADID)
+ thread_combine_vc2(drd_tid, mutex_get_last_vc(mutex));
+ thread_new_segment(drd_tid);
+
p->owner = drd_tid;
s_mutex_lock_count++;
}
p->owner = drd_tid;
}
p->recursion_count++;
-
- if (p->recursion_count == 1)
- {
- const DrdThreadId last_owner = p->owner;
-
- if (last_owner != drd_tid && last_owner != DRD_INVALID_THREADID)
- thread_combine_vc2(drd_tid, mutex_get_last_vc(mutex));
- thread_new_segment(drd_tid);
- }
}
/**
if (p == 0 || mutex_type == mutex_type_invalid_mutex)
{
- GenericErrInfo GEI;
- VG_(maybe_record_error)(vg_tid,
- GenericErr,
- VG_(get_IP)(vg_tid),
- "Not a mutex",
- &GEI);
- return;
+ GenericErrInfo GEI;
+ VG_(maybe_record_error)(vg_tid,
+ GenericErr,
+ VG_(get_IP)(vg_tid),
+ "Not a mutex",
+ &GEI);
+ return;
}
if (p->owner == DRD_INVALID_THREADID)
VG_(get_IP)(vg_tid),
"Mutex not locked",
&MEI);
- return;
+ return;
}
tl_assert(p);
if (p->mutex_type != mutex_type)
{
VG_(message)(Vg_UserMsg, "??? mutex %p: type changed from %d into %d",
- p->a1, p->mutex_type, mutex_type);
+ p->a1, p->mutex_type, mutex_type);
}
tl_assert(p->mutex_type == mutex_type);
tl_assert(p->owner != DRD_INVALID_THREADID);
/*
* Local variables:
- * c-basic-offset: 3
+ * c-basic-offset: 2
* End:
*/
struct rwlock_thread_info
{
- UWord tid; // DrdThreadId.
- UInt reader_nesting_count;
- UInt writer_nesting_count;
+ UWord tid; // DrdThreadId.
+ UInt reader_nesting_count;
+ UInt writer_nesting_count;
VectorClock vc; // Vector clock associated with last unlock by this thread.
+ Bool last_lock_was_writer_lock;
};
q->reader_nesting_count = 0;
q->writer_nesting_count = 0;
vc_init(&q->vc, 0, 0);
+ q->last_lock_was_writer_lock = False;
VG_(OSetGen_Insert)(oset, q);
}
tl_assert(q);
}
static void rwlock_combine_other_vc(struct rwlock_info* const p,
- const DrdThreadId tid)
+ const DrdThreadId tid,
+ const Bool readers_too)
{
struct rwlock_thread_info* q;
VG_(OSetGen_ResetIter)(p->thread_info);
for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
{
- if (q->tid != tid)
+ if (q->tid != tid && (readers_too || q->last_lock_was_writer_lock))
{
thread_combine_vc2(tid, &q->vc);
}
if (s_trace_rwlock)
{
VG_(message)(Vg_UserMsg,
- "[%d/%d] rwlock_destroy 0x%lx",
+ "[%d/%d] rwlock_destroy 0x%lx",
VG_(get_running_tid)(),
thread_get_running_tid(),
p->a1);
if (s_trace_rwlock)
{
VG_(message)(Vg_UserMsg,
- "[%d/%d] rwlock_init %s 0x%lx",
+ "[%d/%d] rwlock_init 0x%lx",
VG_(get_running_tid)(),
thread_get_running_tid(),
rwlock);
q = lookup_or_insert_node(p->thread_info, drd_tid);
if (++q->reader_nesting_count == 1)
{
- rwlock_combine_other_vc(p, drd_tid);
+ rwlock_combine_other_vc(p, drd_tid, False);
thread_new_segment(drd_tid);
}
}
q = lookup_or_insert_node(p->thread_info, thread_get_running_tid());
tl_assert(q->writer_nesting_count == 0);
q->writer_nesting_count++;
+ q->last_lock_was_writer_lock = True;
tl_assert(q->writer_nesting_count == 1);
- rwlock_combine_other_vc(p, drd_tid);
+ rwlock_combine_other_vc(p, drd_tid, True);
thread_new_segment(drd_tid);
}
if (s_trace_rwlock && p != 0)
{
VG_(message)(Vg_UserMsg,
- "[%d/%d] rwlock_unlock 0x%lx",
+ "[%d/%d] rwlock_unlock 0x%lx",
vg_tid,
drd_tid,
rwlock);
/* current vector clock of the thread such that it is available when */
/* this rwlock is locked again. */
vc_assign(&q->vc, vc);
+ q->last_lock_was_writer_lock = False;
thread_new_segment(drd_tid);
}
}
/**
- * Discard all segments that have a defined ordered against the latest vector
+ * Discard all segments that have a defined order against the latest vector
* clock of every thread -- these segments can no longer be involved in a
* data race.
*/
VG_(message)(Vg_DebugMsg, "%s", msg);
}
- for (p = s_threadinfo[tid].first; p; p = p->next)
+ p = s_threadinfo[tid].last;
{
unsigned j;
{
if (IsValidDrdThreadId(j))
{
- const Segment* const q = s_threadinfo[j].last;
+ const Segment* q;
+ for (q = s_threadinfo[j].last; q; q = q->prev)
if (j != tid && q != 0
&& ! vc_lte(&q->vc, &p->vc) && ! vc_lte(&p->vc, &q->vc))
{
void drd_post_thread_join(DrdThreadId joiner, DrdThreadId joinee);
+void drd_trace_addr(const Addr addr);
+
void drd_pre_mutex_init(Addr mutex, SizeT size, const MutexT mutex_type);
void drd_post_mutex_destroy(Addr mutex, const MutexT mutex_type);
void drd_pre_mutex_lock(const Addr mutex, const SizeT size,
return vc_lte(vc1, vc2) || vc_lte(vc2, vc1);
}
-/**
- * Compute elementwise minimum.
- */
+/** Compute elementwise minimum. */
void vc_min(VectorClock* const result,
const VectorClock* const rhs)
{
tl_assert(result);
tl_assert(rhs);
- // First count the number of shared thread id's.
+ /* First count the number of shared thread ID's. */
j = 0;
shared = 0;
for (i = 0; i < result->size; i++)
vc_check(result);
- // Next, combine both vector clocks into one.
+ /* Next, combine both vector clocks into one. */
i = 0;
for (j = 0; j < rhs->size; j++)
{
vc_check(result);
while (i < result->size && result->vc[i].threadid < rhs->vc[j].threadid)
+ {
+ /* Thread ID is missing in second vector clock. Clear the count. */
+ result->vc[i].count = 0;
i++;
+ }
if (i >= result->size)
{
result->size++;
}
else if (result->vc[i].threadid > rhs->vc[j].threadid)
{
- unsigned k;
- for (k = result->size; k > i; k--)
- {
- result->vc[k] = result->vc[k - 1];
- }
- result->size++;
- result->vc[i] = rhs->vc[j];
- vc_check(result);
+ /* Thread ID is missing in first vector clock. Leave out. */
}
else
{
+ /* The thread ID is present in both vector clocks. Compute the minimum */
+ /* of vc[i].count and vc[j].count. */
tl_assert(result->vc[i].threadid == rhs->vc[j].threadid);
if (rhs->vc[j].count < result->vc[i].count)
{
}
}
vc_check(result);
- tl_assert(result->size == new_size);
}
/**
pth_detached2.vgtest \
pth_detached2.stdout.exp pth_detached2.stderr.exp \
recursive_mutex.vgtest recursive_mutex.stderr.exp \
+ rwlock_race.vgtest rwlock_race.stderr.exp \
sem_as_mutex.vgtest sem_as_mutex.stderr.exp \
sem_as_mutex2.vgtest sem_as_mutex2.stderr.exp \
sigalrm.vgtest \
pth_cond_race \
pth_create_chain \
pth_detached \
- sem_as_mutex \
recursive_mutex \
+ rwlock_race \
+ sem_as_mutex \
sigalrm \
tc01_simple_race \
tc02_simple_tls \
recursive_mutex_SOURCES = recursive_mutex.c
recursive_mutex_LDADD = -lpthread
+rwlock_race_SOURCES = rwlock_race.c
+rwlock_race_LDADD = -lpthread
+
sem_as_mutex_SOURCES = sem_as_mutex.c
sem_as_mutex_LDADD = -lpthread
--- /dev/null
+/** Cause a race inside code protected by a reader lock. */
+
+
+/* Needed for older glibc's (2.3 and older, at least) who don't
+ otherwise "know" about pthread_rwlock_anything or about
+ PTHREAD_MUTEX_RECURSIVE (amongst things). */
+
+#define _GNU_SOURCE 1
+
+#include <stdio.h>
+#include <pthread.h>
+#include "../drd_clientreq.h"
+
+
+static pthread_rwlock_t s_rwlock;
+static int s_racy;
+
+static void* thread(void* arg)
+{
+ pthread_rwlock_rdlock(&s_rwlock);
+ s_racy++;
+ pthread_rwlock_unlock(&s_rwlock);
+ return 0;
+}
+
+int main(int argc, char** argv)
+{
+ pthread_t thread1;
+ pthread_t thread2;
+
+#if 0
+ int res;
+ VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__DRD_TRACE_ADDR,
+ &s_racy, 0, 0, 0, 0);
+#endif
+
+ pthread_rwlock_init(&s_rwlock, 0);
+ pthread_create(&thread1, 0, thread, 0);
+ pthread_create(&thread2, 0, thread, 0);
+ pthread_join(thread1, 0);
+ pthread_join(thread2, 0);
+ pthread_rwlock_destroy(&s_rwlock);
+
+ fprintf(stderr, "Result: %d\n", s_racy);
+
+ return 0;
+}
--- /dev/null
+
+Thread 2:
+Conflicting load by thread 2 at 0x........ size 4
+ at 0x........: thread (rwlock_race.c:?)
+ by 0x........: vg_thread_wrapper (drd_intercepts.c:?)
+ by 0x........: start_thread (in libpthread-?.?.so)
+ by 0x........: clone (in /...libc...)
+Allocation context: unknown
+Other segment start (thread 2)
+ (thread finished, call stack no longer available)
+Other segment end (thread 2)
+ (thread finished, call stack no longer available)
+
+Conflicting store by thread 2 at 0x........ size 4
+ at 0x........: thread (rwlock_race.c:?)
+ by 0x........: vg_thread_wrapper (drd_intercepts.c:?)
+ by 0x........: start_thread (in libpthread-?.?.so)
+ by 0x........: clone (in /...libc...)
+Allocation context: unknown
+Other segment start (thread 2)
+ (thread finished, call stack no longer available)
+Other segment end (thread 2)
+ (thread finished, call stack no longer available)
+Result: 2
+
+ERROR SUMMARY: 2 errors from 2 contexts (suppressed: 0 from 0)
--- /dev/null
+prog: rwlock_race
-ERROR SUMMARY: 2 errors from 2 contexts
+ERROR SUMMARY: 4 errors from 4 contexts
+Thread 2:
+Conflicting load by thread 2 at 0x........ size 4
+ at 0x........: vfprintf (in /...libc...)
+ by 0x........: printf (in /...libc...)
+ by 0x........: child (tc21_pthonce.c:73)
+ by 0x........: vg_thread_wrapper (drd_intercepts.c:?)
+ by 0x........: start_thread (in libpthread-?.?.so)
+ by 0x........: clone (in /...libc...)
+Allocation context: unknown
+Other segment start (thread 1)
+ at 0x........: clone (in /...libc...)
+ by 0x........: do_clone (in libpthread-?.?.so)
+ by 0x........: pthread_create@@GLIBC_2.2.5 (in libpthread-?.?.so)
+ by 0x........: pthread_create* (drd_intercepts.c:?)
+ by 0x........: main (tc21_pthonce.c:86)
+Other segment end (thread 1)
+ at 0x........: clone (in /...libc...)
+ by 0x........: do_clone (in libpthread-?.?.so)
+ by 0x........: pthread_create@@GLIBC_2.2.5 (in libpthread-?.?.so)
+ by 0x........: pthread_create* (drd_intercepts.c:?)
+ by 0x........: main (tc21_pthonce.c:86)
-ERROR SUMMARY: 0 errors from 0 contexts (suppressed: 0 from 0)
+Conflicting load by thread 2 at 0x........ size 4
+ at 0x........: vfprintf (in /...libc...)
+ by 0x........: printf (in /...libc...)
+ by 0x........: child (tc21_pthonce.c:73)
+ by 0x........: vg_thread_wrapper (drd_intercepts.c:?)
+ by 0x........: start_thread (in libpthread-?.?.so)
+ by 0x........: clone (in /...libc...)
+Allocation context: unknown
+Other segment start (thread 1)
+ at 0x........: clone (in /...libc...)
+ by 0x........: do_clone (in libpthread-?.?.so)
+ by 0x........: pthread_create@@GLIBC_2.2.5 (in libpthread-?.?.so)
+ by 0x........: pthread_create* (drd_intercepts.c:?)
+ by 0x........: main (tc21_pthonce.c:86)
+Other segment end (thread 1)
+ at 0x........: clone (in /...libc...)
+ by 0x........: do_clone (in libpthread-?.?.so)
+ by 0x........: pthread_create@@GLIBC_2.2.5 (in libpthread-?.?.so)
+ by 0x........: pthread_create* (drd_intercepts.c:?)
+ by 0x........: main (tc21_pthonce.c:86)
+
+ERROR SUMMARY: 2 errors from 2 contexts (suppressed: 0 from 0)
fun:pthread_create@@GLIBC_*
fun:pthread_create*
}
+{
+ pthread
+ exp-drd:ConflictingAccess
+ fun:clone
+ fun:do_clone
+ fun:pthread_create@@GLIBC_*
+ fun:pthread_create*
+}
{
pthread-glibc2.7-pthread_create
exp-drd:ConflictingAccess
fun:pthread_mutex_lock
fun:pthread_mutex_lock
}
+{
+ pthread
+ exp-drd:ConflictingAccess
+ fun:__pthread_mutex_cond_lock
+ fun:pthread_cond_wait@@GLIBC_*
+ fun:pthread_cond_wait*
+}
{
pthread
exp-drd:ConflictingAccess