#define __DRD_CLIENTOBJ_H
-#include "drd_clientreq.h" /* MutexT */
-#include "drd_thread.h" /* DrdThreadId */
+#include "drd_clientreq.h" /* MutexT */
+#include "drd_thread.h" /* DrdThreadId */
#include "pub_tool_basics.h"
+#include "pub_tool_execontext.h" /* ExeContext */
#include "pub_tool_oset.h"
int recursion_count; // 0 if free, >= 1 if locked.
DrdThreadId owner; // owner if locked, last owner if free.
Segment* last_locked_segment;
+ ULong acquiry_time_ms;
+ ExeContext* acquired_at;
};
struct cond_info
struct barrier_info
{
- Addr a1;
- ObjType type;
- void (*cleanup)(union drd_clientobj*);
+ Addr a1;
+ ObjType type;
+ void (*cleanup)(union drd_clientobj*);
BarrierT barrier_type; // pthread_barrier or gomp_barrier.
Word count; // Participant count in a barrier wait.
Word pre_iteration; // pthread_barrier_wait() call count modulo two.
struct rwlock_info
{
- Addr a1;
- ObjType type;
- void (*cleanup)(union drd_clientobj*);
- OSet* thread_info;
+ Addr a1;
+ ObjType type;
+ void (*cleanup)(union drd_clientobj*);
+ OSet* thread_info;
+ ULong acquiry_time_ms;
+ ExeContext* acquired_at;
};
typedef union drd_clientobj
VG_(pp_ExeContext)(VG_(get_error_where)(e));
break;
}
+ /* HoldtimeErr: a mutex / rwlock was held longer than the user-specified */
+ /* threshold. Print where the lock was acquired, then the hold-time      */
+ /* summary, then where the error was detected (the unlock site).         */
+ case HoldtimeErr: {
+ HoldtimeErrInfo* p =(HoldtimeErrInfo*)(VG_(get_error_extra)(e));
+ tl_assert(p);
+ tl_assert(p->acquired_at);
+ VG_(message)(Vg_UserMsg, "Acquired at:");
+ VG_(pp_ExeContext)(p->acquired_at);
+ VG_(message)(Vg_UserMsg,
+ "Lock on %s 0x%lx was held during %d ms (threshold: %d ms).",
+ VG_(get_error_string)(e),
+ p->synchronization_object,
+ p->hold_time_ms,
+ p->threshold_ms);
+ /* NOTE(review): hold_time_ms/threshold_ms are UInt but printed with %d; */
+ /* %u would match the declared types -- confirm message conventions.     */
+ VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ break;
+ }
case GenericErr: {
//GenericErrInfo* gei =(GenericErrInfo*)(VG_(get_error_extra)(e));
VG_(message)(Vg_UserMsg, "%s", VG_(get_error_string)(e));
return sizeof(BarrierErrInfo);
case RwlockErr:
return sizeof(RwlockErrInfo);
+ case HoldtimeErr:
+ return sizeof(HoldtimeErrInfo);
case GenericErr:
return sizeof(GenericErrInfo);
default:
;
else if (VG_(strcmp)(name, STR_RwlockErr) == 0)
;
+ else if (VG_(strcmp)(name, STR_HoldtimeErr) == 0)
+ ;
else if (VG_(strcmp)(name, STR_GenericErr) == 0)
;
else
case SemaphoreErr: return VGAPPEND(STR_, SemaphoreErr);
case BarrierErr: return VGAPPEND(STR_, BarrierErr);
case RwlockErr: return VGAPPEND(STR_, RwlockErr);
+ case HoldtimeErr: return VGAPPEND(STR_, HoldtimeErr);
case GenericErr: return VGAPPEND(STR_, GenericErr);
default:
tl_assert(0);
BarrierErr = 7,
#define STR_RwlockErr "RwlockErr"
RwlockErr = 8,
+#define STR_HoldtimeErr "HoldtimeErr"
+ HoldtimeErr = 9,
#define STR_GenericErr "GenericErr"
- GenericErr = 9,
+ GenericErr = 10,
} DrdErrorKind;
/* The classification of a faulting address. */
Addr rwlock;
} RwlockErrInfo;
+/* Extra information attached to a HoldtimeErr error: the address of the
+ * lock that was held too long, the stack trace where it was acquired,
+ * how long it was held (ms) and the threshold (ms) that was exceeded.
+ * Passed as the 'extra' argument to VG_(maybe_record_error)(). */
+typedef struct {
+ Addr synchronization_object;
+ ExeContext* acquired_at;
+ UInt hold_time_ms;
+ UInt threshold_ms;
+} HoldtimeErrInfo;
+
typedef struct {
} GenericErrInfo;
static Bool drd_process_cmd_line_option(Char* arg)
{
- int segment_merging = -1;
- int show_confl_seg = -1;
- int trace_barrier = -1;
- int trace_clientobj = -1;
- int trace_cond = -1;
- int trace_csw = -1;
- int trace_danger_set = -1;
- int trace_mutex = -1;
- int trace_rwlock = -1;
- int trace_segment = -1;
- int trace_semaphore = -1;
- int trace_suppression = -1;
- Char* trace_address = 0;
-
- VG_BOOL_CLO (arg, "--check-stack-var", s_drd_check_stack_var)
- else VG_BOOL_CLO(arg, "--drd-stats", s_drd_print_stats)
- else VG_BOOL_CLO(arg, "--segment-merging", segment_merging)
- else VG_BOOL_CLO(arg, "--show-confl-seg", show_confl_seg)
- else VG_BOOL_CLO(arg, "--show-stack-usage", s_show_stack_usage)
- else VG_BOOL_CLO(arg, "--trace-barrier", trace_barrier)
- else VG_BOOL_CLO(arg, "--trace-clientobj", trace_clientobj)
- else VG_BOOL_CLO(arg, "--trace-cond", trace_cond)
- else VG_BOOL_CLO(arg, "--trace-csw", trace_csw)
- else VG_BOOL_CLO(arg, "--trace-danger-set", trace_danger_set)
- else VG_BOOL_CLO(arg, "--trace-fork-join", s_drd_trace_fork_join)
- else VG_BOOL_CLO(arg, "--trace-mutex", trace_mutex)
- else VG_BOOL_CLO(arg, "--trace-rwlock", trace_rwlock)
- else VG_BOOL_CLO(arg, "--trace-segment", trace_segment)
- else VG_BOOL_CLO(arg, "--trace-semaphore", trace_semaphore)
- else VG_BOOL_CLO(arg, "--trace-suppr", trace_suppression)
- else VG_BOOL_CLO(arg, "--var-info", s_drd_var_info)
- else VG_STR_CLO (arg, "--trace-addr", trace_address)
+ int exclusive_threshold_ms = -1;
+ int segment_merging = -1;
+ int shared_threshold_ms = -1;
+ int show_confl_seg = -1;
+ int trace_barrier = -1;
+ int trace_clientobj = -1;
+ int trace_cond = -1;
+ int trace_csw = -1;
+ int trace_danger_set = -1;
+ int trace_mutex = -1;
+ int trace_rwlock = -1;
+ int trace_segment = -1;
+ int trace_semaphore = -1;
+ int trace_suppression = -1;
+ Char* trace_address = 0;
+
+ VG_BOOL_CLO (arg, "--check-stack-var", s_drd_check_stack_var)
+ else VG_BOOL_CLO(arg, "--drd-stats", s_drd_print_stats)
+ else VG_BOOL_CLO(arg, "--segment-merging", segment_merging)
+ else VG_BOOL_CLO(arg, "--show-confl-seg", show_confl_seg)
+ else VG_BOOL_CLO(arg, "--show-stack-usage", s_show_stack_usage)
+ else VG_BOOL_CLO(arg, "--trace-barrier", trace_barrier)
+ else VG_BOOL_CLO(arg, "--trace-clientobj", trace_clientobj)
+ else VG_BOOL_CLO(arg, "--trace-cond", trace_cond)
+ else VG_BOOL_CLO(arg, "--trace-csw", trace_csw)
+ else VG_BOOL_CLO(arg, "--trace-danger-set", trace_danger_set)
+ else VG_BOOL_CLO(arg, "--trace-fork-join", s_drd_trace_fork_join)
+ else VG_BOOL_CLO(arg, "--trace-mutex", trace_mutex)
+ else VG_BOOL_CLO(arg, "--trace-rwlock", trace_rwlock)
+ else VG_BOOL_CLO(arg, "--trace-segment", trace_segment)
+ else VG_BOOL_CLO(arg, "--trace-semaphore", trace_semaphore)
+ else VG_BOOL_CLO(arg, "--trace-suppr", trace_suppression)
+ else VG_BOOL_CLO(arg, "--var-info", s_drd_var_info)
+ else VG_NUM_CLO (arg, "--exclusive-threshold", exclusive_threshold_ms)
+ else VG_NUM_CLO (arg, "--shared-threshold", shared_threshold_ms)
+ else VG_STR_CLO (arg, "--trace-addr", trace_address)
else
return VG_(replacement_malloc_process_cmd_line_option)(arg);
+ if (exclusive_threshold_ms != -1)
+ {
+ mutex_set_lock_threshold(exclusive_threshold_ms);
+ rwlock_set_exclusive_threshold(exclusive_threshold_ms);
+ }
+ if (shared_threshold_ms != -1)
+ {
+ rwlock_set_shared_threshold(shared_threshold_ms);
+ }
if (segment_merging != -1)
thread_set_segment_merging(segment_merging);
if (show_confl_seg != -1)
VG_(printf)(
" --check-stack-var=yes|no Whether or not to report data races on\n"
" stack variables [no].\n"
+" --exclusive-threshold=<n> Print an error message if any mutex or\n"
+" writer lock is held longer than the specified time (in milliseconds).\n"
" --segment-merging=yes|no Controls segment merging [yes].\n"
" Segment merging is an algorithm to limit memory usage of the\n"
" data race detection algorithm. Disabling segment merging may\n"
" improve the accuracy of the so-called 'other segments' displayed\n"
" in race reports but can also trigger an out of memory error.\n"
+" --shared-threshold=<n> Print an error message if a reader lock\n"
+" is held longer than the specified time (in milliseconds).\n"
" --show-confl-seg=yes|no Show conflicting segments in race reports [yes].\n"
" --show-stack-usage=yes|no Print stack usage at thread exit time [no].\n"
" --var-info=yes|no Display the names of global, static and\n"
#include "drd_error.h"
#include "drd_mutex.h"
#include "priv_drd_clientreq.h"
+#include "pub_tool_vki.h"
#include "pub_tool_errormgr.h" // VG_(maybe_record_error)()
#include "pub_tool_libcassert.h" // tl_assert()
#include "pub_tool_libcbase.h" // VG_(strlen)
#include "pub_tool_libcprint.h" // VG_(message)()
+#include "pub_tool_libcproc.h" // VG_(read_millisecond_timer)()
#include "pub_tool_machine.h" // VG_(get_IP)()
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()
static Bool s_trace_mutex;
static ULong s_mutex_lock_count;
static ULong s_mutex_segment_creation_count;
+static UInt s_mutex_lock_threshold_ms = 1000 * 1000;
// Function definitions.
s_trace_mutex = trace_mutex;
}
+/** Set the hold-time threshold in milliseconds above which a HoldtimeErr
+ *  is reported when a mutex is unlocked (see mutex_unlock()). Driven by
+ *  the --exclusive-threshold command-line option; 0 disables the check. */
+void mutex_set_lock_threshold(const UInt lock_threshold_ms)
+{
+ s_mutex_lock_threshold_ms = lock_threshold_ms;
+}
+
static
void mutex_initialize(struct mutex_info* const p,
const Addr mutex, const MutexT mutex_type)
p->recursion_count = 0;
p->owner = DRD_INVALID_THREADID;
p->last_locked_segment = 0;
+ p->acquiry_time_ms = 0;
+ p->acquired_at = 0;
}
/** Deallocate the memory that was allocated by mutex_initialize(). */
thread_new_segment(drd_tid);
s_mutex_segment_creation_count++;
- p->owner = drd_tid;
+ p->owner = drd_tid;
+ p->acquiry_time_ms = VG_(read_millisecond_timer)();
+ p->acquired_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
s_mutex_lock_count++;
}
else if (p->owner != drd_tid)
p->recursion_count++;
}
-/**
- * Update mutex_info state when unlocking the pthread_mutex_t mutex.
- * Note: this function must be called before pthread_mutex_unlock() is called,
- * or a race condition is triggered !
- * @return New value of the mutex recursion count.
- * @param mutex Pointer to pthread_mutex_t data structure in the client space.
- * @param tid ThreadId of the thread calling pthread_mutex_unlock().
- * @param vc Pointer to the current vector clock of thread tid.
+/** Update mutex_info state when unlocking the pthread_mutex_t mutex.
+ *
+ * @param mutex Pointer to pthread_mutex_t data structure in the client space.
+ * @param tid ThreadId of the thread calling pthread_mutex_unlock().
+ * @param vc Pointer to the current vector clock of thread tid.
+ *
+ * @return New value of the mutex recursion count.
+ *
+ * @note This function must be called before pthread_mutex_unlock() is called,
+ * or a race condition is triggered !
*/
void mutex_unlock(const Addr mutex, const MutexT mutex_type)
{
if (p->recursion_count == 0)
{
+ if (s_mutex_lock_threshold_ms > 0)
+ {
+ ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
+ if (held > s_mutex_lock_threshold_ms)
+ {
+ HoldtimeErrInfo HEI
+ = { mutex, p->acquired_at, held, s_mutex_lock_threshold_ms };
+ VG_(maybe_record_error)(vg_tid,
+ HoldtimeErr,
+ VG_(get_IP)(vg_tid),
+ "mutex",
+ &HEI);
+ }
+ }
+
/* This pthread_mutex_unlock() call really unlocks the mutex. Save the */
/* current vector clock of the thread such that it is available when */
/* this mutex is locked again. */
thread_get_latest_segment(&p->last_locked_segment, drd_tid);
thread_new_segment(drd_tid);
+ p->acquired_at = 0;
s_mutex_segment_creation_count++;
}
}
void mutex_set_trace(const Bool trace_mutex);
+void mutex_set_lock_threshold(const UInt lock_threshold_ms);
struct mutex_info* mutex_init(const Addr mutex,
const MutexT mutex_type);
void mutex_post_destroy(const Addr mutex);
#include "drd_error.h"
#include "drd_rwlock.h"
#include "priv_drd_clientreq.h"
+#include "pub_tool_vki.h"
#include "pub_tool_errormgr.h" // VG_(maybe_record_error)()
#include "pub_tool_libcassert.h" // tl_assert()
#include "pub_tool_libcprint.h" // VG_(message)()
+#include "pub_tool_libcproc.h" // VG_(read_millisecond_timer)()
#include "pub_tool_machine.h" // VG_(get_IP)()
#include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)()
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()
// Local variables.
static Bool s_trace_rwlock;
+static UInt s_exclusive_threshold_ms;
+static UInt s_shared_threshold_ms;
// Function definitions.
s_trace_rwlock = trace_rwlock;
}
+/** Set the hold-time threshold in milliseconds for writer (exclusive)
+ *  rwlock locks; exceeded holds are reported as HoldtimeErr on unlock.
+ *  Driven by --exclusive-threshold; 0 disables the check. */
+void rwlock_set_exclusive_threshold(const UInt exclusive_threshold_ms)
+{
+ s_exclusive_threshold_ms = exclusive_threshold_ms;
+}
+
+/** Set the hold-time threshold in milliseconds for reader (shared)
+ *  rwlock locks; exceeded holds are reported as HoldtimeErr on unlock.
+ *  Driven by --shared-threshold; 0 disables the check. */
+void rwlock_set_shared_threshold(const UInt shared_threshold_ms)
+{
+ s_shared_threshold_ms = shared_threshold_ms;
+}
+
static Bool rwlock_is_rdlocked(struct rwlock_info* p)
{
struct rwlock_thread_info* q;
tl_assert(p->a1 == rwlock);
tl_assert(p->type == ClientRwlock);
- p->cleanup = (void(*)(DrdClientobj*))&rwlock_cleanup;
- p->thread_info = VG_(OSetGen_Create)(0, 0, VG_(malloc), VG_(free));
+ p->cleanup = (void(*)(DrdClientobj*))&rwlock_cleanup;
+ p->thread_info = VG_(OSetGen_Create)(0, 0, VG_(malloc), VG_(free));
+ p->acquiry_time_ms = 0;
+ p->acquired_at = 0;
}
/** Deallocate the memory that was allocated by rwlock_initialize(). */
q->last_lock_was_writer_lock = False;
thread_new_segment(drd_tid);
s_rwlock_segment_creation_count++;
+
+ p->acquiry_time_ms = VG_(read_millisecond_timer)();
+ p->acquired_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
}
}
rwlock_combine_other_vc(p, drd_tid, True);
thread_new_segment(drd_tid);
s_rwlock_segment_creation_count++;
+ p->acquiry_time_ms = VG_(read_millisecond_timer)();
+ p->acquired_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
}
/**
q = lookup_or_insert_node(p->thread_info, drd_tid);
tl_assert(q);
if (q->reader_nesting_count > 0)
+ {
q->reader_nesting_count--;
+ if (q->reader_nesting_count == 0 && s_shared_threshold_ms > 0)
+ {
+ ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
+ if (held > s_shared_threshold_ms)
+ {
+ HoldtimeErrInfo HEI
+ = { rwlock, p->acquired_at, held, s_shared_threshold_ms };
+ VG_(maybe_record_error)(vg_tid,
+ HoldtimeErr,
+ VG_(get_IP)(vg_tid),
+ "rwlock",
+ &HEI);
+ }
+ }
+ }
else if (q->writer_nesting_count > 0)
+ {
q->writer_nesting_count--;
+ if (q->writer_nesting_count == 0 && s_exclusive_threshold_ms > 0)
+ {
+ ULong held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
+ if (held > s_exclusive_threshold_ms)
+ {
+ HoldtimeErrInfo HEI
+ = { rwlock, p->acquired_at, held, s_exclusive_threshold_ms };
+ VG_(maybe_record_error)(vg_tid,
+ HoldtimeErr,
+ VG_(get_IP)(vg_tid),
+ "rwlock",
+ &HEI);
+ }
+ }
+ }
else
+ {
tl_assert(False);
+ }
if (q->reader_nesting_count == 0 && q->writer_nesting_count == 0)
{
void rwlock_set_trace(const Bool trace_rwlock);
+void rwlock_set_exclusive_threshold(const UInt exclusive_threshold_ms);
+void rwlock_set_shared_threshold(const UInt shared_threshold_ms);
struct rwlock_info* rwlock_pre_init(const Addr rwlock);
void rwlock_post_destroy(const Addr rwlock);
void rwlock_pre_rdlock(const Addr rwlock);