* Record an access of type access_type at addresses a1 .. a2 - 1 in
* bitmap bm.
*/
-static inline
+static
void bm_access_range(struct bitmap* const bm,
const Addr a1, const Addr a2,
const BmAccessTypeT access_type)
}
}
+static inline
+void bm_access_aligned_load(struct bitmap* const bm,
+ const Addr a1, const Addr a2)
+{
+ struct bitmap2* bm2;
+
+#if 0
+ /* Commented out the statements below for performance reasons. */
+ tl_assert(bm);
+ tl_assert(a1 < a2);
+ tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
+ || (a2 - a1) == 4 || (a2 - a1) == 8);
+ tl_assert((a1 & (a2 - a1 - 1)) == 0);
+#endif
+
+ bm2 = bm2_lookup_or_insert(bm, a1 >> ADDR0_BITS);
+ tl_assert(bm2);
+
+ bm0_set_range(bm2->bm1.bm0_r, a1 & ADDR0_MASK, (a2 - 1) & ADDR0_MASK);
+}
+
+static inline
+void bm_access_aligned_store(struct bitmap* const bm,
+ const Addr a1, const Addr a2)
+{
+ struct bitmap2* bm2;
+
+#if 0
+ /* Commented out the statements below for performance reasons. */
+ tl_assert(bm);
+ tl_assert(a1 < a2);
+ tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
+ || (a2 - a1) == 4 || (a2 - a1) == 8);
+ tl_assert((a1 & (a2 - a1 - 1)) == 0);
+#endif
+
+ bm2 = bm2_lookup_or_insert(bm, a1 >> ADDR0_BITS);
+ tl_assert(bm2);
+
+ bm0_set_range(bm2->bm1.bm0_w, a1 & ADDR0_MASK, (a2 - 1) & ADDR0_MASK);
+}
+
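+#if 0
+/* Minimal usage sketch, left inside an #if 0 block: an aligned 4-byte load
+ * marks all four byte addresses as accessed through a single bm2 lookup,
+ * while a misaligned one falls back to bm_access_range().  The sketch assumes
+ * bm_new() and a bm_has(bm, a1, a2, access_type) query as declared in
+ * drd_bitmap.h; it is illustrative only.
+ */
+static void bm_aligned_access_example(void)
+{
+ struct bitmap* const bm = bm_new();
+
+ bm_access_load_4(bm, 0x1000); /* aligned: one bm0_set_range() call */
+ tl_assert(bm_has(bm, 0x1000, 0x1004, eLoad)); /* bytes 0x1000 .. 0x1003 */
+
+ bm_access_load_4(bm, 0x1001); /* misaligned: generic bm_access_range() */
+ tl_assert(bm_has(bm, 0x1001, 0x1005, eLoad)); /* bytes 0x1001 .. 0x1004 */
+
+ bm_delete(bm);
+}
+#endif
+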
void bm_access_range_load(struct bitmap* const bm,
const Addr a1, const Addr a2)
{
bm_access_range(bm, a1, a2, eLoad);
}
+void bm_access_load_1(struct bitmap* const bm, const Addr a1)
+{
+ bm_access_aligned_load(bm, a1, a1 + 1);
+}
+
+void bm_access_load_2(struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 1) == 0)
+ bm_access_aligned_load(bm, a1, a1 + 2);
+ else
+ bm_access_range(bm, a1, a1 + 2, eLoad);
+}
+
+void bm_access_load_4(struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 3) == 0)
+ bm_access_aligned_load(bm, a1, a1 + 4);
+ else
+ bm_access_range(bm, a1, a1 + 4, eLoad);
+}
+
+void bm_access_load_8(struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 7) == 0)
+ bm_access_aligned_load(bm, a1, a1 + 8);
+ else if ((a1 & 3) == 0)
+ {
+ bm_access_aligned_load(bm, a1 + 0, a1 + 4);
+ bm_access_aligned_load(bm, a1 + 4, a1 + 8);
+ }
+ else
+ bm_access_range(bm, a1, a1 + 8, eLoad);
+}
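+
+/* Example of the dispatch above: a1 == 0x1000 is 8-byte aligned and takes the
+ * single-call fast path; a1 == 0x1004 is only 4-byte aligned and is recorded
+ * as two aligned 4-byte halves (0x1004 .. 0x1007 and 0x1008 .. 0x100b); any
+ * other alignment, e.g. a1 == 0x1002, falls through to the generic
+ * bm_access_range().
+ */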
+
+void bm_access_store_1(struct bitmap* const bm, const Addr a1)
+{
+ bm_access_aligned_store(bm, a1, a1 + 1);
+}
+
+void bm_access_store_2(struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 1) == 0)
+ bm_access_aligned_store(bm, a1, a1 + 2);
+ else
+ bm_access_range(bm, a1, a1 + 2, eStore);
+}
+
+void bm_access_store_4(struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 3) == 0)
+ bm_access_aligned_store(bm, a1, a1 + 4);
+ else
+ bm_access_range(bm, a1, a1 + 4, eStore);
+}
+
+void bm_access_store_8(struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 7) == 0)
+ bm_access_aligned_store(bm, a1, a1 + 8);
+ else if ((a1 & 3) == 0)
+ {
+ bm_access_aligned_store(bm, a1 + 0, a1 + 4);
+ bm_access_aligned_store(bm, a1 + 4, a1 + 8);
+ }
+ else
+ bm_access_range(bm, a1, a1 + 8, eStore);
+}
+
void bm_access_range_store(struct bitmap* const bm,
const Addr a1, const Addr a2)
{
UWord mask;
#if 0
- // Commented out the assert statements below because of performance reasons.
+ /* Commented out the statements below for performance reasons. */
tl_assert(a1);
tl_assert(a1 <= a2);
tl_assert(UWORD_MSB(a1) == UWORD_MSB(a2)
}
}
-inline
Bool bm_has_conflict_with(const struct bitmap* const bm,
const Addr a1, const Addr a2,
const BmAccessTypeT access_type)
return False;
}
+static inline
+Bool bm_aligned_load_has_conflict_with(const struct bitmap* const bm,
+ const Addr a1, const Addr a2)
+{
+ struct bitmap2* bm2;
+
+#if 0
+ /* Commented out the statements below for performance reasons. */
+ tl_assert(bm);
+ tl_assert(a1 < a2);
+ tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
+ || (a2 - a1) == 4 || (a2 - a1) == 8);
+ tl_assert((a1 & (a2 - a1 - 1)) == 0);
+#endif
+
+ bm2 = bm_lookup(bm, a1);
+
+ if (bm2
+ && bm0_is_any_set(bm2->bm1.bm0_w, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK))
+ {
+ return True;
+ }
+ return False;
+}
+
+static inline
+Bool bm_aligned_store_has_conflict_with(const struct bitmap* const bm,
+ const Addr a1, const Addr a2)
+{
+ struct bitmap2* bm2;
+
+#if 0
+ /* Commented out the statements below for performance reasons. */
+ tl_assert(bm);
+ tl_assert(a1 < a2);
+ tl_assert((a2 - a1) == 1 || (a2 - a1) == 2
+ || (a2 - a1) == 4 || (a2 - a1) == 8);
+ tl_assert((a1 & (a2 - a1 - 1)) == 0);
+#endif
+
+ bm2 = bm_lookup(bm, a1);
+
+ if (bm2)
+ {
+ const struct bitmap1* const p1 = &bm2->bm1;
+
+ if (bm0_is_any_set(p1->bm0_r, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK)
+ | bm0_is_any_set(p1->bm0_w, a1 & ADDR0_MASK, (a2-1) & ADDR0_MASK))
+ {
+ return True;
+ }
+ }
+ return False;
+}
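+
+/* Note the asymmetry between the two helpers above: a load only conflicts
+ * with stores recorded in the other bitmap (only bm0_w is inspected), since
+ * two loads of the same location never race, whereas a store conflicts with
+ * both loads and stores (bm0_r and bm0_w are inspected).
+ */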
+
Bool bm_load_has_conflict_with(const struct bitmap* const bm,
const Addr a1, const Addr a2)
{
return bm_has_conflict_with(bm, a1, a2, eLoad);
}
+Bool bm_load_1_has_conflict_with(const struct bitmap* const bm, const Addr a1)
+{
+ return bm_aligned_load_has_conflict_with(bm, a1, a1 + 1);
+}
+
+Bool bm_load_2_has_conflict_with(const struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 1) == 0)
+ return bm_aligned_load_has_conflict_with(bm, a1, a1 + 2);
+ else
+ return bm_has_conflict_with(bm, a1, a1 + 2, eLoad);
+}
+
+Bool bm_load_4_has_conflict_with(const struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 3) == 0)
+ return bm_aligned_load_has_conflict_with(bm, a1, a1 + 4);
+ else
+ return bm_has_conflict_with(bm, a1, a1 + 4, eLoad);
+}
+
+Bool bm_load_8_has_conflict_with(const struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 7) == 0)
+ return bm_aligned_load_has_conflict_with(bm, a1, a1 + 8);
+ else
+ return bm_has_conflict_with(bm, a1, a1 + 8, eLoad);
+}
+
+Bool bm_store_1_has_conflict_with(const struct bitmap* const bm, const Addr a1)
+{
+ return bm_aligned_store_has_conflict_with(bm, a1, a1 + 1);
+}
+
+Bool bm_store_2_has_conflict_with(const struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 1) == 0)
+ return bm_aligned_store_has_conflict_with(bm, a1, a1 + 2);
+ else
+ return bm_has_conflict_with(bm, a1, a1 + 2, eStore);
+}
+
+Bool bm_store_4_has_conflict_with(const struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 3) == 0)
+ return bm_aligned_store_has_conflict_with(bm, a1, a1 + 4);
+ else
+ return bm_has_conflict_with(bm, a1, a1 + 4, eStore);
+}
+
+Bool bm_store_8_has_conflict_with(const struct bitmap* const bm, const Addr a1)
+{
+ if ((a1 & 7) == 0)
+ return bm_aligned_store_has_conflict_with(bm, a1, a1 + 8);
+ else
+ return bm_has_conflict_with(bm, a1, a1 + 8, eStore);
+}
+
Bool bm_store_has_conflict_with(const struct bitmap* const bm,
const Addr a1, const Addr a2)
{
#define ADDR0_BITS 12
-#define ADDR0_COUNT (1UL << ADDR0_BITS)
+#define ADDR0_COUNT ((UWord)1 << ADDR0_BITS)
#define ADDR0_MASK (ADDR0_COUNT - 1)
static __inline__ UWord bm0_mask(const Addr a)
{
- return (1UL << UWORD_LSB(a));
+ return ((UWord)1 << UWORD_LSB(a));
}
static __inline__ void bm0_set(UWord* bm0, const Addr a)
{
//tl_assert(a < ADDR0_COUNT);
- bm0[a >> BITS_PER_BITS_PER_UWORD] |= 1UL << UWORD_LSB(a);
+ bm0[a >> BITS_PER_BITS_PER_UWORD] |= (UWord)1 << UWORD_LSB(a);
+}
+
+/** Set all of the addresses in range a1..a2 (inclusive) in bitmap bm0. */
+static __inline__ void bm0_set_range(UWord* bm0, const Addr a1, const Addr a2)
+{
+#if 0
+ tl_assert(a1 < ADDR0_COUNT);
+ tl_assert(a2 < ADDR0_COUNT);
+ tl_assert(a1 <= a2);
+ tl_assert(UWORD_MSB(a1) == UWORD_MSB(a2));
+#endif
+ bm0[a1 >> BITS_PER_BITS_PER_UWORD]
+ |= ((UWord)2 << UWORD_LSB(a2)) - ((UWord)1 << UWORD_LSB(a1));
}
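+
+/* The expression above sets bits UWORD_LSB(a1) .. UWORD_LSB(a2) with a single
+ * OR.  A worked example, assuming UWORD_LSB(a) is the bit index of a within
+ * its UWord: for a1 == 4 and a2 == 7,
+ *   ((UWord)2 << 7) - ((UWord)1 << 4) == 0x100 - 0x10 == 0xf0,
+ * i.e. bits 4 .. 7 of bm0[0] become set.  The (commented-out) precondition
+ * UWORD_MSB(a1) == UWORD_MSB(a2) guarantees that both ends fall in the same
+ * UWord.
+ */
+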
static __inline__ void bm0_clear(UWord* bm0, const Addr a)
{
//tl_assert(a < ADDR0_COUNT);
- bm0[a >> BITS_PER_BITS_PER_UWORD] &= ~(1UL << UWORD_LSB(a));
+ bm0[a >> BITS_PER_BITS_PER_UWORD] &= ~((UWord)1 << UWORD_LSB(a));
}
static __inline__ UWord bm0_is_set(const UWord* bm0, const Addr a)
{
//tl_assert(a < ADDR0_COUNT);
- return (bm0[a >> BITS_PER_BITS_PER_UWORD] & (1UL << UWORD_LSB(a)));
+ return (bm0[a >> BITS_PER_BITS_PER_UWORD] & ((UWord)1 << UWORD_LSB(a)));
}
+/** Return true if any of the bits a1..a2 (inclusive) are set in bm0. */
+static __inline__ UWord bm0_is_any_set(const UWord* bm0,
+ const Addr a1, const Addr a2)
+{
+#if 0
+ tl_assert(a1 < ADDR0_COUNT);
+ tl_assert(a2 < ADDR0_COUNT);
+ tl_assert(a1 <= a2);
+ tl_assert(UWORD_MSB(a1) == UWORD_MSB(a2));
+#endif
+ return (bm0[a1 >> BITS_PER_BITS_PER_UWORD]
+ & (((UWord)2 << UWORD_LSB(a2)) - ((UWord)1 << UWORD_LSB(a1))));
+}
struct bitmap2
{
/* Complete bitmap. */
struct bitmap
{
- OSet* oset;
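+ /** One-entry cache of the most recent bitmap2 lookup: last_lookup_a1 holds
+  * the upper address bits (a >> ADDR0_BITS) of the node that was last found
+  * or inserted, and last_lookup_result points to that node.  The lookup
+  * helpers refresh both fields through a cast that drops the const
+  * qualifier, since updating the cache does not change the logical contents
+  * of the bitmap. */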
+ Addr last_lookup_a1;
+ struct bitmap2* last_lookup_result;
+ OSet* oset;
};
static __inline__
struct bitmap2* bm_lookup(const struct bitmap* const bm, const Addr a)
{
+ struct bitmap2* result;
const UWord a1 = a >> ADDR0_BITS;
- return VG_(OSetGen_Lookup)(bm->oset, &a1);
+ if (a1 == bm->last_lookup_a1)
+ {
+ //tl_assert(bm->last_lookup_result == VG_(OSetGen_Lookup)(bm->oset, &a1));
+ return bm->last_lookup_result;
+ }
+ result = VG_(OSetGen_Lookup)(bm->oset, &a1);
+ if (result)
+ {
+ ((struct bitmap*)bm)->last_lookup_a1 = a1;
+ ((struct bitmap*)bm)->last_lookup_result = result;
+ }
+ return result;
}
static __inline__
struct bitmap2* bm2_insert(const struct bitmap* const bm,
const UWord a1)
{
- struct bitmap2* const node = VG_(OSetGen_AllocNode)(bm->oset, sizeof(*node));
- node->addr = a1;
- VG_(memset)(&node->bm1, 0, sizeof(node->bm1));
- VG_(OSetGen_Insert)(bm->oset, node);
-
- s_bitmap2_creation_count++;
-
- return node;
+ struct bitmap2* const node = VG_(OSetGen_AllocNode)(bm->oset, sizeof(*node));
+ node->addr = a1;
+ VG_(memset)(&node->bm1, 0, sizeof(node->bm1));
+ VG_(OSetGen_Insert)(bm->oset, node);
+
+ ((struct bitmap*)bm)->last_lookup_a1 = a1;
+ ((struct bitmap*)bm)->last_lookup_result = node;
+
+ s_bitmap2_creation_count++;
+
+ return node;
}
static __inline__
struct bitmap2* bm2_lookup_or_insert(const struct bitmap* const bm,
const UWord a1)
{
- struct bitmap2* p2 = VG_(OSetGen_Lookup)(bm->oset, &a1);
- if (p2 == 0)
- {
- p2 = bm2_insert(bm, a1);
- }
- return p2;
+ struct bitmap2* p2;
+
+ if (a1 == bm->last_lookup_a1)
+ {
+ //tl_assert(bm->last_lookup_result == VG_(OSetGen_Lookup)(bm->oset, &a1));
+ return bm->last_lookup_result;
+ }
+
+ p2 = VG_(OSetGen_Lookup)(bm->oset, &a1);
+ if (p2 == 0)
+ {
+ p2 = bm2_insert(bm, a1);
+ }
+ ((struct bitmap*)bm)->last_lookup_a1 = a1;
+ ((struct bitmap*)bm)->last_lookup_result = p2;
+ return p2;
}
// Implements the thread-related core callbacks.
//
-static
-VG_REGPARM(2) void drd_trace_load(Addr addr, SizeT size)
+static void drd_trace_mem_access(const Addr addr, const SizeT size,
+ const BmAccessTypeT access_type)
+{
+ char vc[80];
+ vc_snprint(vc, sizeof(vc), thread_get_vc(thread_get_running_tid()));
+ VG_(message)(Vg_UserMsg,
+ "%s 0x%lx size %ld %s (vg %d / drd %d / vc %s)",
+ access_type == eLoad ? "load " : "store",
+ addr,
+ size,
+ thread_get_name(thread_get_running_tid()),
+ VG_(get_running_tid)(),
+ thread_get_running_tid(),
+ vc);
+ VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
+ VG_(clo_backtrace_size));
+ tl_assert(DrdThreadIdToVgThreadId(thread_get_running_tid())
+ == VG_(get_running_tid)());
+}
+
+static void drd_report_race(const Addr addr, const SizeT size,
+ const BmAccessTypeT access_type)
+{
+ DataRaceErrInfo drei;
+ drei.tid = VG_(get_running_tid)();
+ drei.addr = addr;
+ drei.size = size;
+ drei.access_type = access_type;
+ VG_(maybe_record_error)(VG_(get_running_tid)(),
+ DataRaceErr,
+ VG_(get_IP)(VG_(get_running_tid)()),
+ "Conflicting accesses",
+ &drei);
+}
+
+static VG_REGPARM(2) void drd_trace_load(Addr addr, SizeT size)
{
Segment* sg;
#if 0
+ /* The assert below has been commented out for performance reasons. */
tl_assert(thread_get_running_tid()
== VgThreadIdToDrdThreadId(VG_(get_running_tid())));
#endif
if (! running_thread_is_recording())
return;
-#if 1
if (drd_trace_mem || (addr == drd_trace_address))
{
- char vc[80];
- vc_snprint(vc, sizeof(vc), thread_get_vc(thread_get_running_tid()));
- VG_(message)(Vg_UserMsg, "load 0x%lx size %ld %s (vg %d / drd %d / vc %s)",
- addr,
- size,
- thread_get_name(thread_get_running_tid()),
- VG_(get_running_tid)(),
- thread_get_running_tid(),
- vc);
- VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
- VG_(clo_backtrace_size));
- tl_assert(DrdThreadIdToVgThreadId(thread_get_running_tid())
- == VG_(get_running_tid)());
+ drd_trace_mem_access(addr, size, eLoad);
}
-#endif
- sg = thread_get_segment(thread_get_running_tid());
+ sg = running_thread_get_segment();
bm_access_range_load(sg->bm, addr, addr + size);
if (bm_load_has_conflict_with(thread_get_danger_set(), addr, addr + size)
&& ! drd_is_suppressed(addr, addr + size))
{
- DataRaceErrInfo drei;
- drei.tid = VG_(get_running_tid)();
- drei.addr = addr;
- drei.size = size;
- drei.access_type = eLoad;
- VG_(maybe_record_error)(VG_(get_running_tid)(),
- DataRaceErr,
- VG_(get_IP)(VG_(get_running_tid)()),
- "Conflicting accesses",
- &drei);
+ drd_report_race(addr, size, eLoad);
+ }
+}
+
+static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
+{
+ Segment* sg;
+
+ if (! running_thread_is_recording())
+ return;
+
+ if (drd_trace_mem || (addr == drd_trace_address))
+ {
+ drd_trace_mem_access(addr, 1, eLoad);
+ }
+ sg = running_thread_get_segment();
+ bm_access_load_1(sg->bm, addr);
+ if (bm_load_1_has_conflict_with(thread_get_danger_set(), addr)
+ && ! drd_is_suppressed(addr, addr + 1))
+ {
+ drd_report_race(addr, 1, eLoad);
+ }
+}
+
+static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
+{
+ Segment* sg;
+
+ if (! running_thread_is_recording())
+ return;
+
+ if (drd_trace_mem || (addr == drd_trace_address))
+ {
+ drd_trace_mem_access(addr, 2, eLoad);
+ }
+ sg = running_thread_get_segment();
+ bm_access_load_2(sg->bm, addr);
+ if (bm_load_2_has_conflict_with(thread_get_danger_set(), addr)
+ && ! drd_is_suppressed(addr, addr + 2))
+ {
+ drd_report_race(addr, 2, eLoad);
+ }
+}
+
+static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
+{
+ Segment* sg;
+
+ if (! running_thread_is_recording())
+ return;
+
+ if (drd_trace_mem || (addr == drd_trace_address))
+ {
+ drd_trace_mem_access(addr, 4, eLoad);
+ }
+ sg = running_thread_get_segment();
+ bm_access_load_4(sg->bm, addr);
+ if (bm_load_4_has_conflict_with(thread_get_danger_set(), addr)
+ && ! drd_is_suppressed(addr, addr + 4))
+ {
+ drd_report_race(addr, 4, eLoad);
+ }
+}
+
+static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
+{
+ Segment* sg;
+
+ if (! running_thread_is_recording())
+ return;
+
+ if (drd_trace_mem || (addr == drd_trace_address))
+ {
+ drd_trace_mem_access(addr, 8, eLoad);
+ }
+ sg = running_thread_get_segment();
+ bm_access_load_8(sg->bm, addr);
+ if (bm_load_8_has_conflict_with(thread_get_danger_set(), addr)
+ && ! drd_is_suppressed(addr, addr + 8))
+ {
+ drd_report_race(addr, 8, eLoad);
}
}
Segment* sg;
#if 0
+ /* The assert below has been commented out for performance reasons. */
tl_assert(thread_get_running_tid()
== VgThreadIdToDrdThreadId(VG_(get_running_tid())));
#endif
if (! running_thread_is_recording())
return;
-#if 1
if (drd_trace_mem || (addr == drd_trace_address))
{
- char vc[80];
- vc_snprint(vc, sizeof(vc), thread_get_vc(thread_get_running_tid()));
- VG_(message)(Vg_UserMsg, "store 0x%lx size %ld %s (vg %d / drd %d / off %d / vc %s)",
- addr,
- size,
- thread_get_name(thread_get_running_tid()),
- VG_(get_running_tid)(),
- thread_get_running_tid(),
- addr - thread_get_stack_min(thread_get_running_tid()),
- vc);
- VG_(get_and_pp_StackTrace)(VG_(get_running_tid)(),
- VG_(clo_backtrace_size));
- tl_assert(DrdThreadIdToVgThreadId(thread_get_running_tid())
- == VG_(get_running_tid)());
+ drd_trace_mem_access(addr, size, eStore);
}
-#endif
- sg = thread_get_segment(thread_get_running_tid());
+ sg = running_thread_get_segment();
bm_access_range_store(sg->bm, addr, addr + size);
if (bm_store_has_conflict_with(thread_get_danger_set(), addr, addr + size)
&& ! drd_is_suppressed(addr, addr + size))
{
- DataRaceErrInfo drei;
- drei.tid = VG_(get_running_tid)();
- drei.addr = addr;
- drei.size = size;
- drei.access_type = eStore;
- VG_(maybe_record_error)(VG_(get_running_tid)(),
- DataRaceErr,
- VG_(get_IP)(VG_(get_running_tid)()),
- "Conflicting accesses",
- &drei);
+ drd_report_race(addr, size, eStore);
+ }
+}
+
+static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
+{
+ Segment* sg;
+
+ if (! running_thread_is_recording())
+ return;
+
+ if (drd_trace_mem || (addr == drd_trace_address))
+ {
+ drd_trace_mem_access(addr, 1, eStore);
+ }
+ sg = running_thread_get_segment();
+ bm_access_store_1(sg->bm, addr);
+ if (bm_store_1_has_conflict_with(thread_get_danger_set(), addr)
+ && ! drd_is_suppressed(addr, addr + 1))
+ {
+ drd_report_race(addr, 1, eStore);
+ }
+}
+
+static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
+{
+ Segment* sg;
+
+ if (! running_thread_is_recording())
+ return;
+
+ if (drd_trace_mem || (addr == drd_trace_address))
+ {
+ drd_trace_mem_access(addr, 2, eStore);
+ }
+ sg = running_thread_get_segment();
+ bm_access_store_2(sg->bm, addr);
+ if (bm_store_2_has_conflict_with(thread_get_danger_set(), addr)
+ && ! drd_is_suppressed(addr, addr + 2))
+ {
+ drd_report_race(addr, 2, eStore);
+ }
+}
+
+static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
+{
+ Segment* sg;
+
+ if (! running_thread_is_recording())
+ return;
+
+ if (drd_trace_mem || (addr == drd_trace_address))
+ {
+ drd_trace_mem_access(addr, 4, eStore);
+ }
+ sg = running_thread_get_segment();
+ bm_access_store_4(sg->bm, addr);
+ if (bm_store_4_has_conflict_with(thread_get_danger_set(), addr)
+ && ! drd_is_suppressed(addr, addr + 4))
+ {
+ drd_report_race(addr, 4, eStore);
+ }
+}
+
+static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
+{
+ Segment* sg;
+
+ if (! running_thread_is_recording())
+ return;
+
+ if (drd_trace_mem || (addr == drd_trace_address))
+ {
+ drd_trace_mem_access(addr, 8, eStore);
+ }
+ sg = running_thread_get_segment();
+ bm_access_store_8(sg->bm, addr);
+ if (bm_store_8_has_conflict_with(thread_get_danger_set(), addr)
+ && ! drd_is_suppressed(addr, addr + 8))
+ {
+ drd_report_race(addr, 8, eStore);
}
}
# endif
}
+static void instrument_load(IRSB* const bb,
+ IRExpr* const addr_expr,
+ const HWord size)
+{
+ IRExpr* size_expr;
+ IRExpr** argv;
+ IRDirty* di;
+
+ switch (size)
+ {
+ case 1:
+ argv = mkIRExprVec_1(addr_expr);
+ di = unsafeIRDirty_0_N(/*regparms*/1,
+ "drd_trace_load_1",
+ VG_(fnptr_to_fnentry)(drd_trace_load_1),
+ argv);
+ break;
+ case 2:
+ argv = mkIRExprVec_1(addr_expr);
+ di = unsafeIRDirty_0_N(/*regparms*/1,
+ "drd_trace_load_2",
+ VG_(fnptr_to_fnentry)(drd_trace_load_2),
+ argv);
+ break;
+ case 4:
+ argv = mkIRExprVec_1(addr_expr);
+ di = unsafeIRDirty_0_N(/*regparms*/1,
+ "drd_trace_load_4",
+ VG_(fnptr_to_fnentry)(drd_trace_load_4),
+ argv);
+ break;
+ case 8:
+ argv = mkIRExprVec_1(addr_expr);
+ di = unsafeIRDirty_0_N(/*regparms*/1,
+ "drd_trace_load_8",
+ VG_(fnptr_to_fnentry)(drd_trace_load_8),
+ argv);
+ break;
+ default:
+ size_expr = mkIRExpr_HWord(size);
+ argv = mkIRExprVec_2(addr_expr, size_expr);
+ di = unsafeIRDirty_0_N(/*regparms*/2,
+ "drd_trace_load",
+ VG_(fnptr_to_fnentry)(drd_trace_load),
+ argv);
+ break;
+ }
+ addStmtToIRSB(bb, IRStmt_Dirty(di));
+}
+
+static void instrument_store(IRSB* const bb,
+ IRExpr* const addr_expr,
+ const HWord size)
+{
+ IRExpr* size_expr;
+ IRExpr** argv;
+ IRDirty* di;
+
+ switch (size)
+ {
+ case 1:
+ argv = mkIRExprVec_1(addr_expr);
+ di = unsafeIRDirty_0_N(/*regparms*/1,
+ "drd_trace_store_1",
+ VG_(fnptr_to_fnentry)(drd_trace_store_1),
+ argv);
+ break;
+ case 2:
+ argv = mkIRExprVec_1(addr_expr);
+ di = unsafeIRDirty_0_N(/*regparms*/1,
+ "drd_trace_store_2",
+ VG_(fnptr_to_fnentry)(drd_trace_store_2),
+ argv);
+ break;
+ case 4:
+ argv = mkIRExprVec_1(addr_expr);
+ di = unsafeIRDirty_0_N(/*regparms*/1,
+ "drd_trace_store_4",
+ VG_(fnptr_to_fnentry)(drd_trace_store_4),
+ argv);
+ break;
+ case 8:
+ argv = mkIRExprVec_1(addr_expr);
+ di = unsafeIRDirty_0_N(/*regparms*/1,
+ "drd_trace_store_8",
+ VG_(fnptr_to_fnentry)(drd_trace_store_8),
+ argv);
+ break;
+ default:
+ size_expr = mkIRExpr_HWord(size);
+ argv = mkIRExprVec_2(addr_expr, size_expr);
+ di = unsafeIRDirty_0_N(/*regparms*/2,
+ "drd_trace_store",
+ VG_(fnptr_to_fnentry)(drd_trace_store),
+ argv);
+ break;
+ }
+ addStmtToIRSB(bb, IRStmt_Dirty(di));
+}
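+
+/* Both helpers above select a callback based on the access size: the common
+ * 1, 2, 4 and 8 byte cases call a VG_REGPARM(1) function that takes only the
+ * address, so the size does not have to be passed as an extra IR constant,
+ * while all other sizes keep the original two-argument
+ * drd_trace_load() / drd_trace_store() path.
+ */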
+
static
IRSB* drd_instrument(VgCallbackClosure* const closure,
IRSB* const bb_in,
Int i;
IRSB* bb;
IRExpr** argv;
- IRExpr* addr_expr;
- IRExpr* size_expr;
Bool instrument = True;
Bool bus_locked = False;
case Ist_Store:
if (instrument && ! bus_locked)
{
- addr_expr = st->Ist.Store.addr;
- size_expr = mkIRExpr_HWord(
- sizeofIRType(typeOfIRExpr(bb->tyenv, st->Ist.Store.data)));
- argv = mkIRExprVec_2(addr_expr, size_expr);
- di = unsafeIRDirty_0_N(/*regparms*/2,
- "drd_trace_store",
- VG_(fnptr_to_fnentry)(drd_trace_store),
- argv);
- addStmtToIRSB(bb, IRStmt_Dirty(di));
+ instrument_store(bb,
+ st->Ist.Store.addr,
+ sizeofIRType(typeOfIRExpr(bb->tyenv,
+ st->Ist.Store.data)));
}
addStmtToIRSB(bb, st);
break;
const IRExpr* const data = st->Ist.WrTmp.data;
if (data->tag == Iex_Load)
{
- addr_expr = data->Iex.Load.addr;
- size_expr = mkIRExpr_HWord(sizeofIRType(data->Iex.Load.ty));
- argv = mkIRExprVec_2(addr_expr, size_expr);
- di = unsafeIRDirty_0_N(/*regparms*/2,
- "drd_trace_load",
- VG_(fnptr_to_fnentry)(drd_trace_load),
- argv);
- addStmtToIRSB(bb, IRStmt_Dirty(di));
+ instrument_load(bb,
+ data->Iex.Load.addr,
+ sizeofIRType(data->Iex.Load.ty));
}
}
addStmtToIRSB(bb, st);
return s_threadinfo[tid].last;
}
+/** Return a pointer to the latest segment for the running thread. */
+static inline
+Segment* running_thread_get_segment(void)
+{
+ return thread_get_segment(s_drd_running_tid);
+}
#endif // __THREAD_H
void bm_delete(struct bitmap* const bm);
void bm_access_range_load(struct bitmap* const bm,
const Addr a1, const Addr a2);
+void bm_access_load_1(struct bitmap* const bm, const Addr a1);
+void bm_access_load_2(struct bitmap* const bm, const Addr a1);
+void bm_access_load_4(struct bitmap* const bm, const Addr a1);
+void bm_access_load_8(struct bitmap* const bm, const Addr a1);
+void bm_access_store_1(struct bitmap* const bm, const Addr a1);
+void bm_access_store_2(struct bitmap* const bm, const Addr a1);
+void bm_access_store_4(struct bitmap* const bm, const Addr a1);
+void bm_access_store_8(struct bitmap* const bm, const Addr a1);
void bm_access_range_store(struct bitmap* const bm,
const Addr a1, const Addr a2);
Bool bm_has(const struct bitmap* const bm,
Bool bm_has_conflict_with(const struct bitmap* const bm,
const Addr a1, const Addr a2,
const BmAccessTypeT access_type);
+Bool bm_load_1_has_conflict_with(const struct bitmap* const bm, const Addr a1);
+Bool bm_load_2_has_conflict_with(const struct bitmap* const bm, const Addr a1);
+Bool bm_load_4_has_conflict_with(const struct bitmap* const bm, const Addr a1);
+Bool bm_load_8_has_conflict_with(const struct bitmap* const bm, const Addr a1);
Bool bm_load_has_conflict_with(const struct bitmap* const bm,
const Addr a1, const Addr a2);
+Bool bm_store_1_has_conflict_with(const struct bitmap* const bm,const Addr a1);
+Bool bm_store_2_has_conflict_with(const struct bitmap* const bm,const Addr a1);
+Bool bm_store_4_has_conflict_with(const struct bitmap* const bm,const Addr a1);
+Bool bm_store_8_has_conflict_with(const struct bitmap* const bm,const Addr a1);
Bool bm_store_has_conflict_with(const struct bitmap* const bm,
const Addr a1, const Addr a2);
void bm_swap(struct bitmap* const bm1, struct bitmap* const bm2);