#include "libhb.h"
-/* fwds for
- Globals needed by other parts of the library. These are set
- once at startup and then never changed. */
-static void (*main_get_stacktrace)( Thr*, Addr*, UWord ) = NULL;
-static ExeContext* (*main_get_EC)( Thr* ) = NULL;
-
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
// //
+// Debugging #defines //
+// //
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
+
+/* Check the sanity of shadow values in the core memory state
+ machine. Change #if 0 to #if 1 to enable this. */
+#if 0
+# define CHECK_MSM 1
+#else
+# define CHECK_MSM 0
+#endif
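+
+/* A sketch of the intended usage pattern: since CHECK_MSM is a
+   compile-time constant, the guarded assertions compile away
+   entirely when it is zero, e.g.
+
+      if (CHECK_MSM) {
+         tl_assert(is_sane_SVal_C(svOld));
+      }
+*/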
+
+
+/* Check sanity (reference counts, etc) in the conflicting access
+ machinery. Change #if 0 to #if 1 to enable this. */
+#if 0
+# define CHECK_CEM 1
+#else
+# define CHECK_CEM 0
+#endif
+
+
+/* Check sanity in the compressed shadow memory machinery,
+   particularly in its caching innards. Unfortunately there's no
+   almost-zero-cost way to make these checks selectable at run
+   time. Hence change the #if 0 to #if 1 and rebuild if you want
+   them. */
+#if 0
+# define CHECK_ZSM 1 /* do sanity-check CacheLine stuff */
+# define inline __attribute__((noinline))
+ /* probably want to ditch -fomit-frame-pointer too */
+#else
+# define CHECK_ZSM 0 /* don't sanity-check CacheLine stuff */
+#endif
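+
+/* Note: the redefinition of 'inline' above forces the affected
+   helpers out of line, presumably so that they show up as
+   separate frames in stack traces while debugging the checks;
+   hence also the suggestion to ditch -fomit-frame-pointer. */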
+
+
+/////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////
// //
+// Forward declarations //
// //
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
+/* Globals needed by other parts of the library. These are set
+   once at startup and then never changed. */
+static void (*main_get_stacktrace)( Thr*, Addr*, UWord ) = NULL;
+static ExeContext* (*main_get_EC)( Thr* ) = NULL;
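+
+/* A minimal sketch of how these are expected to be filled in at
+   startup. The function name and signature here are assumptions
+   for illustration, not necessarily the library's real entry
+   point:
+
+      void example_libhb_setup ( void (*get_stacktrace)(Thr*, Addr*, UWord),
+                                 ExeContext* (*get_EC)(Thr*) )
+      {
+         main_get_stacktrace = get_stacktrace;
+         main_get_EC         = get_EC;
+      }
+*/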
+
+
/////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////
#endif /* ! __HB_ZSM_H */
-/* For the shadow mem cache stuff we may want more intrusive
- checks. Unfortunately there's no almost-zero-cost way to make them
- selectable at run time. Hence set the #if 0 to #if 1 and
- rebuild if you want them. */
-#if 0
-# define SCE_CACHELINE 1 /* do sanity-check CacheLine stuff */
-# define inline __attribute__((noinline))
- /* probably want to ditch -fomit-frame-pointer too */
-#else
-# define SCE_CACHELINE 0 /* don't sanity-check CacheLine stuff */
-#endif
-
-/* For the SegmentID, SegmentSet and SVal stuff we may want more
- intrusive checks. Again there's no zero cost way to do this. Set
- the #if 0 to #if 1 and rebuild if you want them. */
-#if 0
-# define SCE_SVALS 1 /* sanity-check shadow value stuff */
-#else
-# define SCE_SVALS 0
-#endif
-
-
/* Round a up to the next multiple of N. N must be a power of 2 */
#define ROUNDUP(a, N) ((a + N - 1) & ~(N-1))
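+/* Worked example: ROUNDUP(13, 8) == (13+7) & ~7 == 20 & ~7 == 16;
+   a value already a multiple of N is unchanged: ROUNDUP(16, 8) == 16. */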
/* Round a down to the next multiple of N. N must be a power of 2 */
cl->descrs[tno] = normalise_tree( tree );
}
tl_assert(cloff == N_LINE_ARANGE);
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
stats__cline_normalises++;
}
lineZ = &sm->linesZ[zix];
/* Generate the data to be stored */
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
csvalsUsed = -1;
for (k = 0; k < csvalsUsed; k++) {
sv = csvals[k].sval;
- if (SCE_SVALS)
+ if (CHECK_ZSM)
tl_assert(csvals[k].count >= 1 && csvals[k].count <= 8);
/* do we already have it? */
if (sv == lineZ->dict[0]) { j = 0; goto dict_ok; }
if (sv == lineZ->dict[2]) { j = 2; goto dict_ok; }
if (sv == lineZ->dict[3]) { j = 3; goto dict_ok; }
/* no. look for a free slot. */
- if (SCE_SVALS)
+ if (CHECK_ZSM)
tl_assert(sv != SVal_INVALID);
if (lineZ->dict[0]
== SVal_INVALID) { lineZ->dict[0] = sv; j = 0; goto dict_ok; }
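+      /* (Sketch of the scheme as visible here: each compressed
+         LineZ carries a 4-entry dictionary of distinct shadow
+         values; once no free dictionary slot remains, the line is
+         instead stored uncompressed in a LineF, as below.) */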
lineF->inUse = True;
i = 0;
for (k = 0; k < csvalsUsed; k++) {
- if (SCE_SVALS)
+ if (CHECK_ZSM)
tl_assert(csvals[k].count >= 1 && csvals[k].count <= 8);
sv = csvals[k].sval;
- if (SCE_SVALS)
+ if (CHECK_ZSM)
tl_assert(sv != SVal_INVALID);
for (m = csvals[k].count; m > 0; m--) {
lineF->w64s[i] = sv;
rcinc_LineF(lineF);
stats__cache_F_wbacks++;
}
-
- //if (anyShared)
- // sm->mbHasShared = True;
-
- /* mb_tidy_one_cacheline(); */
}
/* Fetch the cacheline 'wix' from the backing store. The tag
if (is_valid_scache_tag( *tag_old_p )) {
/* EXPENSIVE and REDUNDANT: callee does it */
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
cacheline_wback( wix );
}
/* and reload the new one */
*tag_old_p = tag;
cacheline_fetch( wix );
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
return cl;
}
if (0)
VG_(printf)("libhb: event_map GC at size %lu\n", oldrefTreeN);
- /* Check our counting is sane */
- tl_assert(oldrefTreeN == VG_(sizeSWA)( oldrefTree ));
+ /* Check our counting is sane (expensive) */
+ if (CHECK_CEM)
+ tl_assert(oldrefTreeN == VG_(sizeSWA)( oldrefTree ));
- /* Check the reference counts */
- event_map__check_reference_counts( True/*before*/ );
+ /* Check the reference counts (expensive) */
+ if (CHECK_CEM)
+ event_map__check_reference_counts( True/*before*/ );
/* Compute the distribution of generation values in the ref tree.
There are likely only to be a few different generation numbers
}
}
- /* Check the reference counts */
- event_map__check_reference_counts( False/*after*/ );
+ /* Check the reference counts (expensive) */
+ if (CHECK_CEM)
+ event_map__check_reference_counts( False/*after*/ );
//if (0)
//VG_(printf)("XXXX final sizes: oldrefTree %ld, contextTree %ld\n\n",
/* Logic in msm_read/msm_write updated/verified after re-analysis,
19 Nov 08. */
-#define MSM_CONFACC 1
-
-#define MSM_CHECK 0
-
/* 19 Nov 08: it seems that MSM_RACE2ERR == 1 is a bad idea. When
nonzero, the effect is that when a race is detected for a location,
- that location is put into a special 'error' state and no further
- subsequent race -- including ones we want to see -- will never be
- detected until the location is deallocated and reallocated.
+ that location is put into a special 'error' state, and any
+ subsequent race -- including ones we want to see -- will not be
+ detected until the location is deallocated and reallocated.
- Hence set MSM_CHECK to zero. This causes raced-on locations to
+ Hence set MSM_RACE2ERR to zero. This causes raced-on locations to
remain in the normal 'C' (constrained) state, but places on them
the constraint that the next accesses happen-after both the
existing constraint and the relevant vector clock of the thread
- doing the racing access.
+ doing the racing access.
*/
#define MSM_RACE2ERR 0
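+
+/* Concrete effect (a sketch, using names from the code below):
+   if a thread with clocks tviR/tviW races on a location in state
+   C(rmini, wmini), then with MSM_RACE2ERR == 0 the new state is
+   C( join(rmini,tviR), join(wmini,tviW) ), i.e. the same joins a
+   race-free access would have produced, rather than the sticky
+   error state E. */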
stats__msm_read++;
/* Redundant sanity check on the constraints */
- if (MSM_CHECK) {
+ if (CHECK_MSM) {
tl_assert(is_sane_SVal_C(svOld));
}
tl_assert(ordxx == POrd_EQ || ordxx == POrd_LT);
svNew = MSM_RACE2ERR
? SVal__mkE()
-#if 0
- //std
+ /* See comments on the corresponding fragment in
+    msm_write for an explanation. */
+ /* aggressive setting: */
+ /*
: SVal__mkC( VtsID__join2(wmini,tviR),
VtsID__join2(wmini,tviW) );
-#else
- // relaxed
+ */
+ /* "consistent" setting: */
: SVal__mkC( VtsID__join2(rmini,tviR),
VtsID__join2(wmini,tviW) );
-#endif
record_race_info( acc_thr, acc_addr, szB, False/*!isWrite*/,
svOld, svNew );
goto out;
tl_assert(0);
out:
- if (MSM_CHECK) {
+ if (CHECK_MSM) {
tl_assert(is_sane_SVal_C(svNew));
}
tl_assert(svNew != SVal_INVALID);
if (svNew != svOld) {
- if (MSM_CONFACC && SVal__isC(svOld) && SVal__isC(svNew)) {
+ if (SVal__isC(svOld) && SVal__isC(svNew)) {
event_map_bind( acc_addr, szB, False/*!isWrite*/, acc_thr );
stats__msm_read_change++;
}
stats__msm_write++;
/* Redundant sanity check on the constraints */
- if (MSM_CHECK) {
+ if (CHECK_MSM) {
tl_assert(is_sane_SVal_C(svOld));
}
tl_assert(ordxx == POrd_EQ || ordxx == POrd_LT);
svNew = MSM_RACE2ERR
? SVal__mkE()
-#if 0
- // std
+ /* One possibility is, after a race is seen, to
+ set the location's constraints as aggressively
+ (as far ahead) as possible. However, that just
+ causes lots more races to be reported, which is
+ very confusing. Hence don't do this. */
+ /*
: SVal__mkC( VtsID__join2(wmini,tviR),
VtsID__join2(wmini,tviW) );
-#else
- // relaxed
+ */
+ /* Instead, re-set the constraints to be consistent
+    with what would have been computed anyway, had no
+    race been detected. */
: SVal__mkC( VtsID__join2(rmini,tviR),
VtsID__join2(wmini,tviW) );
-#endif
record_race_info( acc_thr, acc_addr, szB, True/*isWrite*/,
svOld, svNew );
goto out;
tl_assert(0);
out:
- if (MSM_CHECK) {
+ if (CHECK_MSM) {
tl_assert(is_sane_SVal_C(svNew));
}
tl_assert(svNew != SVal_INVALID);
if (svNew != svOld) {
- if (MSM_CONFACC && SVal__isC(svOld) && SVal__isC(svNew)) {
+ if (SVal__isC(svOld) && SVal__isC(svNew)) {
event_map_bind( acc_addr, szB, True/*isWrite*/, acc_thr );
stats__msm_write_change++;
}
if (UNLIKELY( !(descr & (TREE_DESCR_8_0 << toff)) )) {
SVal* tree = &cl->svals[tno << 3];
cl->descrs[tno] = pulldown_to_8(tree, toff, descr);
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
}
svOld = cl->svals[cloff];
if (UNLIKELY( !(descr & (TREE_DESCR_8_0 << toff)) )) {
SVal* tree = &cl->svals[tno << 3];
cl->descrs[tno] = pulldown_to_8(tree, toff, descr);
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
}
svOld = cl->svals[cloff];
SVal* tree = &cl->svals[tno << 3];
cl->descrs[tno] = pulldown_to_16(tree, toff, descr);
}
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
}
svOld = cl->svals[cloff];
SVal* tree = &cl->svals[tno << 3];
cl->descrs[tno] = pulldown_to_16(tree, toff, descr);
}
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
}
svOld = cl->svals[cloff];
} else {
goto slowcase;
}
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
}
svOld = cl->svals[cloff];
} else {
goto slowcase;
}
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
}
svOld = cl->svals[cloff];
if (UNLIKELY( !(descr & (TREE_DESCR_8_0 << toff)) )) {
SVal* tree = &cl->svals[tno << 3];
cl->descrs[tno] = pulldown_to_8(tree, toff, descr);
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
}
tl_assert(svNew != SVal_INVALID);
its parent. So first, pull down to this level. */
SVal* tree = &cl->svals[tno << 3];
cl->descrs[tno] = pulldown_to_16(tree, toff, descr);
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
}
}
its parent. So first, pull down to this level. */
SVal* tree = &cl->svals[tno << 3];
cl->descrs[tno] = pulldown_to_32(tree, toff, descr);
- if (SCE_CACHELINE)
+ if (CHECK_ZSM)
tl_assert(is_sane_CacheLine(cl)); /* EXPENSIVE */
} else {
/* Writing at this level. Need to fix up 'descr'. */