return (start_of_this_sm(a) == a);
}
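+/* The union below overlays vabits8 with a 16-bit view, two bytes per
+   UShort, so the chunk count must be even. */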
+STATIC_ASSERT(SM_CHUNKS % 2 == 0);
+
typedef
- struct {
+ union {
UChar vabits8[SM_CHUNKS];
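+      /* 16-bit view of the same storage: one UShort holds the VA bits
+         for 8 bytes of address space (4 per UChar). */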
+ UShort vabits16[SM_CHUNKS/2];
}
SecMap;
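+/* Use sites can now write sm->vabits16[off16] instead of the old
+   ((UShort*)(sm->vabits8))[off16].  Both name the same bytes, but the
+   union form is the type punning GCC documents as well-defined.  The
+   uniform VA_BITS16_* patterns (0x0000, 0x5555, 0xAAAA) are
+   byte-symmetric, so the 16-bit view behaves the same on little- and
+   big-endian hosts. */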
&& nBits == 64 && VG_IS_8_ALIGNED(a))) {
SecMap* sm = get_secmap_for_reading(a);
UWord sm_off16 = SM_OFF_16(a);
- UWord vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
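+      /* Read through the union's 16-bit view; this UShort holds the VA
+         bits for the 8 bytes at 'a'. */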
+ UWord vabits16 = sm->vabits16[sm_off16];
if (LIKELY(vabits16 == VA_BITS16_DEFINED))
return V_BITS64_DEFINED;
if (LIKELY(vabits16 == VA_BITS16_UNDEFINED))
&& nBits == 64 && VG_IS_8_ALIGNED(a))) {
SecMap* sm = get_secmap_for_reading(a);
UWord sm_off16 = SM_OFF_16(a);
- UWord vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
+ UWord vabits16 = sm->vabits16[sm_off16];
if (LIKELY( !is_distinguished_sm(sm) &&
(VA_BITS16_DEFINED == vabits16 ||
VA_BITS16_UNDEFINED == vabits16) )) {
-         /* is mapped, and is addressible. */
+         /* is mapped, and is addressable. */
// Convert full V-bits in register to compact 2-bit form.
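+      // (All-defined vbytes, i.e. 64 zero V bits, collapse to the
+      // single UShort VA_BITS16_DEFINED; all-ones vbytes likewise map
+      // to VA_BITS16_UNDEFINED.)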
if (LIKELY(V_BITS64_DEFINED == vbytes)) {
- ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
+ sm->vabits16[sm_off16] = VA_BITS16_DEFINED;
return;
} else if (V_BITS64_UNDEFINED == vbytes) {
- ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
+ sm->vabits16[sm_off16] = VA_BITS16_UNDEFINED;
return;
}
/* else fall into the slow case */
if (lenA < 8) break;
PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8A);
sm_off16 = SM_OFF_16(a);
- ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
+ sm->vabits16[sm_off16] = vabits16;
a += 8;
lenA -= 8;
}
if (lenB < 8) break;
PROF_EVENT(MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8B);
sm_off16 = SM_OFF_16(a);
- ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
+ sm->vabits16[sm_off16] = vabits16;
a += 8;
lenB -= 8;
}
sm = get_secmap_for_writing_low(a);
sm_off16 = SM_OFF_16(a);
- ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
+ sm->vabits16[sm_off16] = VA_BITS16_UNDEFINED;
}
#endif
}
sm = get_secmap_for_writing_low(a);
sm_off16 = SM_OFF_16(a);
- ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_NOACCESS;
+ sm->vabits16[sm_off16] = VA_BITS16_NOACCESS;
//// BEGIN inlined, specialised version of MC_(helperc_b_store8)
//// Clear the origins for a+0 .. a+7.
/* Now we know that the entire address range falls within a
single secondary map, and that that secondary 'lives' in
the main primary map. */
- SecMap* sm = get_secmap_for_writing_low(a_lo);
- UWord v_off = SM_OFF(a_lo);
- UShort* p = (UShort*)(&sm->vabits8[v_off]);
+ SecMap* sm = get_secmap_for_writing_low(a_lo);
+ UWord v_off16 = SM_OFF_16(a_lo);
+ UShort* p = &sm->vabits16[v_off16];
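+   /* Each UShort store below paints the VA bits for 8 bytes of the
+      frame. */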
p[ 0] = VA_BITS16_UNDEFINED;
p[ 1] = VA_BITS16_UNDEFINED;
p[ 2] = VA_BITS16_UNDEFINED;
/* Now we know that the entire address range falls within a
single secondary map, and that that secondary 'lives' in
the main primary map. */
- SecMap* sm = get_secmap_for_writing_low(a_lo);
- UWord v_off = SM_OFF(a_lo);
- UShort* p = (UShort*)(&sm->vabits8[v_off]);
+ SecMap* sm = get_secmap_for_writing_low(a_lo);
+ UWord v_off16 = SM_OFF_16(a_lo);
+ UShort* p = &sm->vabits16[v_off16];
p[ 0] = VA_BITS16_UNDEFINED;
p[ 1] = VA_BITS16_UNDEFINED;
p[ 2] = VA_BITS16_UNDEFINED;
/* Now we know that the entire address range falls within a
single secondary map, and that that secondary 'lives' in
the main primary map. */
- SecMap* sm = get_secmap_for_writing_low(a_lo);
- UWord v_off = SM_OFF(a_lo);
- UShort* p = (UShort*)(&sm->vabits8[v_off]);
+ SecMap* sm = get_secmap_for_writing_low(a_lo);
+ UWord v_off16 = SM_OFF_16(a_lo);
+ UShort* p = &sm->vabits16[v_off16];
p[ 0] = VA_BITS16_UNDEFINED;
p[ 1] = VA_BITS16_UNDEFINED;
p[ 2] = VA_BITS16_UNDEFINED;
/* Now we know that the entire address range falls within a
single secondary map, and that that secondary 'lives' in
the main primary map. */
- SecMap* sm = get_secmap_for_writing_low(a_lo);
- UWord v_off = SM_OFF(a_lo);
- UShort* p = (UShort*)(&sm->vabits8[v_off]);
+ SecMap* sm = get_secmap_for_writing_low(a_lo);
+ UWord v_off16 = SM_OFF_16(a_lo);
+ UShort* p = &sm->vabits16[v_off16];
p[ 0] = VA_BITS16_UNDEFINED;
p[ 1] = VA_BITS16_UNDEFINED;
p[ 2] = VA_BITS16_UNDEFINED;
PROF_EVENT(MCPE_MAKE_STACK_UNINIT_128_NO_O_ALIGNED_16);
SecMap* sm = get_secmap_for_writing_low(a_lo);
UWord v_off = SM_OFF(a_lo);
- UInt* w32 = (UInt*)(&sm->vabits8[v_off]);
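+   /* a_lo is 16-aligned on this path, so v_off is a multiple of 4 and
+      the UInt accesses are aligned; ASSUME_ALIGNED states that
+      explicitly instead of a bare cast that alignment checkers would
+      flag. */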
+ UInt* w32 = ASSUME_ALIGNED(UInt*, &sm->vabits8[v_off]);
w32[ 0] = VA_BITS32_UNDEFINED;
w32[ 1] = VA_BITS32_UNDEFINED;
w32[ 2] = VA_BITS32_UNDEFINED;
/* Now we know that the entire address range falls within a
single secondary map, and that that secondary 'lives' in
the main primary map. */
- SecMap* sm = get_secmap_for_writing_low(a_lo);
- UWord v_off = SM_OFF(a_lo);
- UShort* w16 = (UShort*)(&sm->vabits8[v_off]);
- UInt* w32 = (UInt*)(&w16[1]);
+ SecMap* sm = get_secmap_for_writing_low(a_lo);
+ UWord v_off16 = SM_OFF_16(a_lo);
+ UShort* w16 = &sm->vabits16[v_off16];
+ UInt* w32 = ASSUME_ALIGNED(UInt*, &w16[1]);
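+   /* The UInt stores require &w16[1] to be 4-aligned, which holds when
+      w16 starts on an odd UShort (a_lo 8- but, presumably, not
+      16-aligned on this path). */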
/* The following assertion is commented out for obvious
performance reasons, but was verified as valid when
running the entire testsuite and also Firefox. */
for (j = 0; j < nULongs; j++) {
sm = get_secmap_for_reading_low(a + 8*j);
sm_off16 = SM_OFF_16(a + 8*j);
- vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
+ vabits16 = sm->vabits16[sm_off16];
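+      // One UShort of VA bits covers the 8-byte ULong at a + 8*j.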
// Convert V bits from compact memory form to expanded
// register form.
sm = get_secmap_for_reading_low(a);
sm_off16 = SM_OFF_16(a);
- vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
+ vabits16 = sm->vabits16[sm_off16];
// Handle common case quickly: a is suitably aligned, is mapped, and
-   // addressible.
+   // addressable.
sm = get_secmap_for_reading_low(a);
sm_off16 = SM_OFF_16(a);
- vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
+ vabits16 = sm->vabits16[sm_off16];
// To understand the below cleverness, see the extensive comments
// in MC_(helperc_STOREV8).
return;
}
if (!is_distinguished_sm(sm) && VA_BITS16_UNDEFINED == vabits16) {
- ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
+ sm->vabits16[sm_off16] = VA_BITS16_DEFINED;
return;
}
PROF_EVENT(MCPE_STOREV64_SLOW2);
return;
}
if (!is_distinguished_sm(sm) && VA_BITS16_DEFINED == vabits16) {
- ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
+ sm->vabits16[sm_off16] = VA_BITS16_UNDEFINED;
return;
}
PROF_EVENT(MCPE_STOREV64_SLOW3);