From 87b4da1f0bcbd88f8f0c08c6fc2d3bad078cf9b5 Mon Sep 17 00:00:00 2001
From: Julian Seward
Date: Fri, 8 Jul 2005 01:29:33 +0000
Subject: [PATCH] Create and use bigendian versions of MC_(helperc_{LOAD,STORE}V{2,4,8}).

This involved some serious nastiness from the Department of Cpp Abuse.
Memcheck still bombs on ppc32 for unknown reasons. There are still
endianness issues within these functions, I think.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@4129
---
 memcheck/mc_include.h   |  18 +-
 memcheck/mc_main.c      | 656 +++++++++++++++++++++-------------------
 memcheck/mc_translate.c |  43 ++-
 3 files changed, 388 insertions(+), 329 deletions(-)

diff --git a/memcheck/mc_include.h b/memcheck/mc_include.h
index e27418c207..6f11a24eab 100644
--- a/memcheck/mc_include.h
+++ b/memcheck/mc_include.h
@@ -60,15 +60,21 @@ extern void MC_(helperc_value_check4_fail) ( void );
 extern void MC_(helperc_value_check1_fail) ( void );
 extern void MC_(helperc_value_check0_fail) ( void );
 
+extern VG_REGPARM(1) void MC_(helperc_STOREV8be) ( Addr, ULong );
 extern VG_REGPARM(1) void MC_(helperc_STOREV8le) ( Addr, ULong );
+extern VG_REGPARM(2) void MC_(helperc_STOREV4be) ( Addr, UWord );
 extern VG_REGPARM(2) void MC_(helperc_STOREV4le) ( Addr, UWord );
+extern VG_REGPARM(2) void MC_(helperc_STOREV2be) ( Addr, UWord );
 extern VG_REGPARM(2) void MC_(helperc_STOREV2le) ( Addr, UWord );
-extern VG_REGPARM(2) void MC_(helperc_STOREV1le) ( Addr, UWord );
-
-extern VG_REGPARM(1) UWord MC_(helperc_LOADV1le) ( Addr );
-extern VG_REGPARM(1) UWord MC_(helperc_LOADV2le) ( Addr );
-extern VG_REGPARM(1) UWord MC_(helperc_LOADV4le) ( Addr );
-extern VG_REGPARM(1) ULong MC_(helperc_LOADV8le) ( Addr );
+extern VG_REGPARM(2) void MC_(helperc_STOREV1) ( Addr, UWord );
+
+extern VG_REGPARM(1) ULong MC_(helperc_LOADV8be) ( Addr );
+extern VG_REGPARM(1) ULong MC_(helperc_LOADV8le) ( Addr );
+extern VG_REGPARM(1) UWord MC_(helperc_LOADV4be) ( Addr );
+extern VG_REGPARM(1) UWord MC_(helperc_LOADV4le) ( Addr );
+extern VG_REGPARM(1) UWord MC_(helperc_LOADV2be) ( Addr );
+extern VG_REGPARM(1) UWord MC_(helperc_LOADV2le) ( Addr );
+extern VG_REGPARM(1) UWord MC_(helperc_LOADV1) ( Addr );
 
 extern void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len );
 
diff --git a/memcheck/mc_main.c b/memcheck/mc_main.c
index fa3dbe6fb3..4bd965a81d 100644
--- a/memcheck/mc_main.c
+++ b/memcheck/mc_main.c
@@ -1472,296 +1472,320 @@ static Bool mc_recognised_suppression ( Char* name, Supp* su )
 
 /* ------------------------ Size = 8 ------------------------ */
 
-VG_REGPARM(1)
-ULong MC_(helperc_LOADV8le) ( Addr aA )
-{
-   PROF_EVENT(200, "helperc_LOADV8le");
-
-# if VG_DEBUG_MEMORY >= 2
-   return mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
-# else
-
-   const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
-   UWord a = (UWord)aA;
-
-   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
-      naturally aligned, or 'a' exceeds the range covered by the
-      primary map. Either way we defer to the slow-path case. */
-   if (EXPECTED_NOT_TAKEN(a & mask)) {
-      PROF_EVENT(201, "helperc_LOADV8le-slow1");
-      return (UWord)mc_LOADVn_slow( aA, 8, False/*littleendian*/ );
-   }
-
-   UWord sec_no = (UWord)(a >> 16);
-
-# if VG_DEBUG_MEMORY >= 1
-   tl_assert(sec_no < N_PRIMARY_MAP);
-# endif
-
-   SecMap* sm = primary_map[sec_no];
-   UWord v_off = a & 0xFFFF;
-   UWord a_off = v_off >> 3;
-   UWord abits = (UWord)(sm->abits[a_off]);
-
-   if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
-      /* Handle common case quickly: a is suitably aligned, is mapped,
-         and is addressible. */
-      return ((ULong*)(sm->vbyte))[ v_off >> 3 ];
-   } else {
-      /* Slow but general case. */
-      PROF_EVENT(202, "helperc_LOADV8le-slow2");
-      return mc_LOADVn_slow( a, 8, False/*littleendian*/ );
-   }
-
-# endif
-}
-
-VG_REGPARM(1)
-void MC_(helperc_STOREV8le) ( Addr aA, ULong vbytes )
-{
-   PROF_EVENT(210, "helperc_STOREV8le");
-
-# if VG_DEBUG_MEMORY >= 2
-   mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
-# else
-
-   const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16));
-   UWord a = (UWord)aA;
-
-   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
-      naturally aligned, or 'a' exceeds the range covered by the
-      primary map. Either way we defer to the slow-path case. */
-   if (EXPECTED_NOT_TAKEN(a & mask)) {
-      PROF_EVENT(211, "helperc_STOREV8le-slow1");
-      mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
-      return;
-   }
-
-   UWord sec_no = (UWord)(a >> 16);
-
-# if VG_DEBUG_MEMORY >= 1
-   tl_assert(sec_no < N_PRIMARY_MAP);
-# endif
+#define MAKE_LOADV8(nAME,iS_BIGENDIAN) \
+ \
+ VG_REGPARM(1) \
+ ULong nAME ( Addr aA ) \
+ { \
+    PROF_EVENT(200, #nAME); \
+ \
+    if (VG_DEBUG_MEMORY >= 2) \
+       return mc_LOADVn_slow( aA, 8, iS_BIGENDIAN ); \
+ \
+    const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16)); \
+    UWord a = (UWord)aA; \
+ \
+    /* If any part of 'a' indicated by the mask is 1, either */ \
+    /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
+    /* covered by the primary map. Either way we defer to the */ \
+    /* slow-path case. */ \
+    if (EXPECTED_NOT_TAKEN(a & mask)) { \
+       PROF_EVENT(201, #nAME"-slow1"); \
+       return (UWord)mc_LOADVn_slow( aA, 8, iS_BIGENDIAN ); \
+    } \
+ \
+    UWord sec_no = (UWord)(a >> 16); \
+ \
+    if (VG_DEBUG_MEMORY >= 1) \
+       tl_assert(sec_no < N_PRIMARY_MAP); \
+ \
+    SecMap* sm = primary_map[sec_no]; \
+    UWord v_off = a & 0xFFFF; \
+    UWord a_off = v_off >> 3; \
+    UWord abits = (UWord)(sm->abits[a_off]); \
+ \
+    if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) { \
+       /* Handle common case quickly: a is suitably aligned, */ \
+       /* is mapped, and is addressible. */ \
+       return ((ULong*)(sm->vbyte))[ v_off >> 3 ]; \
+    } else { \
+       /* Slow but general case. */ \
+       PROF_EVENT(202, #nAME"-slow2"); \
+       return mc_LOADVn_slow( a, 8, iS_BIGENDIAN ); \
+    } \
+ }
+
+MAKE_LOADV8( MC_(helperc_LOADV8be), True /*bigendian*/ );
+MAKE_LOADV8( MC_(helperc_LOADV8le), False/*littleendian*/ );
+
+
+#define MAKE_STOREV8(nAME,iS_BIGENDIAN) \
+ \
+ VG_REGPARM(1) \
+ void nAME ( Addr aA, ULong vbytes ) \
+ { \
+    PROF_EVENT(210, #nAME); \
+ \
+    if (VG_DEBUG_MEMORY >= 2) \
+       mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
+ \
+    const UWord mask = ~((0x10000-8) | ((N_PRIMARY_MAP-1) << 16)); \
+    UWord a = (UWord)aA; \
+ \
+    /* If any part of 'a' indicated by the mask is 1, either */ \
+    /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
+    /* covered by the primary map. Either way we defer to the */ \
+    /* slow-path case. */ \
+    if (EXPECTED_NOT_TAKEN(a & mask)) { \
+       PROF_EVENT(211, #nAME"-slow1"); \
+       mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
+       return; \
+    } \
+ \
+    UWord sec_no = (UWord)(a >> 16); \
+ \
+    if (VG_DEBUG_MEMORY >= 1) \
+       tl_assert(sec_no < N_PRIMARY_MAP); \
+ \
+    SecMap* sm = primary_map[sec_no]; \
+    UWord v_off = a & 0xFFFF; \
+    UWord a_off = v_off >> 3; \
+    UWord abits = (UWord)(sm->abits[a_off]); \
+ \
+    if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
+                       && abits == VGM_BYTE_VALID)) { \
+       /* Handle common case quickly: a is suitably aligned, */ \
+       /* is mapped, and is addressible. */ \
+       ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes; \
+    } else { \
+       /* Slow but general case. */ \
+       PROF_EVENT(212, #nAME"-slow2"); \
+       mc_STOREVn_slow( aA, 8, vbytes, iS_BIGENDIAN ); \
+    } \
+ }
+
+MAKE_STOREV8( MC_(helperc_STOREV8be), True /*bigendian*/ );
+MAKE_STOREV8( MC_(helperc_STOREV8le), False/*littleendian*/ );
 
-   SecMap* sm = primary_map[sec_no];
-   UWord v_off = a & 0xFFFF;
-   UWord a_off = v_off >> 3;
-   UWord abits = (UWord)(sm->abits[a_off]);
-
-   if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
-                      && abits == VGM_BYTE_VALID)) {
-      /* Handle common case quickly: a is suitably aligned, is mapped,
-         and is addressible. */
-      ((ULong*)(sm->vbyte))[ v_off >> 3 ] = vbytes;
-   } else {
-      /* Slow but general case. */
-      PROF_EVENT(212, "helperc_STOREV8le-slow2");
-      mc_STOREVn_slow( aA, 8, vbytes, False/*littleendian*/ );
-   }
-# endif
-}
 
 /* ------------------------ Size = 4 ------------------------ */
 
-VG_REGPARM(1)
-UWord MC_(helperc_LOADV4le) ( Addr aA )
-{
-   PROF_EVENT(220, "helperc_LOADV4le");
-
-# if VG_DEBUG_MEMORY >= 2
-   return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
-# else
-
-   const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
-   UWord a = (UWord)aA;
-
-   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
-      naturally aligned, or 'a' exceeds the range covered by the
-      primary map. Either way we defer to the slow-path case. */
-   if (EXPECTED_NOT_TAKEN(a & mask)) {
-      PROF_EVENT(221, "helperc_LOADV4le-slow1");
-      return (UWord)mc_LOADVn_slow( aA, 4, False/*littleendian*/ );
-   }
-
-   UWord sec_no = (UWord)(a >> 16);
-
-# if VG_DEBUG_MEMORY >= 1
-   tl_assert(sec_no < N_PRIMARY_MAP);
-# endif
-
-   SecMap* sm = primary_map[sec_no];
-   UWord v_off = a & 0xFFFF;
-   UWord a_off = v_off >> 3;
-   UWord abits = (UWord)(sm->abits[a_off]);
-   abits >>= (a & 4);
-   abits &= 15;
-   if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) {
-      /* Handle common case quickly: a is suitably aligned, is mapped,
-         and is addressible. */
-      /* On a 32-bit platform, simply hoick the required 32 bits out of
-         the vbyte array. On a 64-bit platform, also set the upper 32
-         bits to 1 ("undefined"), just in case. This almost certainly
-         isn't necessary, but be paranoid. */
-      UWord ret = (UWord)0xFFFFFFFF00000000ULL;
-      ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] );
-      return ret;
-   } else {
-      /* Slow but general case. */
-      PROF_EVENT(222, "helperc_LOADV4le-slow2");
-      return (UWord)mc_LOADVn_slow( a, 4, False/*littleendian*/ );
-   }
-
-# endif
-}
-
-VG_REGPARM(2)
-void MC_(helperc_STOREV4le) ( Addr aA, UWord vbytes )
-{
-   PROF_EVENT(230, "helperc_STOREV4le");
-
-# if VG_DEBUG_MEMORY >= 2
-   mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
-# else
-
-   const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16));
-   UWord a = (UWord)aA;
-
-   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
-      naturally aligned, or 'a' exceeds the range covered by the
-      primary map. Either way we defer to the slow-path case. */
-   if (EXPECTED_NOT_TAKEN(a & mask)) {
-      PROF_EVENT(231, "helperc_STOREV4le-slow1");
-      mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
-      return;
-   }
-
-   UWord sec_no = (UWord)(a >> 16);
-
-# if VG_DEBUG_MEMORY >= 1
-   tl_assert(sec_no < N_PRIMARY_MAP);
-# endif
+#define MAKE_LOADV4(nAME,iS_BIGENDIAN) \
+ \
+ VG_REGPARM(1) \
+ UWord nAME ( Addr aA ) \
+ { \
+    PROF_EVENT(220, #nAME); \
+ \
+    if (VG_DEBUG_MEMORY >= 2) \
+       return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
+ \
+    const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
+    UWord a = (UWord)aA; \
+ \
+    /* If any part of 'a' indicated by the mask is 1, either */ \
+    /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
+    /* covered by the primary map. Either way we defer to the */ \
+    /* slow-path case. */ \
+    if (EXPECTED_NOT_TAKEN(a & mask)) { \
+       PROF_EVENT(221, #nAME"-slow1"); \
+       return (UWord)mc_LOADVn_slow( aA, 4, iS_BIGENDIAN ); \
+    } \
+ \
+    UWord sec_no = (UWord)(a >> 16); \
+ \
+    if (VG_DEBUG_MEMORY >= 1) \
+       tl_assert(sec_no < N_PRIMARY_MAP); \
+ \
+    SecMap* sm = primary_map[sec_no]; \
+    UWord v_off = a & 0xFFFF; \
+    UWord a_off = v_off >> 3; \
+    UWord abits = (UWord)(sm->abits[a_off]); \
+    abits >>= (a & 4); \
+    abits &= 15; \
+    if (EXPECTED_TAKEN(abits == VGM_NIBBLE_VALID)) { \
+       /* Handle common case quickly: a is suitably aligned, */ \
+       /* is mapped, and is addressible. */ \
+       /* On a 32-bit platform, simply hoick the required 32 */ \
+       /* bits out of the vbyte array. On a 64-bit platform, */ \
+       /* also set the upper 32 bits to 1 ("undefined"), just */ \
+       /* in case. This almost certainly isn't necessary, */ \
+       /* but be paranoid. */ \
+       UWord ret = (UWord)0xFFFFFFFF00000000ULL; \
+       ret |= (UWord)( ((UInt*)(sm->vbyte))[ v_off >> 2 ] ); \
+       return ret; \
+    } else { \
+       /* Slow but general case. */ \
+       PROF_EVENT(222, #nAME"-slow2"); \
+       return (UWord)mc_LOADVn_slow( a, 4, iS_BIGENDIAN ); \
+    } \
+ }
+
+MAKE_LOADV4( MC_(helperc_LOADV4be), True /*bigendian*/ );
+MAKE_LOADV4( MC_(helperc_LOADV4le), False/*littleendian*/ );
+
+
+#define MAKE_STOREV4(nAME,iS_BIGENDIAN) \
+ \
+ VG_REGPARM(2) \
+ void nAME ( Addr aA, UWord vbytes ) \
+ { \
+    PROF_EVENT(230, #nAME); \
+ \
+    if (VG_DEBUG_MEMORY >= 2) \
+       mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
+ \
+    const UWord mask = ~((0x10000-4) | ((N_PRIMARY_MAP-1) << 16)); \
+    UWord a = (UWord)aA; \
+ \
+    /* If any part of 'a' indicated by the mask is 1, either */ \
+    /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
+    /* covered by the primary map. Either way we defer to the */ \
+    /* slow-path case. */ \
+    if (EXPECTED_NOT_TAKEN(a & mask)) { \
+       PROF_EVENT(231, #nAME"-slow1"); \
+       mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
+       return; \
+    } \
+ \
+    UWord sec_no = (UWord)(a >> 16); \
+ \
+    if (VG_DEBUG_MEMORY >= 1) \
+       tl_assert(sec_no < N_PRIMARY_MAP); \
+ \
+    SecMap* sm = primary_map[sec_no]; \
+    UWord v_off = a & 0xFFFF; \
+    UWord a_off = v_off >> 3; \
+    UWord abits = (UWord)(sm->abits[a_off]); \
+    abits >>= (a & 4); \
+    abits &= 15; \
+    if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
+                       && abits == VGM_NIBBLE_VALID)) { \
+       /* Handle common case quickly: a is suitably aligned, */ \
+       /* is mapped, and is addressible. */ \
+       ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes; \
+    } else { \
+       /* Slow but general case. */ \
+       PROF_EVENT(232, #nAME"-slow2"); \
+       mc_STOREVn_slow( aA, 4, (ULong)vbytes, iS_BIGENDIAN ); \
+    } \
+ }
+
+MAKE_STOREV4( MC_(helperc_STOREV4be), True /*bigendian*/ );
+MAKE_STOREV4( MC_(helperc_STOREV4le), False/*littleendian*/ );
 
-   SecMap* sm = primary_map[sec_no];
-   UWord v_off = a & 0xFFFF;
-   UWord a_off = v_off >> 3;
-   UWord abits = (UWord)(sm->abits[a_off]);
-   abits >>= (a & 4);
-   abits &= 15;
-   if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
-                      && abits == VGM_NIBBLE_VALID)) {
-      /* Handle common case quickly: a is suitably aligned, is mapped,
-         and is addressible. */
-      ((UInt*)(sm->vbyte))[ v_off >> 2 ] = (UInt)vbytes;
-   } else {
-      /* Slow but general case. */
-      PROF_EVENT(232, "helperc_STOREV4le-slow2");
-      mc_STOREVn_slow( aA, 4, (ULong)vbytes, False/*littleendian*/ );
-   }
-# endif
-}
 
 /* ------------------------ Size = 2 ------------------------ */
 
-VG_REGPARM(1)
-UWord MC_(helperc_LOADV2le) ( Addr aA )
-{
-   PROF_EVENT(240, "helperc_LOADV2le");
-
-# if VG_DEBUG_MEMORY >= 2
-   return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
-# else
-
-   const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
-   UWord a = (UWord)aA;
-
-   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
-      naturally aligned, or 'a' exceeds the range covered by the
-      primary map. Either way we defer to the slow-path case. */
-   if (EXPECTED_NOT_TAKEN(a & mask)) {
-      PROF_EVENT(241, "helperc_LOADV2le-slow1");
-      return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
-   }
-
-   UWord sec_no = (UWord)(a >> 16);
-
-# if VG_DEBUG_MEMORY >= 1
-   tl_assert(sec_no < N_PRIMARY_MAP);
-# endif
-
-   SecMap* sm = primary_map[sec_no];
-   UWord v_off = a & 0xFFFF;
-   UWord a_off = v_off >> 3;
-   UWord abits = (UWord)(sm->abits[a_off]);
-   if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) {
-      /* Handle common case quickly: a is mapped, and the entire
-         word32 it lives in is addressible. */
-      /* Set the upper 16/48 bits of the result to 1 ("undefined"),
-         just in case. This almost certainly isn't necessary, but be
-         paranoid. */
-      return (~(UWord)0xFFFF)
-             |
-             (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] );
-   } else {
-      /* Slow but general case. */
-      PROF_EVENT(242, "helperc_LOADV2le-slow2");
-      return (UWord)mc_LOADVn_slow( aA, 2, False/*littleendian*/ );
-   }
-
-# endif
-}
-
-VG_REGPARM(2)
-void MC_(helperc_STOREV2le) ( Addr aA, UWord vbytes )
-{
-   PROF_EVENT(250, "helperc_STOREV2le");
-
-# if VG_DEBUG_MEMORY >= 2
-   mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
-# else
-
-   const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16));
-   UWord a = (UWord)aA;
-
-   /* If any part of 'a' indicated by the mask is 1, either 'a' is not
-      naturally aligned, or 'a' exceeds the range covered by the
-      primary map. Either way we defer to the slow-path case. */
-   if (EXPECTED_NOT_TAKEN(a & mask)) {
-      PROF_EVENT(251, "helperc_STOREV2le-slow1");
-      mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
-      return;
-   }
-
-   UWord sec_no = (UWord)(a >> 16);
+#define MAKE_LOADV2(nAME,iS_BIGENDIAN) \
+ \
+ VG_REGPARM(1) \
+ UWord nAME ( Addr aA ) \
+ { \
+    PROF_EVENT(240, #nAME); \
+ \
+    if (VG_DEBUG_MEMORY >= 2) \
+       return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
+ \
+    const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
+    UWord a = (UWord)aA; \
+ \
+    /* If any part of 'a' indicated by the mask is 1, either */ \
+    /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
+    /* covered by the primary map. Either way we defer to the */ \
+    /* slow-path case. */ \
+    if (EXPECTED_NOT_TAKEN(a & mask)) { \
+       PROF_EVENT(241, #nAME"-slow1"); \
+       return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
+    } \
+ \
+    UWord sec_no = (UWord)(a >> 16); \
+ \
+    if (VG_DEBUG_MEMORY >= 1) \
+       tl_assert(sec_no < N_PRIMARY_MAP); \
+ \
+    SecMap* sm = primary_map[sec_no]; \
+    UWord v_off = a & 0xFFFF; \
+    UWord a_off = v_off >> 3; \
+    UWord abits = (UWord)(sm->abits[a_off]); \
+    if (EXPECTED_TAKEN(abits == VGM_BYTE_VALID)) { \
+       /* Handle common case quickly: a is mapped, and the */ \
+       /* entire word32 it lives in is addressible. */ \
+       /* Set the upper 16/48 bits of the result to 1 */ \
+       /* ("undefined"), just in case. This almost certainly */ \
+       /* isn't necessary, but be paranoid. */ \
+       return (~(UWord)0xFFFF) \
+              | \
+              (UWord)( ((UShort*)(sm->vbyte))[ v_off >> 1 ] ); \
+    } else { \
+       /* Slow but general case. */ \
+       PROF_EVENT(242, #nAME"-slow2"); \
+       return (UWord)mc_LOADVn_slow( aA, 2, iS_BIGENDIAN ); \
+    } \
+ }
+
+MAKE_LOADV2( MC_(helperc_LOADV2be), True /*bigendian*/ );
+MAKE_LOADV2( MC_(helperc_LOADV2le), False/*littleendian*/ );
+
+
+#define MAKE_STOREV2(nAME,iS_BIGENDIAN) \
+ \
+ VG_REGPARM(2) \
+ void nAME ( Addr aA, UWord vbytes ) \
+ { \
+    PROF_EVENT(250, #nAME); \
+ \
+    if (VG_DEBUG_MEMORY >= 2) \
+       mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
+ \
+    const UWord mask = ~((0x10000-2) | ((N_PRIMARY_MAP-1) << 16)); \
+    UWord a = (UWord)aA; \
+ \
+    /* If any part of 'a' indicated by the mask is 1, either */ \
+    /* 'a' is not naturally aligned, or 'a' exceeds the range */ \
+    /* covered by the primary map. Either way we defer to the */ \
+    /* slow-path case. */ \
+    if (EXPECTED_NOT_TAKEN(a & mask)) { \
+       PROF_EVENT(251, #nAME"-slow1"); \
+       mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
+       return; \
+    } \
+ \
+    UWord sec_no = (UWord)(a >> 16); \
+ \
+    if (VG_DEBUG_MEMORY >= 1) \
+       tl_assert(sec_no < N_PRIMARY_MAP); \
+ \
+    SecMap* sm = primary_map[sec_no]; \
+    UWord v_off = a & 0xFFFF; \
+    UWord a_off = v_off >> 3; \
+    UWord abits = (UWord)(sm->abits[a_off]); \
+    if (EXPECTED_TAKEN(!is_distinguished_sm(sm) \
+                       && abits == VGM_BYTE_VALID)) { \
+       /* Handle common case quickly. */ \
+       ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes; \
+    } else { \
+       /* Slow but general case. */ \
+       PROF_EVENT(252, #nAME"-slow2"); \
+       mc_STOREVn_slow( aA, 2, (ULong)vbytes, iS_BIGENDIAN ); \
+    } \
+ }
+
+
+MAKE_STOREV2( MC_(helperc_STOREV2be), True /*bigendian*/ );
+MAKE_STOREV2( MC_(helperc_STOREV2le), False/*littleendian*/ );
 
-# if VG_DEBUG_MEMORY >= 1
-   tl_assert(sec_no < N_PRIMARY_MAP);
-# endif
-
-   SecMap* sm = primary_map[sec_no];
-   UWord v_off = a & 0xFFFF;
-   UWord a_off = v_off >> 3;
-   UWord abits = (UWord)(sm->abits[a_off]);
-   if (EXPECTED_TAKEN(!is_distinguished_sm(sm)
-                      && abits == VGM_BYTE_VALID)) {
-      /* Handle common case quickly. */
-      ((UShort*)(sm->vbyte))[ v_off >> 1 ] = (UShort)vbytes;
-   } else {
-      /* Slow but general case. */
-      PROF_EVENT(252, "helperc_STOREV2le-slow2");
-      mc_STOREVn_slow( aA, 2, (ULong)vbytes, False/*littleendian*/ );
-   }
-# endif
-}
 
 /* ------------------------ Size = 1 ------------------------ */
 
+/* Note: endianness is irrelevant for size == 1 */
 VG_REGPARM(1)
-UWord MC_(helperc_LOADV1le) ( Addr aA )
+UWord MC_(helperc_LOADV1) ( Addr aA )
 {
-   PROF_EVENT(260, "helperc_LOADV1le");
+   PROF_EVENT(260, "helperc_LOADV1");
 
 # if VG_DEBUG_MEMORY >= 2
-   return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
+   return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
 # else
 
    const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
@@ -1771,8 +1795,8 @@ UWord MC_(helperc_LOADV1le) ( Addr aA )
       exceeds the range covered by the primary map. In which case we
       defer to the slow-path case. */
    if (EXPECTED_NOT_TAKEN(a & mask)) {
-      PROF_EVENT(261, "helperc_LOADV1le-slow1");
-      return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
+      PROF_EVENT(261, "helperc_LOADV1-slow1");
+      return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
    }
 
    UWord sec_no = (UWord)(a >> 16);
@@ -1796,20 +1820,20 @@ UWord MC_(helperc_LOADV1le) ( Addr aA )
           (UWord)( ((UChar*)(sm->vbyte))[ v_off ] );
    } else {
       /* Slow but general case. */
-      PROF_EVENT(262, "helperc_LOADV1le-slow2");
-      return (UWord)mc_LOADVn_slow( aA, 1, False/*littleendian*/ );
+      PROF_EVENT(262, "helperc_LOADV1-slow2");
+      return (UWord)mc_LOADVn_slow( aA, 1, False/*irrelevant*/ );
    }
 
 # endif
 }
 
 VG_REGPARM(2)
-void MC_(helperc_STOREV1le) ( Addr aA, UWord vbyte )
+void MC_(helperc_STOREV1) ( Addr aA, UWord vbyte )
 {
-   PROF_EVENT(270, "helperc_STOREV1le");
+   PROF_EVENT(270, "helperc_STOREV1");
 
 # if VG_DEBUG_MEMORY >= 2
-   mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
+   mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
 # else
 
    const UWord mask = ~((0x10000-1) | ((N_PRIMARY_MAP-1) << 16));
@@ -1818,8 +1842,8 @@ void MC_(helperc_STOREV1le) ( Addr aA, UWord vbyte )
      exceeds the range covered by the primary map. In which case we
      defer to the slow-path case. */
    if (EXPECTED_NOT_TAKEN(a & mask)) {
-      PROF_EVENT(271, "helperc_STOREV1le-slow1");
-      mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
+      PROF_EVENT(271, "helperc_STOREV1-slow1");
+      mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
       return;
    }
 
@@ -1839,8 +1863,8 @@ void MC_(helperc_STOREV1le) ( Addr aA, UWord vbyte )
         lives in is addressible. */
      ((UChar*)(sm->vbyte))[ v_off ] = (UChar)vbyte;
    } else {
-      PROF_EVENT(272, "helperc_STOREV1le-slow2");
-      mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*littleendian*/ );
+      PROF_EVENT(272, "helperc_STOREV1-slow2");
+      mc_STOREVn_slow( aA, 1, (ULong)vbyte, False/*irrelevant*/ );
    }
 
 # endif
@@ -2220,7 +2244,7 @@ typedef
    Addr start;
    SizeT size;
    ExeContext* where;
-   Char* desc;
+   Char* desc;
 }
 CGenBlock;
@@ -2332,7 +2356,7 @@ static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
          ai->blksize = cgbs[i].size;
         ai->rwoffset = (Int)(a) - (Int)(cgbs[i].start);
         ai->lastchange = cgbs[i].where;
-         ai->desc = cgbs[i].desc;
+         ai->desc = cgbs[i].desc;
         return True;
      }
   }
@@ -2361,7 +2385,7 @@ static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
            mc_record_user_error ( tid, bad_addr, /*isWrite*/True,
                                   /*isUnaddr*/True );
        *ret = ok ? (UWord)NULL : bad_addr;
-        break;
+        break;
 
      case VG_USERREQ__CHECK_READABLE: { /* check readable */
         MC_ReadResult res;
@@ -2373,56 +2397,56 @@ static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
            mc_record_user_error ( tid, bad_addr, /*isWrite*/False,
                                   /*isUnaddr*/False );
        *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
-        break;
+        break;
      }
      case VG_USERREQ__DO_LEAK_CHECK:
        mc_detect_memory_leaks(tid, arg[1] ? LC_Summary : LC_Full);
-        *ret = 0; /* return value is meaningless */
-        break;
+        *ret = 0; /* return value is meaningless */
+        break;
      case VG_USERREQ__MAKE_NOACCESS: /* make no access */
        mc_make_noaccess ( arg[1], arg[2] );
-        *ret = -1;
-        break;
+        *ret = -1;
+        break;
      case VG_USERREQ__MAKE_WRITABLE: /* make writable */
        mc_make_writable ( arg[1], arg[2] );
        *ret = -1;
-        break;
+        break;
      case VG_USERREQ__MAKE_READABLE: /* make readable */
        mc_make_readable ( arg[1], arg[2] );
-        *ret = -1;
+        *ret = -1;
        break;
      case VG_USERREQ__CREATE_BLOCK: /* describe a block */
-        if (arg[1] != 0 && arg[2] != 0) {
-           i = alloc_client_block();
-           /* VG_(printf)("allocated %d %p\n", i, cgbs); */
-           cgbs[i].start = arg[1];
-           cgbs[i].size = arg[2];
-           cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
-           cgbs[i].where = VG_(record_ExeContext) ( tid );
-
-           *ret = i;
-        } else
-           *ret = -1;
-        break;
+        if (arg[1] != 0 && arg[2] != 0) {
+           i = alloc_client_block();
+           /* VG_(printf)("allocated %d %p\n", i, cgbs); */
+           cgbs[i].start = arg[1];
+           cgbs[i].size = arg[2];
+           cgbs[i].desc = VG_(strdup)((Char *)arg[3]);
+           cgbs[i].where = VG_(record_ExeContext) ( tid );
+
+           *ret = i;
+        } else
+           *ret = -1;
+        break;
      case VG_USERREQ__DISCARD: /* discard */
        if (cgbs == NULL || arg[2] >= cgb_used ||
-            (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
+            (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
           *ret = 1;
-        } else {
-           tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
-           cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
-           VG_(free)(cgbs[arg[2]].desc);
-           cgb_discards++;
-           *ret = 0;
-        }
-        break;
+        } else {
+           tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
+           cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
+           VG_(free)(cgbs[arg[2]].desc);
+           cgb_discards++;
+           *ret = 0;
+        }
+        break;
 //zz      case VG_USERREQ__GET_VBITS:
 //zz         /* Returns: 1 == OK, 2 == alignment error, 3 == addressing
diff --git a/memcheck/mc_translate.c b/memcheck/mc_translate.c
index 0ea31b2b07..49590ee598 100644
--- a/memcheck/mc_translate.c
+++ b/memcheck/mc_translate.c
@@ -1990,14 +1990,29 @@ IRAtom* expr2vbits_Load_WRK ( MCEnv* mce,
       case Ity_I16: helper = &MC_(helperc_LOADV2le);
                     hname = "MC_(helperc_LOADV2le)";
                     break;
-      case Ity_I8:  helper = &MC_(helperc_LOADV1le);
-                    hname = "MC_(helperc_LOADV1le)";
+      case Ity_I8:  helper = &MC_(helperc_LOADV1);
+                    hname = "MC_(helperc_LOADV1)";
                     break;
       default:      ppIRType(ty);
                     VG_(tool_panic)("memcheck:do_shadow_Load(LE)");
    }
 } else {
-   VG_(tool_panic)("memcheck:do_shadow_Load(BE):bigendian not implemented");
+   switch (ty) {
+      case Ity_I64: helper = &MC_(helperc_LOADV8be);
+                    hname = "MC_(helperc_LOADV8be)";
+                    break;
+      case Ity_I32: helper = &MC_(helperc_LOADV4be);
+                    hname = "MC_(helperc_LOADV4be)";
+                    break;
+      case Ity_I16: helper = &MC_(helperc_LOADV2be);
+                    hname = "MC_(helperc_LOADV2be)";
+                    break;
+      case Ity_I8:  helper = &MC_(helperc_LOADV1);
+                    hname = "MC_(helperc_LOADV1)";
+                    break;
+      default:      ppIRType(ty);
+                    VG_(tool_panic)("memcheck:do_shadow_Load(BE)");
+   }
 }
 
 /* Generate the actual address into addrAct. */
@@ -2238,13 +2253,28 @@ void do_shadow_Store ( MCEnv* mce,
       case Ity_I16: helper = &MC_(helperc_STOREV2le);
                     hname = "MC_(helperc_STOREV2le)";
                     break;
-      case Ity_I8:  helper = &MC_(helperc_STOREV1le);
-                    hname = "MC_(helperc_STOREV1le)";
+      case Ity_I8:  helper = &MC_(helperc_STOREV1);
+                    hname = "MC_(helperc_STOREV1)";
                     break;
       default:      VG_(tool_panic)("memcheck:do_shadow_Store(LE)");
    }
 } else {
-   VG_(tool_panic)("memcheck:do_shadow_Store(BE):bigendian not implemented");
+   switch (ty) {
+      case Ity_V128: /* we'll use the helper twice */
+      case Ity_I64: helper = &MC_(helperc_STOREV8be);
+                    hname = "MC_(helperc_STOREV8be)";
+                    break;
+      case Ity_I32: helper = &MC_(helperc_STOREV4be);
+                    hname = "MC_(helperc_STOREV4be)";
+                    break;
+      case Ity_I16: helper = &MC_(helperc_STOREV2be);
+                    hname = "MC_(helperc_STOREV2be)";
+                    break;
+      case Ity_I8:  helper = &MC_(helperc_STOREV1);
+                    hname = "MC_(helperc_STOREV1)";
+                    break;
+      default:      VG_(tool_panic)("memcheck:do_shadow_Store(BE)");
+   }
 }
 
 if (ty == Ity_V128) {
@@ -2258,7 +2288,6 @@ void do_shadow_Store ( MCEnv* mce,
       offLo64 = 0;
       offHi64 = 8;
    } else {
-      tl_assert(0 /* awaiting test case */);
      offLo64 = 8;
      offHi64 = 0;
   }
-- 
2.47.3
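
Note (not part of the patch): the "Cpp Abuse" above is ordinary token substitution plus stringizing. A single macro body, every line continued with a backslash, is expanded once per endianness, and #nAME turns the generated function's name into the string literal handed to PROF_EVENT. This is also why the old "# if VG_DEBUG_MEMORY" blocks became plain if statements inside the macros: preprocessor directives cannot appear in a macro body, so the tests were turned into constant conditions for the compiler to fold away. A minimal, self-contained sketch of the same technique follows; the names MAKE_LOAD16, demo_load16be and demo_load16le are hypothetical, not Valgrind code.

   #include <stdio.h>

   /* One macro body, expanded twice below; iS_BIGENDIAN is a      */
   /* compile-time constant, as in the patch's MAKE_LOADV8 et al.  */
   #define MAKE_LOAD16(nAME, iS_BIGENDIAN) \
      static unsigned nAME ( const unsigned char* p ) \
      { \
         /* #nAME stringizes the generated function's name, */ \
         /* mirroring PROF_EVENT(200, #nAME) in the patch.  */ \
         printf("enter %s\n", #nAME); \
         return iS_BIGENDIAN ? (unsigned)((p[0] << 8) | p[1]) \
                             : (unsigned)((p[1] << 8) | p[0]); \
      }

   MAKE_LOAD16( demo_load16be, 1 /*bigendian*/ )
   MAKE_LOAD16( demo_load16le, 0 /*littleendian*/ )

   int main ( void )
   {
      unsigned char buf[2] = { 0x12, 0x34 };
      /* Prints 1234 then 3412: one body, both byte orders. */
      printf("%04x %04x\n", demo_load16be(buf), demo_load16le(buf));
      return 0;
   }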
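
Note (not part of the patch): each helper's fast path folds the alignment and range checks into a single AND against a precomputed mask. For an N-byte access, ~((0x10000-N) | ((N_PRIMARY_MAP-1) << 16)) has 1-bits exactly in the low log2(N) positions and above the region the primary map covers, so (a & mask) is nonzero iff the access is misaligned or out of range, and either case takes the slow path. A worked sketch, assuming 64KB secondary maps and a power-of-two N_PRIMARY_MAP; the value below and the name fast_path_ok are stand-ins, not memcheck's:

   #include <assert.h>

   #define N_PRIMARY_MAP 0x10000UL   /* stand-in; must be a power of two */

   /* Nonzero iff an nbytes-wide access at a may take the fast path:  */
   /* naturally aligned and within the primary map's coverage.        */
   static int fast_path_ok ( unsigned long a, unsigned long nbytes )
   {
      unsigned long mask = ~((0x10000UL - nbytes)
                             | ((N_PRIMARY_MAP - 1) << 16));
      return (a & mask) == 0;
   }

   int main ( void )
   {
      assert(  fast_path_ok(0x00001000UL, 8) );  /* 8-aligned, in range */
      assert( !fast_path_ok(0x00001004UL, 8) );  /* not 8-aligned       */
      assert( !fast_path_ok(0x00001002UL, 4) );  /* not 4-aligned       */
      assert(  fast_path_ok(0x00001002UL, 2) );  /* 2-aligned is fine   */
      return 0;
   }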