From: Julian Seward
Date: Sun, 16 Aug 2015 17:23:50 +0000 (+0000)
Subject: arm32: make sure we actually generate real 64-bit loads/stores for the
X-Git-Tag: svn/VALGRIND_3_11_0~99
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=683436b8eaa68c79b21672dd1550cdce1bfa9755;p=thirdparty%2Fvalgrind.git

arm32: make sure we actually generate real 64-bit loads/stores for the
64 bit tests, rather than two 32 bit transactions.

git-svn-id: svn://svn.valgrind.org/valgrind/trunk@15558
---

diff --git a/memcheck/tests/sh-mem-random.c b/memcheck/tests/sh-mem-random.c
index 5e4361c39d..ae82248ae6 100644
--- a/memcheck/tests/sh-mem-random.c
+++ b/memcheck/tests/sh-mem-random.c
@@ -102,7 +102,10 @@ static inline U1 randomU1 ( void )
    return 0xFF & (randomU4() >> 13);
 }
 
-#define N_BYTES  300000
+// NB!  300000 is really not enough to shake out all failures.
+// Increasing it by a factor of 256 is, but makes the test take
+// the best part of an hour.
+#define N_BYTES  (300000 /* * 256 */)
 #define N_EVENTS (5 * N_BYTES)
 
@@ -188,6 +191,16 @@ void do_test_at ( U1* arr )
            "emms"
            : : "r"(arr+dst), "r"(arr+src) : "memory"
         );
+#elif defined(__linux__) && defined(__arm__) && !defined(__aarch64__)
+        /* On arm32, many compilers generate a 64-bit float move
+           using two 32 bit integer registers, which completely
+           defeats this test.  Hence force a 64-bit NEON load and
+           store.  I guess this will break the build on non-NEON
+           capable targets. */
+        __asm__ __volatile__ (
+           "vld1.64 {d7},[%0] ; vst1.64 {d7},[%1] "
+           : : "r"(arr+src), "r"(arr+dst) : "d7","memory"
+        );
 #else
         /* Straightforward.  On amd64, this gives a load/store of
            the bottom half of an xmm register.  On ppc32/64 this
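
For context, below is a minimal standalone sketch (not part of the commit) of the same NEON-based 8-byte copy in isolation, assuming a NEON-capable arm32 Linux toolchain. The copy64 helper, the file name, and the build command are hypothetical, for illustration only; the inline asm itself is taken verbatim from the patch.

/* neon_copy64.c -- hypothetical demo; build on arm32 with e.g.
      gcc -marm -mfpu=neon -o neon_copy64 neon_copy64.c            */
#include <stdio.h>
#include <string.h>

static void copy64 ( unsigned char* dst, const unsigned char* src )
{
#if defined(__arm__) && !defined(__aarch64__)
   /* One 64-bit NEON load and one 64-bit NEON store, as in the patch:
      the whole doubleword moves through d7 in a single transaction
      each way, rather than as two 32-bit integer transfers. */
   __asm__ __volatile__ (
      "vld1.64 {d7},[%0] ; vst1.64 {d7},[%1] "
      : : "r"(src), "r"(dst) : "d7","memory"
   );
#else
   /* Fallback so the sketch compiles elsewhere; a plain 8-byte copy,
      which the compiler is free to split -- the behaviour the patch
      works around on arm32. */
   memcpy(dst, src, 8);
#endif
}

int main ( void )
{
   unsigned char src[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
   unsigned char dst[8] = { 0 };
   copy64(dst, src);
   printf("dst[0]=%d dst[7]=%d\n", dst[0], dst[7]);
   return 0;
}

Under memcheck, the difference matters because the test checks that definedness is tracked at the width of the actual memory transaction: a compiler that lowers the copy to two 32-bit integer moves never exercises the 64-bit load/store paths the test is meant to cover.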