Update the tests so they can be compiled for nanoMIPS.
Patch by Dimitrije Nikolic and Aleksandra Karadzic.
# define PLAT_s390x_linux 1
#elif defined(__linux__) && defined(__mips__)
# define PLAT_mips32_linux 1
+#elif defined(__linux__) && defined(__nanomips__)
+# define PLAT_nanomips_linux 1
#elif defined(__sun__) && defined(__i386__)
# define PLAT_x86_solaris 1
#elif defined(__sun__) && defined(__x86_64__)
: /*out*/ : /*in*/ "r"(&(_lval)) \
: /*trash*/ "$8", "$9", "$10", "cc", "memory" \
)
+#elif defined(PLAT_nanomips_linux)
+# define INC(_lval,_lqual) \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " move $t0, %0\n" \
+ " ll $t1, 0($t0)\n" \
+ " addiu $t1, $t1, 1\n" \
+ " sc $t1, 0($t0)\n" \
+ " beqc $t1, $zero, 1b\n" \
+ : /*out*/ : /*in*/ "r"(&(_lval)) \
+ : /*trash*/ "$t0", "$t1", "memory" \
+ )
#else
# error "Fix Me for this platform"
#endif
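
The added INC is the classic load-linked/store-conditional retry loop: ll reads the word, addiu increments it, and sc writes it back only if no other agent has touched the word since the ll, leaving 1 in $t1 on success and 0 on failure, so beqc loops back until the store succeeds. A minimal sketch of the same semantics (hypothetical, not part of the patch), assuming a toolchain that provides the GCC/Clang __atomic builtins:

    /* Minimal sketch: the ll/addiu/sc retry loop collapses into a
       single atomic fetch-and-add. */
    static void inc_atomic(int *p)
    {
       __atomic_fetch_add(p, 1, __ATOMIC_RELAXED);
    }
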
#else
# define PLAT_mips32_linux 1
#endif
+#elif defined(__linux__) && defined(__nanomips__)
+# define PLAT_nanomips_linux 1
#elif defined(__sun__) && defined(__i386__)
# define PLAT_x86_solaris 1
#elif defined(__sun__) && defined(__x86_64__)
: /*out*/ : /*in*/ "r"(&(_lval)) \
: /*trash*/ "t0", "t1", "memory" \
)
+#elif defined(PLAT_nanomips_linux)
+# define INC(_lval,_lqual) \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " move $t0, %0\n" \
+ " ll $t1, 0($t0)\n" \
+ " addiu $t1, $t1, 1\n" \
+ " sc $t1, 0($t0)\n" \
+ " beqc $t1, $zero, 1b\n" \
+ : /*out*/ : /*in*/ "r"(&(_lval)) \
+ : /*trash*/ "$t0", "$t1", "memory" \
+ )
#else
# error "Fix Me for this platform"
#endif
# define PLAT_s390x_linux 1
#elif defined(__linux__) && defined(__mips__)
# define PLAT_mips32_linux 1
+#elif defined(__linux__) && defined(__nanomips__)
+# define PLAT_nanomips_linux 1
#elif defined(__sun__) && defined(__i386__)
# define PLAT_x86_solaris 1
#elif defined(__sun__) && defined(__x86_64__)
: "$12", "$13", "$14", "memory", "cc" \
)
+# define XCHG_M_R_with_redundant_LOCK(_addr,_lval) \
+ XCHG_M_R(_addr,_lval)
+#elif defined(PLAT_nanomips_linux)
+# define XCHG_M_R(_addr,_lval) \
+ __asm__ __volatile__( \
+ "move $t0, %2\n" \
+ "move $t1, %1\n" \
+ "ll $t2, 0($t1)\n" \
+ "sc $t0, 0($t1)\n" \
+ "move %0, $t2\n" \
+ : /*out*/ "=r"(_lval) \
+ : /*in*/ "r"(&_addr), "r"(_lval) \
+ : "$t0", "$t1", "$t1", "memory" \
+ )
+
# define XCHG_M_R_with_redundant_LOCK(_addr,_lval) \
XCHG_M_R(_addr,_lval)
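
The nanoMIPS XCHG_M_R mirrors the mips32 sequence above it: ll captures the old contents into $t2, sc attempts to store the new value, and the old contents are returned through %0. Like the mips32 version it does not loop on sc failure, presumably because the test only needs the tool to observe the ll/sc pair. A minimal sketch of the intended semantics (hypothetical, using a compiler builtin rather than this patch's asm):

    /* Minimal sketch, assuming __atomic builtins: store newval into
       *addr and return the previous contents, atomically. */
    static unsigned xchg_word(unsigned *addr, unsigned newval)
    {
       return __atomic_exchange_n(addr, newval, __ATOMIC_RELAXED);
    }
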
: "+m" (*p), "+m" (dummy)
: "d" (n)
: "cc", "memory", "0", "1");
-#elif defined(VGA_mips32)
+#elif defined(VGA_mips32) || defined(VGA_nanomips)
/* We rely on the fact that p is 4-aligned. Otherwise 'll' may throw an
exception that can cause this function to fail. */
#if defined (_MIPSEL)
: "+m" (*p), "+m" (dummy)
: "d" (n)
: "cc", "memory", "0", "1");
-#elif defined(VGA_mips32)
+#elif defined(VGA_mips32) || defined(VGA_nanomips)
/* We rely on the fact that p is 4-aligned. Otherwise 'll' may throw an
exception that can cause this function to fail. */
#if defined (_MIPSEL)
: "+m" (*p)
: "d" (n)
: "cc", "memory", "0", "1");
-#elif defined(VGA_mips32)
+#elif defined(VGA_mips32) || defined(VGA_nanomips)
unsigned int block[3]
= { (unsigned int)p, (unsigned int)n, 0x0 };
do {
__attribute__((noinline)) void atomic_add_64bit ( long long int* p, int n )
{
-#if defined(VGA_x86) || defined(VGA_ppc32) || defined(VGA_mips32)
+#if defined(VGA_x86) || defined(VGA_ppc32) || defined(VGA_mips32) \
+ || defined(VGA_nanomips)
/* do nothing; 64-bit atomic add is not supported on these platforms */
#elif defined(VGA_amd64)
// this is a bit subtle. It relies on the fact that, on a 64-bit platform,
__asm__ __volatile__( "li 11, 0" : : :/*trash*/"r11" ); \
__asm__ __volatile__( "li 12, 0" : : :/*trash*/"r12" ); \
} while (0)
+#elif defined(__nanomips__)
+#define CLEAR_CALLER_SAVED_REGS \
+ do { \
+ __asm__ __volatile__ (".set push \n\t" \
+ ".set noat \n\t" \
+ "move $at, $zero \n\t" \
+ "move $t4, $zero \n\t" \
+ "move $t5, $zero \n\t" \
+ "move $a0, $zero \n\t" \
+ "move $a1, $zero \n\t" \
+ "move $a2, $zero \n\t" \
+ "move $a3, $zero \n\t" \
+ "move $a4, $zero \n\t" \
+ "move $a5, $zero \n\t" \
+ "move $a6, $zero \n\t" \
+ "move $a7, $zero \n\t" \
+ "move $t0, $zero \n\t" \
+ "move $t1, $zero \n\t" \
+ "move $t2, $zero \n\t" \
+ "move $t3, $zero \n\t" \
+ "move $t8, $zero \n\t" \
+ "move $t9, $zero \n\t" \
+ ".set pop \n\t" \
+ : : : "$at", "$t4", "$t5", "$a0", "$a1", "$a2", \
+ "$a3", "$a4", "$a5", "$a6", "$a7", "$t0", \
+ "$t1", "$t2", "$t3", "$t8", "$t9"); \
+ } while (0)
#elif (__mips == 32)
#define CLEAR_CALLER_SAVED_REGS \
do { \
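
A plausible usage sketch for CLEAR_CALLER_SAVED_REGS (hypothetical fragment; the names are illustrative), mirroring how the existing per-arch variants are used: a pointer that survives only in a dead caller-saved register can make a leaked block look reachable, so the registers are zeroed before checking:

    char *p = malloc(100);    /* block we intend to leak       */
    p = NULL;                 /* no pointer left in memory...  */
    CLEAR_CALLER_SAVED_REGS;  /* ...and none left in registers */
    /* a leak check run at this point should report the block  */
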
unsigned long VKI_PAGE_SIZE = 1UL << 12;
#elif defined(VGP_arm64_linux)
unsigned long VKI_PAGE_SIZE = 1UL << 16;
-#elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
+#elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+ || defined(VGP_nanomips_linux)
#include <unistd.h>
unsigned long VKI_PAGE_SIZE;
#endif
*ga = VexArchMIPS32;
#elif defined(VGA_mips64)
*ga = VexArchMIPS64;
+#elif defined(VGA_nanomips)
+ *ga = VexArchNANOMIPS;
#else
missing arch;
#endif
case VexArchS390X: return VexEndnessBE;
case VexArchMIPS32:
case VexArchMIPS64:
+ case VexArchNANOMIPS:
/* mips32/64 supports BE or LE, but this is fixed at compile time.
If mips64 is compiled on a non-mips system, the VEX lib
is missing bits and pieces of code related to endianness.
VexArch ga;
get_guest_arch( &ga);
- if (ga == VexArchMIPS64 || ga == VexArchMIPS32)
+ if (ga == VexArchMIPS64 || ga == VexArchMIPS32 || ga == VexArchNANOMIPS)
return running_endness();
else
return VexEndnessBE;
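
For MIPS-family guests the endianness is whatever the build produced, so the code defers to running_endness(). A hedged sketch of such a runtime probe (hypothetical; assumes libvex.h's VexEndness type, and the helper used here may differ):

    /* Minimal sketch: inspect the low byte of a known constant to
       decide the endianness we are actually running with. */
    static VexEndness running_endness_sketch(void)
    {
       unsigned int x = 0x01020304;
       return (*(unsigned char *)&x == 0x04) ? VexEndnessLE : VexEndnessBE;
    }
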
case VexArchMIPS32: return VEX_PRID_COMP_MIPS;
case VexArchMIPS64: return VEX_PRID_COMP_MIPS | VEX_MIPS_HOST_FR;
#endif
+ case VexArchNANOMIPS: return 0;
default: failure_exit();
}
}
case VexArchS390X: return True;
case VexArchMIPS32: return False;
case VexArchMIPS64: return True;
+ case VexArchNANOMIPS: return False;
default: failure_exit();
}
}
// explicitly via command line arguments.
if (multiarch) {
VexArch va;
- for (va = VexArchX86; va <= VexArchMIPS64; va++) {
+ for (va = VexArchX86; va <= VexArchNANOMIPS; va++) {
vta.arch_host = va;
vta.archinfo_host.endness = arch_endness (vta.arch_host);
vta.archinfo_host.hwcaps = arch_hwcaps (vta.arch_host);
#if defined(__mips__)
syscall(__NR_cacheflush, a, FN_SIZE * n_fns, ICACHE);
+#elif defined(__nanomips__)
+ __builtin___clear_cache(a, (char*)a + FN_SIZE * n_fns);
#endif
for (h = 0; h < n_reps; h += 1) {
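
On classic MIPS the test flushes the instruction cache through the cacheflush syscall; on nanoMIPS the patch uses GCC's __builtin___clear_cache over the same byte range, which must run before freshly written code is executed. A self-contained sketch of the general pattern (hypothetical, assuming POSIX mmap; error handling omitted):

    #include <string.h>
    #include <sys/mman.h>

    typedef int (*fn_t)(void);

    /* Copy a code fragment into executable memory and flush the
       icache over exactly the bytes written, before calling it. */
    static fn_t install(const void *code, size_t len)
    {
       void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
       memcpy(buf, code, len);
       __builtin___clear_cache(buf, (char *)buf + len);
       return (fn_t)buf;
    }
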