Necessary changes to support nanoMIPS on Linux.
Part 2/4 - Coregrind changes
Patch by Aleksandar Rikalo, Dimitrije Nikolic, Tamara Vlahovic and
Aleksandra Karadzic.
Related KDE issue: #400872.
m_dispatch/dispatch-s390x-linux.S \
m_dispatch/dispatch-mips32-linux.S \
m_dispatch/dispatch-mips64-linux.S \
+ m_dispatch/dispatch-nanomips-linux.S \
m_dispatch/dispatch-x86-darwin.S \
m_dispatch/dispatch-amd64-darwin.S \
m_dispatch/dispatch-x86-solaris.S \
m_gdbserver/valgrind-low-s390x.c \
m_gdbserver/valgrind-low-mips32.c \
m_gdbserver/valgrind-low-mips64.c \
+ m_gdbserver/valgrind-low-nanomips.c \
m_gdbserver/version.c \
m_initimg/initimg-linux.c \
m_initimg/initimg-darwin.c \
m_sigframe/sigframe-s390x-linux.c \
m_sigframe/sigframe-mips32-linux.c \
m_sigframe/sigframe-mips64-linux.c \
+ m_sigframe/sigframe-nanomips-linux.c \
m_sigframe/sigframe-x86-darwin.c \
m_sigframe/sigframe-amd64-darwin.c \
m_sigframe/sigframe-solaris.c \
m_syswrap/syscall-s390x-linux.S \
m_syswrap/syscall-mips32-linux.S \
m_syswrap/syscall-mips64-linux.S \
+ m_syswrap/syscall-nanomips-linux.S \
m_syswrap/syscall-x86-darwin.S \
m_syswrap/syscall-amd64-darwin.S \
m_syswrap/syscall-x86-solaris.S \
m_syswrap/syswrap-s390x-linux.c \
m_syswrap/syswrap-mips32-linux.c \
m_syswrap/syswrap-mips64-linux.c \
+ m_syswrap/syswrap-nanomips-linux.c \
m_syswrap/syswrap-x86-darwin.c \
m_syswrap/syswrap-amd64-darwin.c \
m_syswrap/syswrap-xen.c \
#define EM_PPC64 21 // ditto
#endif
+#ifndef EM_NANOMIPS
+#define EM_NANOMIPS 249
+#endif
+
#ifndef E_MIPS_ABI_O32
#define E_MIPS_ABI_O32 0x00001000
#endif
(header.ehdr32.e_flags & E_MIPS_ABI2)) {
platform = "mips64-linux";
}
+ else
+ if (header.ehdr32.e_machine == EM_NANOMIPS &&
+ (header.ehdr32.e_ident[EI_OSABI] == ELFOSABI_SYSV ||
+ header.ehdr32.e_ident[EI_OSABI] == ELFOSABI_LINUX)) {
+ platform = "nanomips-linux";
+ }
}
else if (header.c[EI_DATA] == ELFDATA2MSB) {
if (header.ehdr32.e_machine == EM_PPC &&
(header.ehdr32.e_flags & E_MIPS_ABI2)) {
platform = "mips64-linux";
}
+ else
+ if (header.ehdr32.e_machine == EM_NANOMIPS &&
+ (header.ehdr32.e_ident[EI_OSABI] == ELFOSABI_SYSV ||
+ header.ehdr32.e_ident[EI_OSABI] == ELFOSABI_LINUX)) {
+ platform = "nanomips-linux";
+ }
}
} else if (n_bytes >= sizeof(Elf64_Ehdr) && header.c[EI_CLASS] == ELFCLASS64) {
(0==strcmp(VG_PLATFORM,"arm64-linux")) ||
(0==strcmp(VG_PLATFORM,"s390x-linux")) ||
(0==strcmp(VG_PLATFORM,"mips32-linux")) ||
- (0==strcmp(VG_PLATFORM,"mips64-linux")))
+ (0==strcmp(VG_PLATFORM,"mips64-linux")) ||
+ (0==strcmp(VG_PLATFORM,"nanomips-linux")))
default_platform = VG_PLATFORM;
# elif defined(VGO_solaris)
if ((0==strcmp(VG_PLATFORM,"x86-solaris")) ||
res = VG_(do_syscall6)(__NR3264_mmap, (UWord)start, length,
prot, flags, fd, offset);
# elif defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
- || defined(VGP_arm_linux)
+ || defined(VGP_arm_linux) || defined(VGP_nanomips_linux)
/* mmap2 uses 4096 chunks even if actual page size is bigger. */
aspacem_assert((offset % 4096) == 0);
res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
SysRes ML_(am_open) ( const HChar* pathname, Int flags, Int mode )
{
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
/* ARM64 wants to use __NR_openat rather than __NR_open. */
SysRes res = VG_(do_syscall4)(__NR_openat,
VKI_AT_FDCWD, (UWord)pathname, flags, mode);
Int ML_(am_readlink)(const HChar* path, HChar* buf, UInt bufsiz)
{
SysRes res;
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
(UWord)path, (UWord)buf, bufsiz);
# elif defined(VGO_linux) || defined(VGO_darwin)
Int ML_(am_fcntl) ( Int fd, Int cmd, Addr arg )
{
# if defined(VGO_linux) || defined(VGO_solaris)
+# if defined(VGP_nanomips_linux)
+ SysRes res = VG_(do_syscall3)(__NR_fcntl64, fd, cmd, arg);
+# else
SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
+# endif
# elif defined(VGO_darwin)
SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
# else
#elif defined(VGA_arm) || defined(VGA_ppc32) || \
defined(VGA_ppc64be) || defined(VGA_ppc64le) || \
- defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_arm64)
+ defined(VGA_mips32) || defined(VGA_mips64) || \
+ defined(VGA_arm64) || defined(VGA_nanomips)
static Bool
get_cache_info(VexArchInfo *vai)
{
ehdr->e_entry = 0;
ehdr->e_phoff = sizeof(ESZ(Ehdr));
ehdr->e_shoff = 0;
+#if defined(VGP_nanomips_linux)
+ ehdr->e_flags = VKI_EF_NANOMIPS_ABI_P32;
+#else
ehdr->e_flags = 0;
+#endif
ehdr->e_ehsize = sizeof(ESZ(Ehdr));
ehdr->e_phentsize = sizeof(ESZ(Phdr));
ehdr->e_phnum = num_phdrs;
/*OUT*/struct vki_elf_prstatus *prs,
const vki_siginfo_t *si)
{
-#if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
+#if defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+ || defined(VGP_nanomips_linux)
vki_elf_greg_t *regs;
#else
struct vki_user_regs_struct *regs;
#if defined(VGP_s390x_linux)
/* prs->pr_reg has struct type. Need to take address. */
regs = (struct vki_user_regs_struct *)&(prs->pr_reg);
-#elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
+#elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+ || defined(VGP_nanomips_linux)
regs = (vki_elf_greg_t *)prs->pr_reg;
#else
regs = (struct vki_user_regs_struct *)prs->pr_reg;
regs[VKI_MIPS64_EF_HI] = arch->vex.guest_HI;
regs[VKI_MIPS64_EF_CP0_STATUS] = arch->vex.guest_CP0_status;
regs[VKI_MIPS64_EF_CP0_EPC] = arch->vex.guest_PC;
+#elif defined(VGP_nanomips_linux)
+# define DO(n) regs[VKI_MIPS32_EF_R##n] = arch->vex.guest_r##n
+ DO(1); DO(2); DO(3); DO(4); DO(5); DO(6); DO(7); DO(8);
+ DO(9); DO(10); DO(11); DO(12); DO(13); DO(14); DO(15); DO(16);
+ DO(17); DO(18); DO(19); DO(20); DO(21); DO(22); DO(23); DO(24);
+ DO(25); DO(28); DO(29); DO(30); DO(31);
+ regs[VKI_MIPS32_EF_CP0_STATUS] = arch->vex.guest_CP0_status;
+ regs[VKI_MIPS32_EF_CP0_EPC] = arch->vex.guest_PC;
+# undef DO
#else
# error Unknown ELF platform
#endif
DO(16); DO(17); DO(18); DO(19); DO(20); DO(21); DO(22); DO(23);
DO(24); DO(25); DO(26); DO(27); DO(28); DO(29); DO(30); DO(31);
# undef DO
+#elif defined(VGP_nanomips_linux)
+
#else
# error Unknown ELF platform
#endif
# if !defined(VGPV_arm_linux_android) \
&& !defined(VGPV_x86_linux_android) \
&& !defined(VGPV_mips32_linux_android) \
- && !defined(VGPV_arm64_linux_android)
+ && !defined(VGPV_arm64_linux_android) \
+ && !defined(VGP_nanomips_linux)
add_note(notelist, "CORE", NT_FPREGSET, &fpu, sizeof(fpu));
# endif
# elif defined(VGP_s390x_linux)
if (regno == 15) { *a = regs->sp; return True; }
if (regno == 11) { *a = regs->fp; return True; }
-# elif defined(VGP_mips32_linux)
+# elif defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
if (regno == 29) { *a = regs->sp; return True; }
if (regno == 30) { *a = regs->fp; return True; }
# elif defined(VGP_mips64_linux)
is_ro_map = False;
# if defined(VGA_x86) || defined(VGA_ppc32) || defined(VGA_mips32) \
- || defined(VGA_mips64)
+ || defined(VGA_mips64) || defined(VGA_nanomips)
is_rx_map = seg->hasR && seg->hasX;
is_rw_map = seg->hasR && seg->hasW;
# elif defined(VGA_amd64) || defined(VGA_ppc64be) || defined(VGA_ppc64le) \
case Creg_S390_SP: return eec->uregs->sp;
case Creg_S390_FP: return eec->uregs->fp;
case Creg_S390_LR: return eec->uregs->lr;
-# elif defined(VGA_mips32) || defined(VGA_mips64)
+# elif defined(VGA_mips32) || defined(VGA_mips64) \
+ || defined(VGA_nanomips)
case Creg_IA_IP: return eec->uregs->pc;
case Creg_IA_SP: return eec->uregs->sp;
case Creg_IA_BP: return eec->uregs->fp;
case CFIC_IA_BPREL:
cfa = cfsi_m->cfa_off + uregs->fp;
break;
-# elif defined(VGA_mips32) || defined(VGA_mips64)
+# elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
case CFIC_IA_SPREL:
cfa = cfsi_m->cfa_off + uregs->sp;
break;
ipHere = uregsHere->r15;
# elif defined(VGA_s390x)
ipHere = uregsHere->ia;
-# elif defined(VGA_mips32) || defined(VGA_mips64)
+# elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
ipHere = uregsHere->pc;
# elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
# elif defined(VGP_arm64_linux)
COMPUTE(uregsPrev.f5, uregsHere->f5, cfsi_m->f5_how, cfsi_m->f5_off);
COMPUTE(uregsPrev.f6, uregsHere->f6, cfsi_m->f6_how, cfsi_m->f6_off);
COMPUTE(uregsPrev.f7, uregsHere->f7, cfsi_m->f7_how, cfsi_m->f7_off);
-# elif defined(VGA_mips32) || defined(VGA_mips64)
+# elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
COMPUTE(uregsPrev.pc, uregsHere->pc, cfsi_m->ra_how, cfsi_m->ra_off);
COMPUTE(uregsPrev.sp, uregsHere->sp, cfsi_m->sp_how, cfsi_m->sp_off);
COMPUTE(uregsPrev.fp, uregsHere->fp, cfsi_m->fp_how, cfsi_m->fp_off);
Int f7_off;
}
DiCfSI_m;
-#elif defined(VGA_mips32) || defined(VGA_mips64)
+#elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
typedef
struct {
UChar cfa_how; /* a CFIC_ value */
# define FP_REG 11 // sometimes s390 has a frame pointer in r11
# define SP_REG 15 // stack is always r15
# define RA_REG_DEFAULT 14 // the return address is in r14
-#elif defined(VGP_mips32_linux)
+#elif defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
# define FP_REG 30
# define SP_REG 29
# define RA_REG_DEFAULT 31
might exist, for Neon/VFP-v3. */
#if defined(VGP_ppc32_linux) || defined(VGP_ppc64be_linux) \
|| defined(VGP_ppc64le_linux) || defined(VGP_mips32_linux) \
- || defined(VGP_mips64_linux)
+ || defined(VGP_nanomips_linux) || defined(VGP_mips64_linux)
# define N_CFI_REGS 72
#elif defined(VGP_arm_linux)
# define N_CFI_REGS 320
if (ctxs->cfa_is_regoff && ctxs->cfa_reg == SP_REG) {
si_m->cfa_off = ctxs->cfa_off;
# if defined(VGA_x86) || defined(VGA_amd64) || defined(VGA_s390x) \
- || defined(VGA_mips32) || defined(VGA_mips64)
+ || defined(VGA_mips32) || defined(VGA_nanomips) || defined(VGA_mips64)
si_m->cfa_how = CFIC_IA_SPREL;
# elif defined(VGA_arm)
si_m->cfa_how = CFIC_ARM_R13REL;
if (ctxs->cfa_is_regoff && ctxs->cfa_reg == FP_REG) {
si_m->cfa_off = ctxs->cfa_off;
# if defined(VGA_x86) || defined(VGA_amd64) || defined(VGA_s390x) \
- || defined(VGA_mips32) || defined(VGA_mips64)
+ || defined(VGA_mips32) || defined(VGA_nanomips) || defined(VGA_mips64)
si_m->cfa_how = CFIC_IA_BPREL;
# elif defined(VGA_arm)
si_m->cfa_how = CFIC_ARM_R12REL;
return True;
-# elif defined(VGA_mips32) || defined(VGA_mips64)
+# elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
/* --- entire tail of this fn specialised for mips --- */
return ML_(CfiExpr_CfiReg)( dstxa, Creg_S390_FP );
if (dwreg == srcuc->ra_reg)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_S390_IA );
-# elif defined(VGA_mips32) || defined(VGA_mips64)
+# elif defined(VGA_mips32) || defined(VGA_mips64) \
+ || defined(VGA_nanomips)
if (dwreg == SP_REG)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_IA_SP );
if (dwreg == FP_REG)
while (tries > 0) {
SysRes res;
-#if defined(VGP_arm64_linux)
+#if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
(UWord)path, (UWord)buf, bufsiz);
#elif defined(VGO_linux) || defined(VGO_darwin)
# if defined(VGP_x86_linux) || defined(VGP_amd64_linux) \
|| defined(VGP_arm_linux) || defined (VGP_s390x_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
- || defined(VGP_arm64_linux) \
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
|| defined(VGP_x86_solaris) || defined(VGP_amd64_solaris)
/* Accept .plt where mapped as rx (code) */
if (0 == VG_(strcmp)(name, ".plt")) {
SHOW_HOW(si_m->f6_how, si_m->f6_off);
VG_(printf)(" F7=");
SHOW_HOW(si_m->f7_how, si_m->f7_off);
-# elif defined(VGA_mips32) || defined(VGA_mips64)
+# elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
VG_(printf)(" SP=");
SHOW_HOW(si_m->sp_how, si_m->sp_off);
VG_(printf)(" FP=");
return v0;
}
+#elif defined(VGP_nanomips_linux)
+
+/* Write n bytes of buf to stderr (fd 2) via a raw syscall, avoiding any
+   libc.  nanoMIPS passes the syscall number in r2 ($t4) and arguments in
+   r4..r6 ($a0..$a2); the result comes back in $a0.  The clobber list names
+   every caller-saved register the kernel may trash across syscall[32]. */
+__attribute__((noinline))
+static UInt local_sys_write_stderr ( const HChar* buf, Int n )
+{
+   register RegWord t4 asm("2");   /* syscall number register */
+   register RegWord a0 asm("4");   /* arg0 / return value */
+   register RegWord a1 asm("5");   /* arg1 */
+   register RegWord a2 asm("6");   /* arg2 */
+   t4 = __NR_write;
+   a2 = n;
+   a1 = (RegWord)(Addr)buf;
+   a0 = 2; // stderr
+   __asm__ volatile (
+      "syscall[32] \n\t"
+      : "+d" (t4), "+d" (a0), "+d" (a1), "+d" (a2)
+      :
+      : "$at", "$t5", "$a3", "$a4", "$a5", "$a6", "$a7", "$t0", "$t1", "$t2",
+        "$t3", "$t8", "$t9"
+   );
+   /* NOTE(review): on error the kernel returns a negative errno in $a0;
+      callers here only use this for best-effort debug output. */
+   return a0;
+}
+
+/* Return the current pid via a raw getpid syscall (no libc). */
+__attribute__((noinline))
+static UInt local_sys_getpid ( void )
+{
+   register RegWord t4 asm("2");   /* syscall number register */
+   register RegWord a0 asm("4");   /* return value */
+   t4 = __NR_getpid;
+   __asm__ volatile (
+      "syscall[32] \n\t"
+      : "+d" (t4), "=d" (a0)
+      :
+      : "$at", "$t5", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$t0",
+        "$t1", "$t2", "$t3", "$t8", "$t9"
+   );
+   return a0;
+}
+
+
#elif defined(VGP_x86_solaris)
static UInt local_sys_write_stderr ( const HChar* buf, Int n )
{
--- /dev/null
+/*--------------------------------------------------------------------*/
+/*--- The core dispatch loop, for jumping to a code address. ---*/
+/*--- dispatch-nanomips-linux.S ---*/
+/*--------------------------------------------------------------------*/
+
+# This file is part of Valgrind, a dynamic binary instrumentation
+# framework.
+
+# Copyright (C) 2017-2018 RT-RK
+# mips-valgrind@rt-rk.com
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2 of the
+# License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+# 02111-1307, USA.
+
+# The GNU General Public License is contained in the file COPYING.
+
+#include "pub_core_basics_asm.h"
+
+#if defined(VGP_nanomips_linux)
+
+#include "pub_core_dispatch_asm.h"
+#include "pub_core_transtab_asm.h"
+#include "libvex_guest_offsets.h" /* for OFFSET_mips_PC */
+
+# Signature:
+# void VG_(disp_run_translations)( UWord* two_words,
+# void* guest_state,
+# Addr host_addr);
+
+# The dispatch loop. VG_(disp_run_translations) is used to run all
+# translations, including no-redir ones.
+
+.text
+.globl VG_(disp_run_translations)
+VG_(disp_run_translations):
+# a0 holds two_words
+# a1 holds guest_state
+# a2 holds host_addr
+# Save all callee-saved registers with nanoMIPS SAVE (32-byte frames),
+# then stash $gp and the two_words pointer in the second frame so the
+# postamble can find them again.
+ save 32, $s0-$s7
+ save 32, $fp-$ra
+ sw $gp, 20($sp)
+ sw $a0, 16($sp)
+
+# Load address of guest_state into guest state register ($s7)
+ move $s7, $a1
+
+# And jump into the code cache. Chained translations in
+# the code cache run, until for whatever reason, they can't
+# continue. When that happens, the translation in question
+# will jump (or call) to one of the continuation points
+# VG_(cp_...) below.
+
+ jrc $a2
+
+# * Postamble and exit:
+ postamble:
+# At this point, $t4 and $t5 contain two
+# words to be returned to the caller. $t4
+# holds a TRC value, and $t5 optionally may
+# hold another word (for CHAIN_ME exits, the
+# address of the place to patch.)
+
+# Restore $a0 from stack; holds address of two_words
+ lw $a0, 16($sp)
+ sw $t4, 0($a0) # Store $t4 to two_words[0]
+ sw $t5, 4($a0) # Store $t5 to two_words[1]
+
+# Restore $gp and the callee-saved registers, then return.
+ lw $gp, 20($sp)
+ restore 32, $fp-$ra
+ restore 32, $s0-$s7
+
+ jrc $ra
+
+# * Continuation points:
+
+.global VG_(disp_cp_chain_me_to_slowEP)
+VG_(disp_cp_chain_me_to_slowEP):
+# We got called. The return address indicates
+# where the patching needs to happen. Collect
+# the return address and exit back to C land,
+# handing the caller the pair (Chain_me_S, RA).
+ li $t4, VG_TRC_CHAIN_ME_TO_SLOW_EP
+# The patch site is 12 bytes before the return address:
+# 8 = mkLoadImm32_EXACTLY2
+# 4 = jalrc $9
+ addiu $t5, $ra, -12
+ bc postamble
+
+.global VG_(disp_cp_chain_me_to_fastEP)
+VG_(disp_cp_chain_me_to_fastEP):
+# As above, but exit back to C land handing the
+# caller the pair (Chain_me_F, RA).
+ li $t4, VG_TRC_CHAIN_ME_TO_FAST_EP
+# 8 = mkLoadImm32_EXACTLY2
+# 4 = jalrc $9
+ addiu $t5, $ra, -12
+ bc postamble
+
+.global VG_(disp_cp_xindir)
+VG_(disp_cp_xindir):
+# Where are we going?
+ lw $a7, OFFSET_mips32_PC($s7)
+
+# stats only: count one more indirect transfer
+ lw $t1, VG_(stats__n_xIndirs_32)
+ addiu $t1, $t1, 1
+ sw $t1, VG_(stats__n_xIndirs_32)
+
+# try a fast lookup in the translation cache
+# t2 = VG_TT_FAST_HASH(addr) * sizeof(ULong*)
+# = (t2 >> 2 & VG_TT_FAST_MASK) << 3
+ move $t2, $a7
+ li $t0, VG_TT_FAST_MASK
+ srl $t2, $t2, 2
+ and $t2, $t2, $t0
+ sll $t2, $t2, 3
+
+# t1 = (addr of VG_(tt_fast)) + t2
+ la $t1, VG_(tt_fast)
+ addu $t1, $t1, $t2
+
+# t0 = entry's guest address, t9 = entry's host address
+ lw $t0, 0($t1)
+ addiu $t1, $t1, 4
+ lw $t9, 0($t1)
+
+# little-endian, so comparing 1st 32bit word
+ bnec $t0, $a7, fast_lookup_failed
+ jrc $t9
+
+ fast_lookup_failed:
+# stats only: count a miss, not another indirect transfer
+ lw $t1, VG_(stats__n_xIndir_misses_32)
+ addiu $t1, $t1, 1
+ sw $t1, VG_(stats__n_xIndir_misses_32)
+ li $t4, VG_TRC_INNER_FASTMISS
+ move $t5, $zero
+ bc postamble
+
+.global VG_(disp_cp_xassisted)
+VG_(disp_cp_xassisted):
+# guest-state-pointer contains the TRC. Put the value into the
+# return register
+ move $t4, $s7
+ move $t5, $zero
+ bc postamble
+
+.global VG_(disp_cp_evcheck_fail)
+VG_(disp_cp_evcheck_fail):
+ li $t4, VG_TRC_INNER_COUNTERZERO
+ move $t5, $zero
+ bc postamble
+
+.size VG_(disp_run_translations), . - VG_(disp_run_translations)
+
+#endif
+
+# Let the linker know we don't need an executable stack
+MARK_STACK_NO_EXEC
+
+/*--------------------------------------------------------------------*/
+/*--- end dispatch-nanomips-linux.S ---*/
+/*--------------------------------------------------------------------*/
mips32_init_architecture(&the_low_target);
#elif defined(VGA_mips64)
mips64_init_architecture(&the_low_target);
+#elif defined(VGA_nanomips)
+ nanomips_init_architecture(&the_low_target);
#else
#error "architecture missing in target.c valgrind_initialize_target"
#endif
--- /dev/null
+/* Low level interface to valgrind, for the remote server for GDB integrated
+ in valgrind.
+ Copyright (C) 2012
+ Free Software Foundation, Inc.
+
+ This file is part of VALGRIND.
+ It has been inspired from a file from gdbserver in gdb 6.6.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ Boston, MA 02110-1301, USA. */
+
+#include "server.h"
+#include "target.h"
+#include "regdef.h"
+#include "regcache.h"
+
+#include "pub_core_machine.h"
+#include "pub_core_debuginfo.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_transtab.h"
+#include "pub_core_gdbserver.h"
+
+#include "valgrind_low.h"
+
+#include "libvex_guest_mips32.h"
+
+/* Register set exposed to GDB.  The ordering and bit offsets must match
+   the target description returned by target_xml() (mips-linux.xml) and
+   the case numbering in transfer_register() below:
+   32 status, 33 lo, 34 hi, 35 badvaddr, 36 cause, 37 pc.
+   The "lo"/"hi" rows (offsets 1056/1088) are required even though
+   nanoMIPS itself has no HI/LO: transfer_register handles them at
+   cases 33/34 and expects "pc" at index 37. */
+static struct reg regs[] = {
+  { "r0", 0, 32 },
+  { "r1", 32, 32 },
+  { "r2", 64, 32 },
+  { "r3", 96, 32 },
+  { "r4", 128, 32 },
+  { "r5", 160, 32 },
+  { "r6", 192, 32 },
+  { "r7", 224, 32 },
+  { "r8", 256, 32 },
+  { "r9", 288, 32 },
+  { "r10", 320, 32 },
+  { "r11", 352, 32 },
+  { "r12", 384, 32 },
+  { "r13", 416, 32 },
+  { "r14", 448, 32 },
+  { "r15", 480, 32 },
+  { "r16", 512, 32 },
+  { "r17", 544, 32 },
+  { "r18", 576, 32 },
+  { "r19", 608, 32 },
+  { "r20", 640, 32 },
+  { "r21", 672, 32 },
+  { "r22", 704, 32 },
+  { "r23", 736, 32 },
+  { "r24", 768, 32 },
+  { "r25", 800, 32 },
+  { "r26", 832, 32 },
+  { "r27", 864, 32 },
+  { "r28", 896, 32 },
+  { "r29", 928, 32 },
+  { "r30", 960, 32 },
+  { "r31", 992, 32 },
+  { "status", 1024, 32 },
+  { "lo", 1056, 32 },
+  { "hi", 1088, 32 },
+  { "badvaddr", 1120, 32 },
+  { "cause", 1152, 32 },
+  { "pc", 1184, 32 },
+};
+
+#define num_regs (sizeof (regs) / sizeof (regs[0]))
+
+static const char *expedite_regs[] = { "r29", "pc", 0 };
+
+/* Fetch the current guest program counter from the register cache. */
+static
+CORE_ADDR get_pc (void)
+{
+   unsigned long current_pc = 0;
+
+   collect_register_by_name ("pc", &current_pc);
+   dlog(1, "stop pc is %p\n", (void *) current_pc);
+   return current_pc;
+}
+
+/* Store newpc into the register cache and log whether it changed. */
+static
+void set_pc (CORE_ADDR newpc)
+{
+   Bool changed;
+
+   supply_register_by_name ("pc", &newpc, &changed);
+   dlog(1, changed ? "set pc to %p\n" : "set pc not changed %p\n",
+        C2v (newpc));
+}
+
+/* These are the fields of 32 bit mips instructions. */
+/* NOTE(review): carried over from valgrind-low-mips32.c; they are not
+   referenced anywhere in this file — confirm and consider removing. */
+#define itype_op(x) (x >> 26)
+#define itype_rs(x) ((x >> 21) & 0x1f)
+#define itype_rt(x) ((x >> 16) & 0x1f)
+#define rtype_funct(x) (x & 0x3f)
+
+/* Load a 32-bit word in the target's byte order (selected at compile
+   time by _MIPSEL/_MIPSEB), regardless of the endianness of the
+   underlying host.
+   NOTE(review): if neither _MIPSEL nor _MIPSEB is defined this silently
+   returns 0 — relies on the toolchain always defining one of them. */
+static inline UInt getUInt(UChar * p)
+{
+   UInt w = 0;
+#if defined (_MIPSEL)
+   w = (w << 8) | p[3];
+   w = (w << 8) | p[2];
+   w = (w << 8) | p[1];
+   w = (w << 8) | p[0];
+#elif defined (_MIPSEB)
+   w = (w << 8) | p[0];
+   w = (w << 8) | p[1];
+   w = (w << 8) | p[2];
+   w = (w << 8) | p[3];
+#endif
+   return w;
+}
+
+/* store registers in the guest state (gdbserver_to_valgrind)
+   or fetch register from the guest state (valgrind_to_gdbserver).
+   abs_regno encodes both the register set (guest vs. shadow) and the
+   register index.  Case numbering follows mips-linux.xml:
+   32 status, 33 lo, 34 hi, 35 badvaddr, 36 cause, 37 pc.
+   NOTE(review): this requires regs[] above to have 38 entries including
+   "lo"/"hi" — with a 36-entry table, "pc" would be regno 35 and never be
+   transferred.  Verify the table matches. */
+static
+void transfer_register (ThreadId tid, int abs_regno, void * buf,
+                        transfer_direction dir, int size, Bool *mod)
+{
+   ThreadState* tst = VG_(get_ThreadState)(tid);
+   int set = abs_regno / num_regs;   /* 0 = guest, 1/2 = shadow sets */
+   int regno = abs_regno % num_regs;
+   *mod = False;
+
+   /* nanoMIPS reuses the MIPS32 VEX guest state. */
+   VexGuestMIPS32State* mips1 = (VexGuestMIPS32State*) get_arch (set, tst);
+
+   switch (regno) {
+   case 0: VG_(transfer) (&mips1->guest_r0, buf, dir, size, mod); break;
+   case 1: VG_(transfer) (&mips1->guest_r1, buf, dir, size, mod); break;
+   case 2: VG_(transfer) (&mips1->guest_r2, buf, dir, size, mod); break;
+   case 3: VG_(transfer) (&mips1->guest_r3, buf, dir, size, mod); break;
+   case 4: VG_(transfer) (&mips1->guest_r4, buf, dir, size, mod); break;
+   case 5: VG_(transfer) (&mips1->guest_r5, buf, dir, size, mod); break;
+   case 6: VG_(transfer) (&mips1->guest_r6, buf, dir, size, mod); break;
+   case 7: VG_(transfer) (&mips1->guest_r7, buf, dir, size, mod); break;
+   case 8: VG_(transfer) (&mips1->guest_r8, buf, dir, size, mod); break;
+   case 9: VG_(transfer) (&mips1->guest_r9, buf, dir, size, mod); break;
+   case 10: VG_(transfer) (&mips1->guest_r10, buf, dir, size, mod); break;
+   case 11: VG_(transfer) (&mips1->guest_r11, buf, dir, size, mod); break;
+   case 12: VG_(transfer) (&mips1->guest_r12, buf, dir, size, mod); break;
+   case 13: VG_(transfer) (&mips1->guest_r13, buf, dir, size, mod); break;
+   case 14: VG_(transfer) (&mips1->guest_r14, buf, dir, size, mod); break;
+   case 15: VG_(transfer) (&mips1->guest_r15, buf, dir, size, mod); break;
+   case 16: VG_(transfer) (&mips1->guest_r16, buf, dir, size, mod); break;
+   case 17: VG_(transfer) (&mips1->guest_r17, buf, dir, size, mod); break;
+   case 18: VG_(transfer) (&mips1->guest_r18, buf, dir, size, mod); break;
+   case 19: VG_(transfer) (&mips1->guest_r19, buf, dir, size, mod); break;
+   case 20: VG_(transfer) (&mips1->guest_r20, buf, dir, size, mod); break;
+   case 21: VG_(transfer) (&mips1->guest_r21, buf, dir, size, mod); break;
+   case 22: VG_(transfer) (&mips1->guest_r22, buf, dir, size, mod); break;
+   case 23: VG_(transfer) (&mips1->guest_r23, buf, dir, size, mod); break;
+   case 24: VG_(transfer) (&mips1->guest_r24, buf, dir, size, mod); break;
+   case 25: VG_(transfer) (&mips1->guest_r25, buf, dir, size, mod); break;
+   case 26: VG_(transfer) (&mips1->guest_r26, buf, dir, size, mod); break;
+   case 27: VG_(transfer) (&mips1->guest_r27, buf, dir, size, mod); break;
+   case 28: VG_(transfer) (&mips1->guest_r28, buf, dir, size, mod); break;
+   case 29: VG_(transfer) (&mips1->guest_r29, buf, dir, size, mod); break;
+   case 30: VG_(transfer) (&mips1->guest_r30, buf, dir, size, mod); break;
+   case 31: VG_(transfer) (&mips1->guest_r31, buf, dir, size, mod); break;
+   case 32: *mod = False; break; // GDBTD???? VEX { "status", 1024, 32 },
+   case 33: VG_(transfer) (&mips1->guest_LO, buf, dir, size, mod); break;
+   case 34: VG_(transfer) (&mips1->guest_HI, buf, dir, size, mod); break;
+   case 35: *mod = False; break; // GDBTD???? VEX { "badvaddr", 1120, 32 },
+   case 36: *mod = False; break; // GDBTD???? VEX { "cause", 1152, 32 },
+   case 37: VG_(transfer) (&mips1->guest_PC, buf, dir, size, mod); break;
+   case 72: *mod = False; break; // NOTE(review): unreachable, regno < num_regs
+   default: VG_(printf)("regno: %d\n", regno); vg_assert(0);
+   }
+}
+
+/* Pick the GDB target description: the shadow-register variant when
+   shadow mode is active, the plain MIPS one otherwise. */
+static
+const char* target_xml (Bool shadow_mode)
+{
+   return shadow_mode ? "mips-linux-valgrind.xml" : "mips-linux.xml";
+}
+
+/* Locate the thread's DTV (dynamic thread vector) for TLS lookups. */
+static CORE_ADDR** target_get_dtv (ThreadState *tst)
+{
+   VexGuestMIPS32State* mips32 = (VexGuestMIPS32State*)&tst->arch.vex;
+   // Top of nanoMIPS tcbhead structure is located 0x7000 bytes before the value
+   // of ULR. Dtv is the first of two pointers in tcbhead structure.
+   return (CORE_ADDR**)((CORE_ADDR)mips32->guest_ULR
+                        - 0x7000 - 2 * sizeof(CORE_ADDR));
+}
+
+/* gdbserver target operations for the nanoMIPS port. */
+static struct valgrind_target_ops low_target = {
+   num_regs,
+   regs,
+   29, //sp = r29, which is register offset 29 in regs
+   transfer_register,
+   get_pc,
+   set_pc,
+   "mips",
+   target_xml,
+   target_get_dtv
+};
+
+/* Install the nanoMIPS low-level ops and register cache; called once
+   from valgrind_initialize_target(). */
+void nanomips_init_architecture (struct valgrind_target_ops *target)
+{
+   *target = low_target;
+   set_register_cache (regs, num_regs);
+   gdbserver_expedite_regs = expedite_regs;
+}
extern void s390x_init_architecture (struct valgrind_target_ops *target);
extern void mips32_init_architecture (struct valgrind_target_ops *target);
extern void mips64_init_architecture (struct valgrind_target_ops *target);
+extern void nanomips_init_architecture (struct valgrind_target_ops *target);
#endif
# if !defined(VGP_ppc32_linux) && !defined(VGP_ppc64be_linux) \
&& !defined(VGP_ppc64le_linux) \
- && !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux)
+ && !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux) \
+ && !defined(VGP_nanomips_linux)
case AT_SYSINFO_EHDR: {
/* Trash this, because we don't reproduce it */
const NSegment* ehdrseg = VG_(am_find_nsegment)((Addr)auxv->u.a_ptr);
process startup. */
#define PRECISE_GUEST_REG_DEFINEDNESS_AT_STARTUP 1
-# elif defined(VGP_mips32_linux)
+# elif defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
vg_assert(0 == sizeof(VexGuestMIPS32State) % LibVEX_GUEST_STATE_ALIGN);
/* Zero out the initial state, and set up the simulated FPU in a
sane way. */
arch->vex.guest_PC = iifii.initial_client_IP;
arch->vex.guest_r31 = iifii.initial_client_SP;
+# if !defined(VGP_nanomips_linux)
if (iifii.arch_elf_state.overall_fp_mode == VKI_FP_FR1) {
arch->vex.guest_CP0_status |= MIPS_CP0_STATUS_FR;
}
-# elif defined(VGP_mips64_linux)
+# endif
+# elif defined(VGP_mips64_linux)
vg_assert(0 == sizeof(VexGuestMIPS64State) % LibVEX_GUEST_STATE_ALIGN);
/* Zero out the initial state, and set up the simulated FPU in a
sane way. */
(srP)->misc.MIPS64.r31 = (ULong)ra; \
(srP)->misc.MIPS64.r28 = (ULong)gp; \
}
+#elif defined(VGP_nanomips_linux)
+/* Capture the caller's pc/sp/fp/ra/gp into the UnwindStartRegs.
+   addiupc %0, -4 materialises a PC-relative value; the -4 is meant to
+   yield the address of the addiupc instruction itself.
+   NOTE(review): confirm the -4 offset against the nanoMIPS ADDIUPC[32]
+   base-PC definition. */
+# define GET_STARTREGS(srP) \
+ { UInt pc=0, sp=0, fp=0, ra=0, gp=0; \
+ asm("addiupc[32] %0, -4 \n\t" \
+ "move %1, $sp \n\t" \
+ "move %2, $fp \n\t" \
+ "move %3, $ra \n\t" \
+ "move %4, $gp \n\t" \
+ : "=r" (pc), \
+ "=r" (sp), \
+ "=r" (fp), \
+ "=r" (ra), \
+ "=r" (gp) \
+ ); \
+ (srP)->r_pc = (UInt)pc; \
+ (srP)->r_sp = (UInt)sp; \
+ (srP)->misc.MIPS32.r30 = (UInt)fp; \
+ (srP)->misc.MIPS32.r31 = (UInt)ra; \
+ (srP)->misc.MIPS32.r28 = (UInt)gp; \
+ }
#else
# error Unknown platform
#endif
SysRes VG_(mknod) ( const HChar* pathname, Int mode, UWord dev )
{
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
/* ARM64 wants to use __NR_mknodat rather than __NR_mknod. */
SysRes res = VG_(do_syscall4)(__NR_mknodat,
VKI_AT_FDCWD, (UWord)pathname, mode, dev);
SysRes VG_(open) ( const HChar* pathname, Int flags, Int mode )
{
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
/* ARM64 wants to use __NR_openat rather than __NR_open. */
SysRes res = VG_(do_syscall4)(__NR_openat,
VKI_AT_FDCWD, (UWord)pathname, flags, mode);
} else {
return -1;
}
-# elif defined(VGP_arm64_linux)
+# elif defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
SysRes res = VG_(do_syscall2)(__NR_pipe2, (UWord)fd, 0);
return sr_isError(res) ? -1 : 0;
# elif defined(VGO_linux)
SysRes VG_(dup2) ( Int oldfd, Int newfd )
{
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
/* We only have dup3, that means we have to mimic dup2.
The only real difference is when oldfd == newfd.
dup3 always returns an error, but dup2 returns only an
Int VG_(fcntl) ( Int fd, Int cmd, Addr arg )
{
# if defined(VGO_linux) || defined(VGO_solaris)
+# if defined(VGP_nanomips_linux)
+ SysRes res = VG_(do_syscall3)(__NR_fcntl64, fd, cmd, arg);
+# else
SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
+# endif
# elif defined(VGO_darwin)
SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
# else
# if defined(VGO_solaris) || defined(VGP_arm64_linux)
SysRes res = VG_(do_syscall4)(__NR_renameat, VKI_AT_FDCWD, (UWord)old_name,
VKI_AT_FDCWD, (UWord)new_name);
+# elif defined(VGP_nanomips_linux)
+ SysRes res = VG_(do_syscall5)(__NR_renameat2, VKI_AT_FDCWD, (UWord)old_name,
+ VKI_AT_FDCWD, (UWord)new_name, 0);
+
# elif defined(VGO_linux) || defined(VGO_darwin)
SysRes res = VG_(do_syscall2)(__NR_rename, (UWord)old_name, (UWord)new_name);
# else
Int VG_(unlink) ( const HChar* file_name )
{
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
SysRes res = VG_(do_syscall2)(__NR_unlinkat, VKI_AT_FDCWD,
(UWord)file_name);
# elif defined(VGO_linux) || defined(VGO_darwin)
SysRes VG_(poll) (struct vki_pollfd *fds, Int nfds, Int timeout)
{
SysRes res;
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
/* ARM64 wants to use __NR_ppoll rather than __NR_poll. */
struct vki_timespec timeout_ts;
if (timeout >= 0) {
{
SysRes res;
/* res = readlink( path, buf, bufsiz ); */
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
(UWord)path, (UWord)buf, bufsiz);
# elif defined(VGO_linux) || defined(VGO_darwin)
UWord w = (irusr ? VKI_R_OK : 0)
| (iwusr ? VKI_W_OK : 0)
| (ixusr ? VKI_X_OK : 0);
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
SysRes res = VG_(do_syscall3)(__NR_faccessat, VKI_AT_FDCWD, (UWord)path, w);
# elif defined(VGO_linux) || defined(VGO_darwin)
SysRes res = VG_(do_syscall2)(__NR_access, (UWord)path, w);
0, // Padding needed on PPC32
0, offset); // Big endian long long
return res;
-# elif defined(VGP_mips32_linux) && (VKI_LITTLE_ENDIAN)
+# elif (defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)) \
+ && (VKI_LITTLE_ENDIAN)
vg_assert(sizeof(OffT) == 4);
res = VG_(do_syscall6)(__NR_pread64, fd, (UWord)buf, count,
0, offset, 0);
return res;
-# elif defined(VGP_mips32_linux) && (VKI_BIG_ENDIAN)
+# elif (defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)) \
+ && (VKI_BIG_ENDIAN)
vg_assert(sizeof(OffT) == 4);
res = VG_(do_syscall6)(__NR_pread64, fd, (UWord)buf, count,
0, 0, offset);
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
- || defined(VGP_arm64_linux)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
SysRes res;
res = VG_(do_syscall3)(__NR_socket, domain, type, protocol );
return sr_isError(res) ? -1 : sr_Res(res);
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
- || defined(VGP_arm64_linux)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
SysRes res;
res = VG_(do_syscall3)(__NR_connect, sockfd, (UWord)serv_addr, addrlen);
return sr_isError(res) ? -1 : sr_Res(res);
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
- || defined(VGP_arm64_linux)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
SysRes res;
res = VG_(do_syscall6)(__NR_sendto, sd, (UWord)msg,
count, VKI_MSG_NOSIGNAL, 0,0);
return sr_isError(res) ? -1 : sr_Res(res);
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
- || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
+ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
+ || defined(VGP_nanomips_linux)
SysRes res;
res = VG_(do_syscall3)( __NR_getsockname,
(UWord)sd, (UWord)name, (UWord)namelen );
return sr_isError(res) ? -1 : sr_Res(res);
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
- || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
+ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
+ || defined(VGP_nanomips_linux)
SysRes res;
res = VG_(do_syscall3)( __NR_getpeername,
(UWord)sd, (UWord)name, (UWord)namelen );
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
- || defined(VGP_arm64_linux)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
SysRes res;
res = VG_(do_syscall5)( __NR_getsockopt,
(UWord)sd, (UWord)level, (UWord)optname,
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
- || defined(VGP_arm64_linux)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
SysRes res;
res = VG_(do_syscall5)( __NR_setsockopt,
(UWord)sd, (UWord)level, (UWord)optname,
res = VG_(do_syscall2)(__NR_ugetrlimit, resource, (UWord)rlim);
# endif
if (sr_isError(res) && sr_Err(res) == VKI_ENOSYS)
+# if defined(VGP_nanomips_linux)
+ {
+ struct vki_rlimit64 new_rlimit;
+ res = VG_(do_syscall4)(__NR_prlimit64, 0, resource, 0, (UWord)&new_rlimit);
+ if (new_rlimit.rlim_cur > 2147483647 || new_rlimit.rlim_max > 2147483647)
+ res = VG_(mk_SysRes_Error)(VKI_ENOSYS);
+ else {
+ rlim->rlim_cur = new_rlimit.rlim_cur;
+ rlim->rlim_max = new_rlimit.rlim_max;
+ }
+ }
+# else
res = VG_(do_syscall2)(__NR_getrlimit, resource, (UWord)rlim);
+# endif
return sr_isError(res) ? -1 : sr_Res(res);
}
{
SysRes res;
/* res = setrlimit( resource, rlim ); */
+# if defined(VGP_nanomips_linux)
+ struct vki_rlimit64 new_rlimit;
+ new_rlimit.rlim_cur = rlim->rlim_cur;
+ new_rlimit.rlim_max = rlim->rlim_max;
+ res = VG_(do_syscall4)(__NR_prlimit64, 0, resource, (UWord)&new_rlimit, 0);
+# else
res = VG_(do_syscall2)(__NR_setrlimit, resource, (UWord)rlim);
+# endif
return sr_isError(res) ? -1 : sr_Res(res);
}
* the /proc/self link is pointing...
*/
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
(UWord)"/proc/self",
(UWord)pid, sizeof(pid));
Int VG_(getpgrp) ( void )
{
/* ASSUMES SYSCALL ALWAYS SUCCEEDS */
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
return sr_Res( VG_(do_syscall1)(__NR_getpgid, 0) );
# elif defined(VGO_linux) || defined(VGO_darwin)
return sr_Res( VG_(do_syscall0)(__NR_getpgrp) );
|| defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
|| defined(VGO_darwin) || defined(VGP_s390x_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_arm64_linux) \
- || defined(VGO_solaris)
+ || defined(VGO_solaris) || defined(VGP_nanomips_linux)
SysRes sres;
sres = VG_(do_syscall2)(__NR_getgroups, size, (Addr)list);
if (sr_isError(sres))
Int VG_(fork) ( void )
{
-# if defined(VGP_arm64_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
SysRes res;
res = VG_(do_syscall5)(__NR_clone, VKI_SIGCHLD,
(UWord)NULL, (UWord)NULL, (UWord)NULL, (UWord)NULL);
".previous \n\t"
);
#endif /* VGP_mips64_linux */
+
+#if defined(VGP_nanomips_linux)
+/* Minimal setjmp/longjmp for nanoMIPS.  The jump buffer holds only the
+   callee-saved state: $s0-$s7, $gp, $sp, $fp and $ra -- 12 words at
+   offsets 0..44.  VG_MINIMAL_SETJMP saves them and returns 0 (in $a0). */
+__asm__(
+".text \n\t"
+".globl VG_MINIMAL_SETJMP; \n\t"
+".set push \n\t"
+".set noreorder \n\t"
+"VG_MINIMAL_SETJMP: \n\t"
+" sw $s0, 0($a0) \n\t"
+" sw $s1, 4($a0) \n\t"
+" sw $s2, 8($a0) \n\t"
+" sw $s3, 12($a0) \n\t"
+" sw $s4, 16($a0) \n\t"
+" sw $s5, 20($a0) \n\t"
+" sw $s6, 24($a0) \n\t"
+" sw $s7, 28($a0) \n\t"
+" sw $gp, 32($a0) \n\t"
+" sw $sp, 36($a0) \n\t"
+" sw $fp, 40($a0) \n\t"
+" sw $ra, 44($a0) \n\t"
+" move $a0, $zero \n\t"
+" jrc $ra \n\t"
+".set pop \n\t"
+".previous \n\t"
+" \n\t"
+/* VG_MINIMAL_LONGJMP reloads the saved registers and returns (through the
+   restored $ra) the value passed in $a1, forced to 1 if the caller passed 0
+   -- the bnezc/addiu pair below -- as setjmp/longjmp semantics require a
+   nonzero return at the setjmp site. */
+".text \n\t"
+".globl VG_MINIMAL_LONGJMP; \n\t"
+".set push \n\t"
+".set noreorder \n\t"
+"VG_MINIMAL_LONGJMP: \n\t"
+" lw $s0, 0($a0) \n\t"
+" lw $s1, 4($a0) \n\t"
+" lw $s2, 8($a0) \n\t"
+" lw $s3, 12($a0) \n\t"
+" lw $s4, 16($a0) \n\t"
+" lw $s5, 20($a0) \n\t"
+" lw $s6, 24($a0) \n\t"
+" lw $s7, 28($a0) \n\t"
+" lw $gp, 32($a0) \n\t"
+" lw $sp, 36($a0) \n\t"
+" lw $fp, 40($a0) \n\t"
+" lw $ra, 44($a0) \n\t"
+" bnezc $a1, 1f \n\t"
+" addiu $a1, $a1, 1 \n\t"
+"1: \n\t"
+" move $a0, $a1 \n\t"
+" jrc $ra \n\t"
+".set pop \n\t"
+".previous \n\t"
+);
+#endif /* VGP_nanomips_linux */
+
/*--------------------------------------------------------------------*/
/*--- end ---*/
/*--------------------------------------------------------------------*/
= VG_(threads)[tid].arch.vex.guest_v6.w64[0];
regs->misc.S390X.r_f7
= VG_(threads)[tid].arch.vex.guest_v7.w64[0];
-# elif defined(VGA_mips32)
+# elif defined(VGA_mips32) || defined(VGP_nanomips_linux)
regs->r_pc = VG_(threads)[tid].arch.vex.guest_PC;
regs->r_sp = VG_(threads)[tid].arch.vex.guest_r29;
regs->misc.MIPS32.r30
(*f)(tid, "r13", vex->guest_r13);
(*f)(tid, "r14", vex->guest_r14);
(*f)(tid, "r15", vex->guest_r15);
-#elif defined(VGA_mips32) || defined(VGA_mips64)
+#elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGP_nanomips_linux)
(*f)(tid, "r0" , vex->guest_r0 );
(*f)(tid, "r1" , vex->guest_r1 );
(*f)(tid, "r2" , vex->guest_r2 );
/* For hwcaps detection on ppc32/64, s390x, and arm we'll need to do SIGILL
testing, so we need a VG_MINIMAL_JMP_BUF. */
#if defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le) \
- || defined(VGA_arm) || defined(VGA_s390x) || defined(VGA_mips32) || defined(VGA_mips64)
+ || defined(VGA_arm) || defined(VGA_s390x) || defined(VGA_mips32) \
+ || defined(VGA_mips64)
#include "pub_core_libcsetjmp.h"
static VG_MINIMAL_JMP_BUF(env_unsup_insn);
static void handler_unsup_insn ( Int x ) {
return True;
}
+#elif defined(VGP_nanomips_linux)
+ {
+ va = VexArchNANOMIPS;
+ vai.hwcaps = 0;
+
+# if defined(VKI_LITTLE_ENDIAN)
+ vai.endness = VexEndnessLE;
+# elif defined(VKI_BIG_ENDIAN)
+ vai.endness = VexEndnessBE;
+# else
+ vai.endness = VexEndness_INVALID;
+# endif
+
+ VG_(debugLog)(1, "machine", "hwcaps = 0x%x\n", vai.hwcaps);
+
+ VG_(machine_get_cache_info)(&vai);
+
+ return True;
+ }
#else
# error "Unknown arch"
#endif
/* ARM64 always has Neon, AFAICS. */
return 16;
-# elif defined(VGA_mips32)
+# elif defined(VGA_mips32) || defined(VGP_nanomips_linux)
/* The guest state implies 4, but that can't really be true, can
it? */
return 8;
|| defined(VGP_ppc32_linux) || defined(VGP_ppc64le_linux) \
|| defined(VGP_s390x_linux) || defined(VGP_mips32_linux) \
|| defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
- || defined(VGP_x86_solaris) || defined(VGP_amd64_solaris)
+ || defined(VGP_x86_solaris) || defined(VGP_amd64_solaris) \
+ || defined(VGP_nanomips_linux)
return f;
# elif defined(VGP_ppc64be_linux)
/* ppc64-linux uses the AIX scheme, in which f is a pointer to a
sizeof(VG_(threads)[tid].arch.vex.guest_GPR12));
# endif
/* mips-linux note: we need to set t9 */
-# if defined(VGP_mips32_linux)
+# if defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
VG_(threads)[tid].arch.vex.guest_r25 = freeres_wrapper;
VG_TRACK(post_reg_write, Vg_CoreClientReq, tid,
offsetof(VexGuestMIPS32State, guest_r25),
VG_TRACK(post_reg_write, Vg_CoreClientReq, tid,
offsetof(VexGuestARM64State, guest_X0),
sizeof(VG_(threads)[tid].arch.vex.guest_X0));
-# elif defined(VGA_mips32)
+# elif defined(VGA_mips32) || defined(VGA_nanomips)
VG_(threads)[tid].arch.vex.guest_r4 = to_run;
VG_TRACK(post_reg_write, Vg_CoreClientReq, tid,
offsetof(VexGuestMIPS32State, guest_r4),
"\tnop\n"
".previous\n"
);
+#elif defined(VGP_nanomips_linux)
+/* Kernel entry point for nanoMIPS: compute the top of Valgrind's interim
+   stack (base address + guard size + active size), round it down with the
+   0xFFFFFF00 mask (256-byte alignment), pass the kernel-supplied original
+   $sp to _start_in_C_linux in $a0, switch onto the interim stack and jump.
+   The trailing 'break' traps if _start_in_C_linux ever returns. */
+ asm(
+".text \n\t"
+".globl __start \n\t"
+".type __start,@function \n\t"
+"__start: \n\t"
+ ".set push \n\t"
+ ".set noreorder \n\t"
+ "li $t1, vgPlain_interim_stack \n\t"
+ "li $t0, "VG_STRINGIFY(VG_STACK_GUARD_SZB)" \n\t"
+ "addu $t1, $t1, $t0 \n\t"
+ "li $t0, "VG_STRINGIFY(VG_DEFAULT_STACK_ACTIVE_SZB)"\n\t"
+ "addu $t1, $t1, $t0 \n\t"
+ "li $t0, 0xFFFFFF00 \n\t"
+ "and $t1, $t1, $t0 \n\t"
+ "move $a0, $sp \n\t"
+ "move $sp, $t1 \n\t"
+ "li $t0, _start_in_C_linux \n\t"
+ "jrc $t0 \n\t"
+ "break \n\t"
+ ".set pop \n\t"
+".previous \n\t"
+);
#else
# error "Unknown linux platform"
#endif
# if defined(VGP_ppc32_linux) || defined(VGP_ppc64be_linux) \
|| defined(VGP_ppc64le_linux) || defined(VGP_arm64_linux) \
- || defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
+ || defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+ || defined(VGP_nanomips_linux)
{
/* ppc32/ppc64, arm64, mips32/64 can be configured with different
page sizes. Determine this early. This is an ugly hack and really
#if defined(VGPV_arm_linux_android) \
|| defined(VGPV_x86_linux_android) \
|| defined(VGPV_mips32_linux_android) \
- || defined(VGPV_arm64_linux_android)
+ || defined(VGPV_arm64_linux_android) \
+ || defined(VGP_nanomips_linux)
VgVgdb VG_(clo_vgdb) = Vg_VgdbNo; // currently disabled on Android
#else
VgVgdb VG_(clo_vgdb) = Vg_VgdbYes;
VgSmc VG_(clo_smc_check) = Vg_SmcAllNonFile;
#elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le) \
|| defined(VGA_arm) || defined(VGA_arm64) \
- || defined(VGA_mips32) || defined(VGA_mips64)
+ || defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
VgSmc VG_(clo_smc_check) = Vg_SmcStack;
#else
# error "Unknown arch"
# endif
}
+# elif defined(VGP_nanomips_linux)
+ if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+
+ add_hardwired_spec(
+ "ld.so.1", "strlen",
+ (Addr)&VG_(nanomips_linux_REDIR_FOR_strlen),
+ complain_about_stripped_glibc_ldso
+ );
+ add_hardwired_spec(
+ "ld.so.1", "index",
+ (Addr)&VG_(nanomips_linux_REDIR_FOR_index),
+ complain_about_stripped_glibc_ldso
+ );
+ }
+
# elif defined(VGP_x86_solaris)
/* If we're using memcheck, use these intercepts right from
the start, otherwise ld.so makes a lot of noise. */
/* Invalidate any in-flight LL/SC transactions, in the case that we're
using the fallback LL/SC implementation. See bugs 344524 and 369459. */
-# if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
+# if defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+ || defined(VGP_nanomips_linux)
tst->arch.vex.guest_LLaddr = (RegWord)(-1);
# elif defined(VGP_arm64_linux)
tst->arch.vex.guest_LLSC_SIZE = 0;
#elif defined (VGA_s390x)
# define VG_CLREQ_ARGS guest_r2
# define VG_CLREQ_RET guest_r3
-#elif defined(VGA_mips32) || defined(VGA_mips64)
+#elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
# define VG_CLREQ_ARGS guest_r12
# define VG_CLREQ_RET guest_r11
#else
--- /dev/null
+
+/*--------------------------------------------------------------------*/
+/*--- Create/destroy signal delivery frames. ---*/
+/*--- sigframe-nanomips-linux.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2017-2018 RT-RK
+ mips-valgrind@rt-rk.com
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_nanomips_linux)
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_machine.h"
+#include "pub_core_options.h"
+#include "pub_core_sigframe.h"
+#include "pub_core_signals.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_trampoline.h"
+#include "priv_sigframe.h"
+
+/* Valgrind-private data appended to the kernel-visible signal frame:
+   a magic cookie used to detect frame corruption on return, the delivered
+   signal number, and the tool shadow register state saved across the
+   handler's execution. */
+struct vg_sig_private {
+ UInt magicPI;
+ UInt sigNo_private;
+ VexGuestMIPS32State vex_shadow1;
+ VexGuestMIPS32State vex_shadow2;
+};
+
+/* Layout of the frame pushed on the guest stack for an RT signal.  The
+   leading fields mirror the kernel's rt_sigframe; 'priv' is Valgrind-only
+   and sits past the kernel-defined part. */
+struct rt_sigframe {
+ UInt rs_ass[4]; /* argument save space */
+ UInt rs_pad[2]; /* Was: signal trampoline */
+ vki_siginfo_t rs_info;
+ struct vki_ucontext rs_uc;
+ struct vg_sig_private priv;
+};
+
+/* Copy the guest architectural state (GPRs r1..r31 and the PC) from the
+   Valgrind thread state into the kernel-format sigcontext at *sc1, telling
+   the tool beforehand that the whole mcontext area is about to be written.
+   sc_regs[0] ($zero) is deliberately not written; the 'si' argument is
+   currently unused. */
+static
+void setup_sigcontext2 ( ThreadState* tst, struct vki_sigcontext **sc1,
+ const vki_siginfo_t *si) {
+ struct vki_sigcontext *sc = *sc1;
+ /* 34 doublewords: presumably 32 GPR slots plus sc_pc and one further
+    64-bit field in the kernel layout -- TODO confirm against the vki
+    sigcontext definition. */
+ VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal frame mcontext",
+ (Addr)sc, sizeof(unsigned long long) * 34 );
+ sc->sc_regs[1] = tst->arch.vex.guest_r1;
+ sc->sc_regs[2] = tst->arch.vex.guest_r2;
+ sc->sc_regs[3] = tst->arch.vex.guest_r3;
+ sc->sc_regs[4] = tst->arch.vex.guest_r4;
+ sc->sc_regs[5] = tst->arch.vex.guest_r5;
+ sc->sc_regs[6] = tst->arch.vex.guest_r6;
+ sc->sc_regs[7] = tst->arch.vex.guest_r7;
+ sc->sc_regs[8] = tst->arch.vex.guest_r8;
+ sc->sc_regs[9] = tst->arch.vex.guest_r9;
+ sc->sc_regs[10] = tst->arch.vex.guest_r10;
+ sc->sc_regs[11] = tst->arch.vex.guest_r11;
+ sc->sc_regs[12] = tst->arch.vex.guest_r12;
+ sc->sc_regs[13] = tst->arch.vex.guest_r13;
+ sc->sc_regs[14] = tst->arch.vex.guest_r14;
+ sc->sc_regs[15] = tst->arch.vex.guest_r15;
+ sc->sc_regs[16] = tst->arch.vex.guest_r16;
+ sc->sc_regs[17] = tst->arch.vex.guest_r17;
+ sc->sc_regs[18] = tst->arch.vex.guest_r18;
+ sc->sc_regs[19] = tst->arch.vex.guest_r19;
+ sc->sc_regs[20] = tst->arch.vex.guest_r20;
+ sc->sc_regs[21] = tst->arch.vex.guest_r21;
+ sc->sc_regs[22] = tst->arch.vex.guest_r22;
+ sc->sc_regs[23] = tst->arch.vex.guest_r23;
+ sc->sc_regs[24] = tst->arch.vex.guest_r24;
+ sc->sc_regs[25] = tst->arch.vex.guest_r25;
+ sc->sc_regs[26] = tst->arch.vex.guest_r26;
+ sc->sc_regs[27] = tst->arch.vex.guest_r27;
+ sc->sc_regs[28] = tst->arch.vex.guest_r28;
+ sc->sc_regs[29] = tst->arch.vex.guest_r29;
+ sc->sc_regs[30] = tst->arch.vex.guest_r30;
+ sc->sc_regs[31] = tst->arch.vex.guest_r31;
+ sc->sc_pc = tst->arch.vex.guest_PC;
+}
+
+/* EXPORTED */
+/* Build an rt_sigframe on the guest stack just below sp_top_of_frame
+   (16-byte aligned), fill in the siginfo, the ucontext with the saved
+   guest register state, and Valgrind's private data (shadow state plus a
+   magic cookie checked by VG_(sigframe_destroy)), then point the guest
+   registers at 'handler' so the thread runs it next.  The saved signal
+   mask is the thread's current sig_mask; 'siguc', 'mask' and
+   'on_altstack' are unused here.  The caller marks the thread
+   runnable. */
+void VG_(sigframe_create)( ThreadId tid,
+ Bool on_altstack,
+ Addr sp_top_of_frame,
+ const vki_siginfo_t *siginfo,
+ const struct vki_ucontext *siguc,
+ void *handler,
+ UInt flags,
+ const vki_sigset_t *mask,
+ void *restorer ) {
+ Addr sp;
+ ThreadState* tst = VG_(get_ThreadState)(tid);
+ Int sigNo = siginfo->si_signo;
+ struct vg_sig_private *priv;
+ sp = sp_top_of_frame - sizeof(struct rt_sigframe);
+ sp &= ~0xF;
+
+ if (! ML_(sf_maybe_extend_stack)(tst, sp, sp_top_of_frame - sp, flags))
+ return;
+
+ vg_assert(VG_IS_16_ALIGNED(sp));
+ struct rt_sigframe *frame = (struct rt_sigframe *) sp;
+ struct vki_ucontext *ucp = &frame->rs_uc;
+
+ if (VG_(clo_trace_signals))
+ VG_(printf)("rt_sigframe\n");
+
+ /* Create siginfo. */
+ VG_TRACK( pre_mem_write, Vg_CoreSignal, tid, "signal frame siginfo",
+ (Addr)&frame->rs_info, sizeof(frame->rs_info) );
+ VG_(memcpy)(&frame->rs_info, siginfo, sizeof(*siginfo));
+ VG_TRACK( post_mem_write, Vg_CoreSignal, tid,
+ (Addr)&frame->rs_info, sizeof(frame->rs_info) );
+ /* Create the ucontext. */
+ VG_TRACK( pre_mem_write, Vg_CoreSignal, tid, "signal frame ucontext",
+ (Addr)ucp, offsetof(struct vki_ucontext, uc_mcontext) );
+ ucp->uc_flags = 0;
+ ucp->uc_link = 0;
+ ucp->uc_stack = tst->altstack;
+ VG_TRACK( post_mem_write, Vg_CoreSignal, tid, (Addr)ucp,
+ offsetof(struct vki_ucontext, uc_mcontext) );
+ struct vki_sigcontext *scp = &(frame->rs_uc.uc_mcontext);
+ setup_sigcontext2(tst, &(scp), siginfo);
+ ucp->uc_sigmask = tst->sig_mask;
+ priv = &frame->priv;
+ /*
+ * Arguments to the signal handler:
+ *
+ * a0 = signal number
+ * a1 = pointer to siginfo
+ * a2 = pointer to ucontext
+ *
+ * $25 and the guest PC point to the signal handler, $29 points to
+ * the struct rt_sigframe.
+ */
+ tst->arch.vex.guest_r4 = siginfo->si_signo;
+ tst->arch.vex.guest_r5 = (Addr) &frame->rs_info;
+ tst->arch.vex.guest_r6 = (Addr) &frame->rs_uc;
+ tst->arch.vex.guest_r29 = (Addr) frame;
+ tst->arch.vex.guest_r25 = (Addr) handler;
+
+ /* Return address: the application's restorer if it installed one,
+    otherwise Valgrind's rt_sigreturn substitute. */
+ if (flags & VKI_SA_RESTORER) {
+ tst->arch.vex.guest_r31 = (Addr) restorer;
+ } else {
+ tst->arch.vex.guest_r31 = (Addr)&VG_(nanomips_linux_SUBST_FOR_rt_sigreturn);
+ }
+
+ priv->magicPI = 0x31415927;
+ priv->sigNo_private = sigNo;
+ priv->vex_shadow1 = tst->arch.vex_shadow1;
+ priv->vex_shadow2 = tst->arch.vex_shadow2;
+ /* Set the thread so it will next run the handler. */
+ /* tst->m_sp = sp; also notify the tool we've updated SP */
+ VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(Addr));
+
+ if (VG_(clo_trace_signals))
+ VG_(printf)("handler = %p\n", handler);
+
+ tst->arch.vex.guest_PC = (Addr) handler;
+ /* This thread needs to be marked runnable, but we leave that the
+ caller to do. */
+}
+
+/* Tear down the frame built by VG_(sigframe_create): locate it through the
+   guest stack pointer ($r29), restore the guest GPRs ($zero excepted) and
+   PC from the saved mcontext, restore the tool shadow state, mark the
+   frame's stack memory dead and notify the tool that the signal has been
+   delivered.  Only RT frames exist on this platform, hence the isRT
+   assert. */
+void VG_(sigframe_destroy)( ThreadId tid, Bool isRT ) {
+ ThreadState *tst;
+ struct rt_sigframe *frame;
+ struct vg_sig_private *priv1;
+ struct vki_ucontext *ucp;
+ struct vki_sigcontext *mc;
+
+ vg_assert(isRT);
+ vg_assert(VG_(is_valid_tid)(tid));
+ tst = VG_(get_ThreadState)(tid);
+ frame = (struct rt_sigframe *)(Addr)tst->arch.vex.guest_r29;
+ priv1 = &frame->priv;
+ ucp = &frame->rs_uc;
+ mc = &ucp->uc_mcontext;
+ /* The magic cookie written at frame creation must survive the handler;
+    if it didn't, the guest has trashed its own signal frame. */
+ vg_assert(priv1->magicPI == 0x31415927);
+ tst->arch.vex.guest_r1 = mc->sc_regs[1];
+ tst->arch.vex.guest_r2 = mc->sc_regs[2];
+ tst->arch.vex.guest_r3 = mc->sc_regs[3];
+ tst->arch.vex.guest_r4 = mc->sc_regs[4];
+ tst->arch.vex.guest_r5 = mc->sc_regs[5];
+ tst->arch.vex.guest_r6 = mc->sc_regs[6];
+ tst->arch.vex.guest_r7 = mc->sc_regs[7];
+ tst->arch.vex.guest_r8 = mc->sc_regs[8];
+ tst->arch.vex.guest_r9 = mc->sc_regs[9];
+ tst->arch.vex.guest_r10 = mc->sc_regs[10];
+ tst->arch.vex.guest_r11 = mc->sc_regs[11];
+ tst->arch.vex.guest_r12 = mc->sc_regs[12];
+ tst->arch.vex.guest_r13 = mc->sc_regs[13];
+ tst->arch.vex.guest_r14 = mc->sc_regs[14];
+ tst->arch.vex.guest_r15 = mc->sc_regs[15];
+ tst->arch.vex.guest_r16 = mc->sc_regs[16];
+ tst->arch.vex.guest_r17 = mc->sc_regs[17];
+ tst->arch.vex.guest_r18 = mc->sc_regs[18];
+ tst->arch.vex.guest_r19 = mc->sc_regs[19];
+ tst->arch.vex.guest_r20 = mc->sc_regs[20];
+ tst->arch.vex.guest_r21 = mc->sc_regs[21];
+ tst->arch.vex.guest_r22 = mc->sc_regs[22];
+ tst->arch.vex.guest_r23 = mc->sc_regs[23];
+ tst->arch.vex.guest_r24 = mc->sc_regs[24];
+ tst->arch.vex.guest_r25 = mc->sc_regs[25];
+ tst->arch.vex.guest_r26 = mc->sc_regs[26];
+ tst->arch.vex.guest_r27 = mc->sc_regs[27];
+ tst->arch.vex.guest_r28 = mc->sc_regs[28];
+ tst->arch.vex.guest_r30 = mc->sc_regs[30];
+ tst->arch.vex.guest_PC = mc->sc_pc;
+ tst->arch.vex.guest_r31 = mc->sc_regs[31];
+ tst->arch.vex.guest_r29 = mc->sc_regs[29];
+ tst->arch.vex_shadow1 = priv1->vex_shadow1;
+ tst->arch.vex_shadow2 = priv1->vex_shadow2;
+ VG_TRACK(die_mem_stack_signal, (Addr)frame, sizeof(*frame));
+
+ if (VG_(clo_trace_signals))
+ VG_(message)( Vg_DebugMsg,
+ "VG_(signal_return) (thread %u): isRT=%d valid magic; EIP=%#x\n",
+ tid, isRT, tst->arch.vex.guest_PC);
+
+ VG_TRACK(post_deliver_signal, tid, priv1->sigNo_private);
+}
+
+#endif
+
+/*--------------------------------------------------------------------*/
+/*--- end sigframe-nanomips-linux.c ---*/
+/*--------------------------------------------------------------------*/
#endif
/* ------------------------ mips 32/64 ------------------------- */
-#if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
+#if defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+ || defined(VGP_nanomips_linux)
UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
/*OUT*/Addr* ips, UInt max_n_ips,
/*OUT*/Addr* sps, /*OUT*/Addr* fps,
uregs.sp = startRegs->r_sp;
Addr fp_min = uregs.sp - VG_STACK_REDZONE_SZB;
-#if defined(VGP_mips32_linux)
+#if defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
uregs.fp = startRegs->misc.MIPS32.r30;
uregs.ra = startRegs->misc.MIPS32.r31;
#elif defined(VGP_mips64_linux)
Int* child_tid, //stack 16 48
Int* parent_tid, //stack 20 52
void* tls_ptr); //stack 24 56
+extern UInt do_syscall_clone_nanomips_linux ( Word (*fn) (void *), /* a0 - 4 */
+ void* stack, /* a1 - 5 */
+ Int flags, /* a2 - 6 */
+ void* arg, /* a3 - 7 */
+ Int* child_tid, /* a4 - 8 */
+ Int* parent_tid, /* a5 - 9 */
+ void* tls_ptr); /* a6 - 10 */
#endif // __PRIV_SYSWRAP_LINUX_H
/*--------------------------------------------------------------------*/
|| defined(VGP_ppc32_linux) \
|| defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
|| defined(VGP_arm_linux) || defined(VGP_s390x_linux) \
- || defined(VGP_arm64_linux)
+ || defined(VGP_arm64_linux) \
+ || defined(VGP_nanomips_linux)
Int o_arg1;
Int o_arg2;
Int o_arg3;
--- /dev/null
+
+/*--------------------------------------------------------------------*/
+/*--- Support for doing system calls. syscall-nanomips-linux.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2017-2018 RT-RK
+ mips-valgrind@rt-rk.com
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#include "pub_core_basics_asm.h"
+
+#if defined(VGP_nanomips_linux)
+
+#include "pub_core_vkiscnums_asm.h"
+#include "libvex_guest_offsets.h"
+
+
+/*----------------------------------------------------------------*/
+/*
+ Perform a syscall for the client. This will run a syscall
+ with the client's specific per-thread signal mask.
+
+ The structure of this function is such that, if the syscall is
+ interrupted by a signal, we can determine exactly what
+ execution state we were in with respect to the execution of
+ the syscall by examining the value of IP in the signal
+ handler. This means that we can always do the appropriate
+ thing to precisely emulate the kernel's signal/syscall
+ interactions.
+
+ The syscall number is taken from the argument, even though it
+ should also be in regs->v0. The syscall result is written
+ back to regs->v0 on completion.
+
+ Returns 0 if the syscall was successfully called (even if the
+ syscall itself failed), or a nonzero error code in the lowest
+ 8 bits if one of the sigprocmasks failed (there's no way to
+ determine which one failed). And there's no obvious way to
+ recover from that either, but nevertheless we want to know.
+
+ VG_(fixup_guest_state_after_syscall_interrupted) does the
+ thread state fixup in the case where we were interrupted by a
+ signal.
+
+ Prototype:
+
+ UWord ML_(do_syscall_for_client_WRK)(
+ Int syscallno, // a0
+ void* guest_state, // a1
+ const vki_sigset_t *sysmask, // a2
+ const vki_sigset_t *postmask, // a3
+ Int nsigwords) // a4
+*/
+/* from vki_arch.h */
+#define VKI_SIG_SETMASK 2
+.globl ML_(do_syscall_for_client_WRK)
+ML_(do_syscall_for_client_WRK):
+
+ # 'save 32' allocates a 32-byte frame and spills $a0..$a4 into it;
+ # the lw offsets below (a0 at 28, a1 at 24, a3 at 16, a4 at 12)
+ # rely on that layout -- confirm against the nanoMIPS SAVE spec.
+ save 32, $a0-$a4
+
+ # Install the client's syscall-time signal mask.  On nanoMIPS/Linux
+ # the syscall number travels in $t4.
+1: li $t4, __NR_rt_sigprocmask
+ li $a0, VKI_SIG_SETMASK
+ move $a1, $a2 # sysmask
+ move $a2, $a3 # postmask
+ move $a3, $a4 # nsigwords
+ syscall[32]
+
+ bnezc $a0, 5f # error
+
+ # Load the real syscall number and its up-to-8 arguments straight
+ # from the guest state (r4..r11 = a0..a7).
+ lw $t4, 28($sp) # t4 = syscallno
+ lw $t0, 24($sp) # t0 = &guest_state
+
+ lw $a0, OFFSET_mips32_r4($t0)
+ lw $a1, OFFSET_mips32_r5($t0)
+ lw $a2, OFFSET_mips32_r6($t0)
+ lw $a3, OFFSET_mips32_r7($t0)
+ lw $a4, OFFSET_mips32_r8($t0)
+ lw $a5, OFFSET_mips32_r9($t0)
+ lw $a6, OFFSET_mips32_r10($t0)
+ lw $a7, OFFSET_mips32_r11($t0)
+2: syscall[32]
+
+ # Write the result back into the guest's r4 ($a0).
+3: lw $t0, 24($sp) # t0 = &guest_state
+
+ sw $a0, OFFSET_mips32_r4($t0)
+
+ # Restore the post-syscall signal mask.
+4: li $t4, __NR_rt_sigprocmask
+ li $a0, VKI_SIG_SETMASK
+ lw $a1, 16($sp)
+ li $a2, 0
+ lw $a3, 12($sp)
+ syscall[32]
+
+ bnezc $a0, 5f # error
+
+ li $a0, 0
+ bc 6f
+
+5: #error, return 0x8000
+ # nanoMIPS LUI shifts its 20-bit immediate left by 12: 0x8 << 12 = 0x8000.
+ lui $a0, 0x00008
+
+6: addu $sp, $sp, 32
+ jrc $ra
+
+.section .rodata
+/* export the ranges so that
+ VG_(fixup_guest_state_after_syscall_interrupted) can do the
+ right thing */
+
+.globl ML_(blksys_setup)
+.globl ML_(blksys_restart)
+.globl ML_(blksys_complete)
+.globl ML_(blksys_committed)
+.globl ML_(blksys_finished)
+ML_(blksys_setup): .long 1b
+ML_(blksys_restart): .long 2b
+ML_(blksys_complete): .long 3b
+ML_(blksys_committed): .long 4b
+ML_(blksys_finished): .long 6b
+
+#endif
+
+/* Let the linker know we don't need an executable stack */
+MARK_STACK_NO_EXEC
+
+/*--------------------------------------------------------------------*/
+/*--- end syscall-nanomips-linux.S ---*/
+/*--------------------------------------------------------------------*/
: "r" (VgTs_Empty), "n" (__NR_exit), "m" (tst->os_state.exitcode)
: "cc", "memory" , "v0", "a0"
);
+#elif defined(VGP_nanomips_linux)
+ asm volatile (
+ "sw %1, %0 \n\t" /* set tst->status = VgTs_Empty */
+ "li $t4, %2 \n\t" /* set t4 = __NR_exit */
+ "lw $a0, %3 \n\t" /* set a0 = tst->os_state.exitcode */
+ "syscall[32] \n\t" /* exit(tst->os_state.exitcode) */
+ : "=m" (tst->status)
+ : "r" (VgTs_Empty), "n" (__NR_exit), "m" (tst->os_state.exitcode)
+ : "memory" , "$t4", "$a0"
+ );
#else
# error Unknown platform
#endif
/* High half word64 is syscall return value. Low half is
the entire CR, from which we need to extract CR0.SO. */
res = VG_ (mk_SysRes_mips32_linux) (/*val */ ret, 0, /*errflag */ 0);
+#elif defined(VGP_nanomips_linux)
+ UInt ret = 0;
+ ctst->arch.vex.guest_r2 = 0;
+ ret = do_syscall_clone_nanomips_linux
+ (ML_(start_thread_NORETURN), stack, flags, ctst,
+ child_tidptr, parent_tidptr, NULL);
+ res = VG_ (mk_SysRes_nanomips_linux) (ret, 0);
#else
# error Unknown platform
#endif
#elif defined(VGP_mips64_linux)
ctst->arch.vex.guest_ULR = tlsaddr;
ctst->arch.vex.guest_r27 = tlsaddr;
-#elif defined(VGP_mips32_linux)
+#elif defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
ctst->arch.vex.guest_ULR = tlsaddr;
ctst->arch.vex.guest_r27 = tlsaddr;
#else
|| defined(VGP_ppc32_linux) \
|| defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
|| defined(VGP_arm_linux) || defined(VGP_mips32_linux) \
- || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
+ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
+ || defined(VGP_nanomips_linux)
res = VG_(do_syscall5)( __NR_clone, flags,
(UWord)NULL, (UWord)parent_tidptr,
(UWord)NULL, (UWord)child_tidptr );
|| defined(VGP_ppc32_linux) \
|| defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
|| defined(VGP_arm_linux) || defined(VGP_mips32_linux) \
- || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
+ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
+ || defined(VGP_nanomips_linux)
#define ARG_CHILD_TIDPTR ARG5
#define PRA_CHILD_TIDPTR PRA5
#define ARG_TLS ARG4
// (XXX: so how is it that PRE(sys_sigpending) above doesn't need
// conditional compilation like this?)
#if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
- || defined(VGP_arm_linux) || defined(VGP_mips32_linux)
+ || defined(VGP_arm_linux) || defined(VGP_mips32_linux) \
+ || defined(VGP_nanomips_linux)
PRE(sys_sigprocmask)
{
vki_old_sigset_t* set;
}
}
+#if !defined(VGP_nanomips_linux)
PRE(sys_newfstatat)
{
FUSE_COMPATIBLE_MAY_BLOCK();
{
POST_MEM_WRITE( ARG3, sizeof(struct vki_stat) );
}
+#endif
PRE(sys_unlinkat)
{
#endif
#if defined(VGP_amd64_linux) || defined(VGP_s390x_linux) \
- || defined(VGP_arm64_linux)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
PRE(sys_lookup_dcookie)
{
*flags |= SfMayBlock;
canonical->arg7 = gst->guest_r10; // a6
canonical->arg8 = gst->guest_r11; // a7
+#elif defined(VGP_nanomips_linux)
+ VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
+ canonical->sysno = gst->guest_r2; // t4
+ canonical->arg1 = gst->guest_r4; // a0
+ canonical->arg2 = gst->guest_r5; // a1
+ canonical->arg3 = gst->guest_r6; // a2
+ canonical->arg4 = gst->guest_r7; // a3
+ canonical->arg5 = gst->guest_r8; // a4
+ canonical->arg6 = gst->guest_r9; // a5
+ canonical->arg7 = gst->guest_r10; // a6
+ canonical->arg8 = gst->guest_r11; // a7
#elif defined(VGP_x86_darwin)
VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
UWord *stack = (UWord *)gst->guest_ESP;
*((UInt*) (gst->guest_r29 + 28)) = canonical->arg7; // 28(sp)
}
+#elif defined(VGP_nanomips_linux)
+ VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
+ gst->guest_r2 = canonical->sysno;
+ gst->guest_r4 = canonical->arg1;
+ gst->guest_r5 = canonical->arg2;
+ gst->guest_r6 = canonical->arg3;
+ gst->guest_r7 = canonical->arg4;
+ gst->guest_r8 = canonical->arg5;
+ gst->guest_r9 = canonical->arg6;
+ gst->guest_r10 = canonical->arg7;
+ gst->guest_r11 = canonical->arg8;
#elif defined(VGP_mips64_linux)
VexGuestMIPS64State* gst = (VexGuestMIPS64State*)gst_vanilla;
gst->guest_r2 = canonical->sysno;
canonical->sres = VG_(mk_SysRes_mips64_linux)(v0, v1, a3);
canonical->what = SsComplete;
+# elif defined(VGP_nanomips_linux)
+ VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
+ RegWord a0 = gst->guest_r4; // a0
+ RegWord a1 = gst->guest_r5; // a1
+ canonical->sres = VG_(mk_SysRes_nanomips_linux)(a0, a1);
+ canonical->what = SsComplete;
+
# elif defined(VGP_x86_darwin)
/* duplicates logic in m_signals.VG_UCONTEXT_SYSCALL_SYSRES */
VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
OFFSET_mips64_r7, sizeof(UWord) );
+# elif defined(VGP_nanomips_linux)
+ VexGuestMIPS32State* gst = (VexGuestMIPS32State*)gst_vanilla;
+ vg_assert(canonical->what == SsComplete);
+ gst->guest_r4 = canonical->sres._val;
+ gst->guest_r5 = canonical->sres._valEx;
+ VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
+ OFFSET_mips32_r4, sizeof(UWord) );
+ VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
+ OFFSET_mips32_r5, sizeof(UWord) );
+
# elif defined(VGP_x86_solaris)
VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
SysRes sres = canonical->sres;
layout->s_arg7 = sizeof(UWord) * 6;
layout->uu_arg8 = -1; /* impossible value */
+#elif defined(VGP_nanomips_linux)
+ layout->o_sysno = OFFSET_mips32_r2;
+ layout->o_arg1 = OFFSET_mips32_r4;
+ layout->o_arg2 = OFFSET_mips32_r5;
+ layout->o_arg3 = OFFSET_mips32_r6;
+ layout->o_arg4 = OFFSET_mips32_r7;
+ layout->o_arg5 = OFFSET_mips32_r8;
+ layout->o_arg6 = OFFSET_mips32_r9;
+ layout->uu_arg7 = -1; /* impossible value */
+ layout->uu_arg8 = -1; /* impossible value */
+
#elif defined(VGP_mips64_linux)
layout->o_sysno = OFFSET_mips64_r2;
layout->o_arg1 = OFFSET_mips64_r4;
sysno = sci->args.sysno;
getSyscallStatusFromGuestState( &test_status, &tst->arch.vex );
- if (!(sci->flags & SfNoWriteResult))
+ if (!(sci->flags & SfNoWriteResult)) {
vg_assert(eq_SyscallStatus( sysno, &sci->status, &test_status ));
+ }
/* Failure of the above assertion on Darwin can indicate a problem
in the syscall wrappers that pre-fail or pre-succeed the
syscall, by calling SET_STATUS_Success or SET_STATUS_Failure,
vg_assert(p[0] == 0x0A);
}
-#elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
+#elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+ || defined(VGP_nanomips_linux)
arch->vex.guest_PC -= 4; // sizeof(mips instr)
--- /dev/null
+
+/*--------------------------------------------------------------------*/
+/*--- Platform-specific syscalls stuff. syswrap-nanomips-linux.c ----*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2017-2018 RT-RK
+ mips-valgrind@rt-rk.com
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_nanomips_linux)
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_options.h"
+#include "pub_core_scheduler.h"
+#include "pub_core_sigframe.h"      // For VG_(sigframe_destroy)()
+#include "pub_core_signals.h"
+#include "pub_core_syscall.h"
+#include "pub_core_syswrap.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_transtab.h"      // VG_(discard_translations)
+#include "priv_types_n_macros.h"
+#include "priv_syswrap-generic.h"   /* for decls of generic wrappers */
+#include "priv_syswrap-linux.h"     /* for decls of linux-ish wrappers */
+#include "priv_syswrap-main.h"
+
+#include "pub_core_debuginfo.h"     // VG_(di_notify_*)
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"   // VG_(brk_base), VG_(brk_limit)
+#include "pub_core_errormgr.h"
+#include "pub_core_gdbserver.h"     // VG_(gdbserver)
+#include "pub_core_libcfile.h"
+#include "pub_core_machine.h"       // VG_(get_SP)
+#include "pub_core_mallocfree.h"
+#include "pub_core_stacktrace.h"    // For VG_(get_and_pp_StackTrace)()
+#include "pub_core_ume.h"
+#include "priv_syswrap-generic.h"
+#include "config.h"
+
+/* NOTE(review): nothing in this file references 'errno', and coregrind
+   code is normally built without libc headers -- this <errno.h> include
+   looks unnecessary; TODO confirm and consider dropping it. */
+#include <errno.h>
+
+/* ---------------------------------------------------------------------
+   clone() handling
+   ------------------------------------------------------------------ */
+/* Call f(arg1), but first switch stacks, using 'stack' as the new
+   stack, and use 'retaddr' as f's return-to address. Also, clear all
+   the integer registers before entering f.*/
+
+__attribute__ ((noreturn))
+void ML_ (call_on_new_stack_0_1) (Addr stack, Addr retaddr,
+ void (*f) (Word), Word arg1);
+// a0 = stack
+// a1 = retaddr
+// a2 = f
+// a3 = arg1
+asm (
+ ".text\n"
+ ".globl vgModuleLocal_call_on_new_stack_0_1 \n"
+ "vgModuleLocal_call_on_new_stack_0_1: \n"
+ " move $sp, $a0 \n\t" // stack to $sp
+ " move $ra, $a1 \n\t" // retaddr to $ra
+ " move $t9, $a2 \n\t" // f to t9
+ " move $a0, $a3 \n\t" // arg1 to $a0
+ " li $t4, 0\n\t" // zero all GP regs
+ " li $t5, 0\n\t"
+ " li $a1, 0\n\t"
+ " li $a2, 0\n\t"
+ " li $a3, 0\n\t"
+ " li $t0, 0\n\t"
+ " li $t1, 0\n\t"
+ " li $t2, 0\n\t"
+ " li $t3, 0\n\t"
+ " li $s0, 0\n\t"
+ " li $s1, 0\n\t"
+ " li $s2, 0\n\t"
+ " li $s3, 0\n\t"
+ " li $s4, 0\n\t"
+ " li $s5, 0\n\t"
+ " li $s6, 0\n\t"
+ " li $s7, 0\n\t"
+ " li $t8, 0\n\t"
+ /* NOTE(review): "all GP regs" above is approximate -- $a4..$a7, $gp
+    and $fp are not cleared here; confirm that is intentional. */
+ " jrc $t9 \n\t" // jump to dst
+ " break 0x7 \n" // should never get here
+ ".previous\n"
+);
+
+/*
+ Perform a clone system call. clone is strange because it has
+ fork()-like return-twice semantics, so it needs special
+ handling here.
+ Upon entry, we have:
+ int (fn)(void*) in $a0 0
+ void* child_stack in $a1 4
+ int flags in $a2 8
+ void* arg in $a3 12
+ pid_t* child_tid in $a4 16
+ pid_t* parent_tid in $a5 20
+ void* tls_ptr in $a6 24
+
+ System call requires:
+ int $__NR_clone in $t4
+ int flags in $a0 0
+ void* child_stack in $a1 4
+ pid_t* parent_tid in $a2 8
+ void* tls_ptr in $a3 12
+ pid_t* child_tid in $a4 16
+
+ int clone(int (*fn)(void *arg), void *child_stack, int flags, void *arg,
+ void *parent_tidptr, void *tls, void *child_tidptr)
+
+ Returns an Int encoded in the linux-mips way, not a SysRes.
+ */
+#define __NR_CLONE VG_STRINGIFY(__NR_clone)
+#define __NR_EXIT VG_STRINGIFY(__NR_exit)
+
+// See priv_syswrap-linux.h for arg profile.
+asm (
+" .text\n"
+" .set noreorder\n"
+" .set nomacro\n"
+" .globl do_syscall_clone_nanomips_linux\n"
+" do_syscall_clone_nanomips_linux:\n"
+/* Save callee/context registers on the caller's stack. */
+" addiu $sp, $sp, -32\n"
+" sw $ra, 0($sp) \n\t"
+" sw $fp, 4($sp) \n\t"
+" sw $gp, 8($sp) \n\t"
+" sw $t4, 12($sp) \n\t"
+
+/* Stash fn/arg/flags at the top of the CHILD's stack, so the child
+   (which starts with $sp == child_stack - 32) can retrieve them. */
+" addiu $a1, $a1, -32\n"
+" sw $a0, 0($a1)\n" /* fn */
+" sw $a3, 4($a1)\n" /* arg */
+" sw $a2, 8($a1)\n" /* flags */
+
+
+/* 1. arg for syscalls */
+" move $a0, $a2\n" /* flags */
+" move $a2, $a5\n" /* parent */
+" move $a3, $a6\n" /* tls */
+
+/* 2. do a syscall to clone */
+" li $t4, " __NR_CLONE "\n\t" /* __NR_clone */
+" syscall 1\n"
+
+/* 3. See if we are a child, call fn and after that exit.
+   The syscall result comes back in $a0: non-zero means we are the
+   parent (child pid) or an error; zero means we are the child. */
+" bnezc $a0, p_or_error\n"
+
+/* Child: load fn and arg from the child stack slots stored above. */
+" lw $t9,0($sp)\n"
+" lw $a0,4($sp)\n"
+" jalrc $t9\n"
+
+/* Child must never return from fn; exit with fn's return value. */
+" li $t4, " __NR_EXIT "\n\t" /* NR_exit */
+" syscall 1\n\t"
+/* 4. If we are parent or error, just return to caller */
+" p_or_error:\n"
+" lw $ra, 0($sp)\n"
+" lw $fp, 4($sp)\n"
+" lw $gp, 8($sp)\n"
+" lw $t4, 12($sp)\n"
+" addiu $sp,$sp, 32\n"
+
+" jrc $ra\n"
+" .previous\n"
+);
+
+#undef __NR_CLONE
+#undef __NR_EXIT
+
+// forward declarations
+static SysRes sys_set_tls (ThreadId tid, Addr tlsptr);
+static SysRes nanomips_PRE_sys_mmap (ThreadId tid,
+ UWord arg1, UWord arg2, UWord arg3,
+ UWord arg4, UWord arg5, Off64T arg6);
+/* ---------------------------------------------------------------------
+ More thread stuff
+ ------------------------------------------------------------------ */
+
+// nanoMIPS has no architecture-specific per-thread state that needs
+// to be cleaned up when a thread exits, so this is a no-op.
+void
+VG_ (cleanup_thread) (ThreadArchState * arch) { }
+
+/* Record 'tlsptr' as the thread-local-storage pointer for thread 'tid'
+   by writing it into the guest UserLocal register (ULR).  Marked
+   'static' to agree with the forward declaration above; the previous
+   definition omitted it, giving an inconsistent linkage spec. */
+static SysRes sys_set_tls ( ThreadId tid, Addr tlsptr )
+{
+   VG_(threads)[tid].arch.vex.guest_ULR = tlsptr;
+   return VG_(mk_SysRes_Success)( 0 );
+}
+
+/* ---------------------------------------------------------------------
+ mips handler for mmap and mmap2
+ ------------------------------------------------------------------ */
+/* Tell aspacem (the core address-space manager) about a mmap that the
+   kernel has actually performed.  If aspacem reports that the change
+   affected code areas, throw away any cached translations there. */
+static void notify_core_of_mmap(Addr a, SizeT len, UInt prot,
+                                UInt flags, Int fd, Off64T offset)
+{
+   Bool d;
+
+   /* 'a' is the return value from a real kernel mmap, hence: */
+   vg_assert(VG_IS_PAGE_ALIGNED(a));
+   /* whereas len is whatever the syscall supplied.  So: */
+   len = VG_PGROUNDUP(len);
+
+   d = VG_(am_notify_client_mmap)( a, len, prot, flags, fd, offset );
+
+   if (d)
+      VG_(discard_translations)( a, (ULong)len,
+                                 "notify_core_of_mmap" );
+}
+
+/* Tell the tool about a mmap the kernel has performed, translating the
+   mmap prot bits into the r/w/x booleans the new_mem_mmap event takes.
+   'di_handle' lets the tool refer to any debuginfo read for the map. */
+static void notify_tool_of_mmap(Addr a, SizeT len, UInt prot, ULong di_handle)
+{
+   Bool rr, ww, xx;
+
+   /* 'a' is the return value from a real kernel mmap, hence: */
+   vg_assert(VG_IS_PAGE_ALIGNED(a));
+   /* whereas len is whatever the syscall supplied.  So: */
+   len = VG_PGROUNDUP(len);
+
+   rr = toBool(prot & VKI_PROT_READ);
+   ww = toBool(prot & VKI_PROT_WRITE);
+   xx = toBool(prot & VKI_PROT_EXEC);
+
+   VG_TRACK( new_mem_mmap, a, len, rr, ww, xx, di_handle );
+}
+
+/* Based on ML_(generic_PRE_sys_mmap) from syswrap-generic.c.
+   If we are trying to do mmap with VKI_MAP_SHARED flag we need to align the
+   start address on VKI_SHMLBA like we did in
+   VG_(am_mmap_file_float_valgrind_flags)
+ */
+static SysRes nanomips_PRE_sys_mmap(ThreadId tid,
+                                    UWord arg1, UWord arg2, UWord arg3,
+                                    UWord arg4, UWord arg5, Off64T arg6)
+{
+   Addr advised;
+   SysRes sres;
+   MapRequest mreq;
+   Bool mreq_ok;
+
+   if (arg2 == 0) {
+      /* SuSV3 says: If len is zero, mmap() shall fail and no mapping
+         shall be established. */
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+   }
+
+   if (!VG_IS_PAGE_ALIGNED(arg1)) {
+      /* zap any misaligned addresses. */
+      /* SuSV3 says misaligned addresses only cause the MAP_FIXED case
+         to fail.   Here, we catch them all. */
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+   }
+
+   if (!VG_IS_PAGE_ALIGNED(arg6)) {
+      /* zap any misaligned offsets. */
+      /* SuSV3 says: The off argument is constrained to be aligned and
+         sized according to the value returned by sysconf() when
+         passed _SC_PAGESIZE or _SC_PAGE_SIZE. */
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+   }
+
+   /* Figure out what kind of allocation constraints there are
+      (fixed/hint/any), and ask aspacem what we should do. */
+   mreq.start = arg1;
+   mreq.len = arg2;
+
+   if (arg4 & VKI_MAP_FIXED) {
+      mreq.rkind = MFixed;
+   } else if (arg1 != 0) {
+      mreq.rkind = MHint;
+   } else {
+      mreq.rkind = MAny;
+   }
+
+   /* For a non-fixed MAP_SHARED request, over-ask by (SHMLBA - page)
+      so that the advised address can later be rounded up to SHMLBA
+      alignment while still fitting the requested length. */
+   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & arg4)
+       && !(VKI_MAP_FIXED & arg4))
+      mreq.len = arg2 + VKI_SHMLBA - VKI_PAGE_SIZE;
+
+   /* Enquire ... */
+   advised = VG_(am_get_advisory)( &mreq, True/*client*/, &mreq_ok );
+
+   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & arg4)
+       && !(VKI_MAP_FIXED & arg4))
+      advised = VG_ROUNDUP(advised, VKI_SHMLBA);
+
+   if (!mreq_ok) {
+      /* Our request was bounced, so we'd better fail. */
+      return VG_(mk_SysRes_Error)( VKI_EINVAL );
+   }
+
+   /* Otherwise we're OK (so far).  Install aspacem's choice of
+      address, and let the mmap go through.  */
+   sres = VG_(am_do_mmap_NO_NOTIFY)(advised, arg2, arg3,
+                                    arg4 | VKI_MAP_FIXED,
+                                    arg5, arg6);
+
+   /* A refinement: it may be that the kernel refused aspacem's choice
+      of address.  If we were originally asked for a hinted mapping,
+      there is still a last chance: try again at any address.
+      Hence: */
+   if (mreq.rkind == MHint && sr_isError(sres)) {
+      mreq.start = 0;
+      mreq.len = arg2;
+      mreq.rkind = MAny;
+      advised = VG_(am_get_advisory)( &mreq, True/*client*/, &mreq_ok );
+
+      if (!mreq_ok) {
+         /* Our request was bounced, so we'd better fail. */
+         return VG_(mk_SysRes_Error)( VKI_EINVAL );
+      }
+
+      /* and try again with the kernel */
+      sres = VG_(am_do_mmap_NO_NOTIFY)(advised, arg2, arg3,
+                                       arg4 | VKI_MAP_FIXED,
+                                       arg5, arg6);
+   }
+
+   if (!sr_isError(sres)) {
+      ULong di_handle;
+      /* Notify aspacem. */
+      notify_core_of_mmap(
+         (Addr)sr_Res(sres), /* addr kernel actually assigned */
+         arg2, /* length */
+         arg3, /* prot */
+         arg4, /* the original flags value */
+         arg5, /* fd */
+         arg6  /* offset */
+      );
+      /* Load symbols? */
+      di_handle = VG_(di_notify_mmap)( (Addr)sr_Res(sres),
+                                       False/*allow_SkFileV*/, (Int)arg5 );
+      /* Notify the tool. */
+      notify_tool_of_mmap(
+         (Addr)sr_Res(sres), /* addr kernel actually assigned */
+         arg2, /* length */
+         arg3, /* prot */
+         di_handle /* so the tool can refer to the read debuginfo later,
+                      if it wants. */
+      );
+   }
+
+   /* Stay sane */
+   if (!sr_isError(sres) && (arg4 & VKI_MAP_FIXED))
+      vg_assert(sr_Res(sres) == arg1);
+
+   return sres;
+}
+/* ---------------------------------------------------------------------
+   PRE/POST wrappers for nanoMIPS/Linux-specific syscalls
+   ------------------------------------------------------------------ */
+/* NOTE(review): the wrappers below reuse the 'mips_linux' template tag
+   rather than a nanomips-specific one; this is internally consistent
+   with the DECL_TEMPLATE/WRAPPER_ENTRY uses in this file, but confirm
+   no name clash with syswrap-mips32-linux.c wrappers at link time. */
+#define PRE(name) DEFN_PRE_TEMPLATE(mips_linux, name)
+#define POST(name) DEFN_POST_TEMPLATE(mips_linux, name)
+
+/* Add prototypes for the wrappers declared here, so that gcc doesn't
+   harass us for not having prototypes.  Really this is a kludge --
+   the right thing to do is to make these wrappers 'static' since they
+   aren't visible outside this file, but that requires even more macro
+   magic. */
+DECL_TEMPLATE (mips_linux, sys_mmap2);
+DECL_TEMPLATE (mips_linux, sys_rt_sigreturn);
+DECL_TEMPLATE (mips_linux, sys_set_thread_area);
+DECL_TEMPLATE (mips_linux, sys_ptrace);
+DECL_TEMPLATE (mips_linux, sys_unshare);
+DECL_TEMPLATE (mips_linux, sys_reboot);
+DECL_TEMPLATE (mips_linux, sys_setdomainname);
+DECL_TEMPLATE (mips_linux, sys_sethostname);
+DECL_TEMPLATE (mips_linux, sys_swapon);
+DECL_TEMPLATE (mips_linux, sys_swapoff);
+
+PRE(sys_mmap2)
+{
+   /* Exactly like sys_mmap() except the file offset is specified in 4096 byte
+      units rather than bytes, so that it can be used for files bigger than
+      2^32 bytes. */
+   SysRes r;
+   PRINT("sys_mmap2 ( %#lx, %lu, %ld, %ld, %ld, %ld )",
+         ARG1, ARG2, SARG3, SARG4, SARG5, SARG6);
+   PRE_REG_READ6(long, "mmap2", unsigned long, start, unsigned long, length,
+                 unsigned long, prot, unsigned long, flags,
+                 unsigned long, fd, unsigned long, offset);
+   /* Note the 4096 scale factor: the unit is fixed at 4096 regardless
+      of the page size, matching the kernel's mmap2 definition. */
+   r = nanomips_PRE_sys_mmap(tid, ARG1, ARG2, ARG3, ARG4, ARG5,
+                             4096 * (Off64T) ARG6);
+   SET_STATUS_from_SysRes(r);
+}
+
+/* Mark the memory a ptrace request will read or write, per request
+   type, so memcheck can track definedness of the transferred data. */
+PRE(sys_ptrace)
+{
+   PRINT("sys_ptrace ( %ld, %ld, %#lx, %#lx )", SARG1, SARG2, ARG3, ARG4);
+   PRE_REG_READ4(int, "ptrace",
+                 long, request, long, pid, unsigned long, addr,
+                 unsigned long, data);
+
+   switch (ARG1) {
+      /* On MIPS-family Linux the PEEK* results land at the address in
+         'data' (ARG4), hence the write annotation. */
+      case VKI_PTRACE_PEEKTEXT:
+      case VKI_PTRACE_PEEKDATA:
+      case VKI_PTRACE_PEEKUSR:
+         PRE_MEM_WRITE("ptrace(peek)", ARG4, sizeof(long));
+         break;
+
+      case VKI_PTRACE_GETEVENTMSG:
+         PRE_MEM_WRITE("ptrace(geteventmsg)", ARG4, sizeof(unsigned long));
+         break;
+
+      case VKI_PTRACE_GETSIGINFO:
+         PRE_MEM_WRITE("ptrace(getsiginfo)", ARG4, sizeof(vki_siginfo_t));
+         break;
+
+      case VKI_PTRACE_SETSIGINFO:
+         PRE_MEM_READ("ptrace(setsiginfo)", ARG4, sizeof(vki_siginfo_t));
+         break;
+
+      case VKI_PTRACE_GETREGSET:
+         ML_(linux_PRE_getregset)(tid, ARG3, ARG4);
+         break;
+
+      default:
+        break;
+   }
+}
+
+/* After a successful ptrace, tell the tool which memory the kernel
+   filled in (mirrors the PRE_MEM_WRITE annotations above). */
+POST(sys_ptrace)
+{
+   switch (ARG1) {
+      case VKI_PTRACE_TRACEME:
+         ML_(linux_POST_traceme)(tid);
+         break;
+
+      case VKI_PTRACE_PEEKTEXT:
+      case VKI_PTRACE_PEEKDATA:
+      case VKI_PTRACE_PEEKUSR:
+         POST_MEM_WRITE (ARG4, sizeof(long));
+         break;
+
+      case VKI_PTRACE_GETEVENTMSG:
+         POST_MEM_WRITE (ARG4, sizeof(unsigned long));
+         break;
+
+      case VKI_PTRACE_GETSIGINFO:
+         POST_MEM_WRITE (ARG4, sizeof(vki_siginfo_t));
+         break;
+
+      case VKI_PTRACE_GETREGSET:
+         ML_(linux_POST_getregset)(tid, ARG3, ARG4);
+         break;
+
+      default:
+        break;
+   }
+}
+
+/* rt_sigreturn: tear down the signal frame Valgrind built and restore
+   the pre-signal guest state; the "result" of the syscall must not be
+   written back, since the whole register file was just restored. */
+PRE(sys_rt_sigreturn)
+{
+   PRINT ("rt_sigreturn ( )");
+   vg_assert (VG_ (is_valid_tid) (tid));
+   vg_assert (tid >= 1 && tid < VG_N_THREADS);
+   vg_assert (VG_ (is_running_thread) (tid));
+   /* Restore register state from frame and remove it */
+   VG_ (sigframe_destroy) (tid, True);
+   /* Tell the driver not to update the guest state with the "result",
+      and set a bogus result to keep it happy. */
+   *flags |= SfNoWriteResult;
+   SET_STATUS_Success (0);
+   /* Check to see if any signals arose as a result of this. */
+   *flags |= SfPollAfter;
+}
+
+/* set_thread_area: handled entirely in Valgrind by storing the TLS
+   pointer into the guest ULR register (see sys_set_tls above); the
+   real syscall is never made. */
+PRE(sys_set_thread_area)
+{
+   PRINT ("set_thread_area (%lx)", ARG1);
+   PRE_REG_READ1(long, "set_thread_area", unsigned long, addr);
+   SET_STATUS_from_SysRes( sys_set_tls( tid, ARG1 ) );
+}
+
+/* The remaining platform wrappers only print the call and declare
+   which guest registers the kernel reads; no memory annotations are
+   needed beyond what the generic machinery does. */
+PRE(sys_unshare)
+{
+   PRINT("sys_unshare ( %lu )", ARG1);
+   PRE_REG_READ1(long, "sys_unshare", unsigned long, flags);
+}
+
+PRE(sys_reboot)
+{
+   PRINT("sys_reboot ( %ld, %lu, %lu, %#lx )", SARG1, ARG2, ARG3, ARG4);
+   // An approximation.  ARG4 is only read conditionally by the kernel
+   PRE_REG_READ4(int, "reboot",
+                 int, magic1, int, magic2, unsigned int, cmd,
+                 void *, arg);
+
+   *flags |= SfMayBlock;
+}
+
+PRE(sys_setdomainname)
+{
+   PRINT ("sys_setdomainname ( %#lx, %ld )", ARG1, SARG2);
+   PRE_REG_READ2 (long, "setdomainname", const void *, name, int, len);
+}
+
+PRE(sys_sethostname)
+{
+   PRINT ("sys_sethostname ( %#lx, %ld )", ARG1, SARG2);
+   PRE_REG_READ2 (long, "sethostname", const void *, name, int, len);
+}
+
+PRE(sys_swapon)
+{
+   PRINT("sys_swapon ( %#lx, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "swapon", const void *, path, int, flags);
+}
+
+PRE(sys_swapoff)
+{
+   PRINT("sys_swapoff ( %#lx )", ARG1);
+   PRE_REG_READ1(long, "swapoff", const void *, path);
+}
+
+#undef PRE
+#undef POST
+
+/* ---------------------------------------------------------------------
+ The mips/Linux syscall table
+ ------------------------------------------------------------------ */
+#define PLAX_(sysno, name) WRAPPER_ENTRY_X_(mips_linux, sysno, name)
+#define PLAXY(sysno, name) WRAPPER_ENTRY_XY(mips_linux, sysno, name)
+
+// This table maps from __NR_xxx syscall numbers (from the nanoMIPS
+// Linux kernel headers) to the appropriate PRE/POST sys_foo() wrappers.
+//
+
+// For those syscalls not handled by Valgrind, the annotation indicate its
+// arch/OS combination, eg. */* (generic), */Linux (Linux only), ?/?
+// (unknown).
+/* NOTE(review): ML_(get_linux_syscall_entry) below indexes this array
+   directly by syscall number, so the WRAPPER_ENTRY_* macros presumably
+   expand to designated initializers ([sysno] = ...) -- verify against
+   priv_types_n_macros.h. */
+static SyscallTableEntry syscall_main_table[] = {
+   LINXY (__NR_io_setup, sys_io_setup),
+   LINX_ (__NR_io_destroy, sys_io_destroy),
+   LINX_ (__NR_io_submit, sys_io_submit),
+   LINXY (__NR_io_cancel, sys_io_cancel),
+   LINXY (__NR_io_getevents, sys_io_getevents),
+   LINX_ (__NR_setxattr, sys_setxattr),
+   LINX_ (__NR_lsetxattr, sys_lsetxattr),
+   LINX_ (__NR_fsetxattr, sys_fsetxattr),
+   LINXY (__NR_getxattr, sys_getxattr),
+   LINXY (__NR_lgetxattr, sys_lgetxattr),
+   LINXY (__NR_fgetxattr, sys_fgetxattr),
+   LINXY (__NR_listxattr, sys_listxattr),
+   LINXY (__NR_llistxattr, sys_llistxattr),
+   LINXY (__NR_flistxattr, sys_flistxattr),
+   LINX_ (__NR_removexattr, sys_removexattr),
+   LINX_ (__NR_lremovexattr, sys_lremovexattr),
+   LINX_ (__NR_fremovexattr, sys_fremovexattr),
+   GENXY (__NR_getcwd, sys_getcwd),
+   LINXY (__NR_lookup_dcookie, sys_lookup_dcookie),
+   LINXY (__NR_eventfd2, sys_eventfd2),
+   LINXY (__NR_epoll_create1, sys_epoll_create1),
+   LINX_ (__NR_epoll_ctl, sys_epoll_ctl),
+   LINXY (__NR_epoll_pwait, sys_epoll_pwait),
+   GENXY (__NR_dup, sys_dup),
+   LINXY (__NR_dup3, sys_dup3),
+   LINXY (__NR_fcntl64, sys_fcntl64),
+   LINXY (__NR_inotify_init1, sys_inotify_init1),
+   LINX_ (__NR_inotify_add_watch, sys_inotify_add_watch),
+   LINX_ (__NR_inotify_rm_watch, sys_inotify_rm_watch),
+   LINXY (__NR_ioctl, sys_ioctl),
+   LINX_ (__NR_ioprio_set, sys_ioprio_set),
+   LINX_ (__NR_ioprio_get, sys_ioprio_get),
+   GENX_ (__NR_flock, sys_flock),
+   LINX_ (__NR_mknodat, sys_mknodat),
+   LINX_ (__NR_mkdirat, sys_mkdirat),
+   LINX_ (__NR_unlinkat, sys_unlinkat),
+   LINX_ (__NR_symlinkat, sys_symlinkat),
+   LINX_ (__NR_linkat, sys_linkat),
+   LINX_ (__NR_umount2, sys_umount),
+   LINX_ (__NR_mount, sys_mount),
+   LINX_ (__NR_pivot_root, sys_pivot_root),
+   GENX_ (__NR_truncate64, sys_truncate64),
+   GENX_ (__NR_ftruncate64, sys_ftruncate64),
+   LINX_ (__NR_fallocate, sys_fallocate),
+   LINX_ (__NR_faccessat, sys_faccessat),
+   GENX_ (__NR_chdir, sys_chdir),
+   GENX_ (__NR_fchdir, sys_fchdir),
+   GENX_ (__NR_chroot, sys_chroot),
+   GENX_ (__NR_fchmod, sys_fchmod),
+   LINX_ (__NR_fchmodat, sys_fchmodat),
+   LINX_ (__NR_fchownat, sys_fchownat),
+   GENX_ (__NR_fchown, sys_fchown),
+   LINXY (__NR_openat, sys_openat),
+   GENXY (__NR_close, sys_close),
+   LINX_ (__NR_vhangup, sys_vhangup),
+   LINXY (__NR_pipe2, sys_pipe2),
+   LINX_ (__NR_quotactl, sys_quotactl),
+   GENXY (__NR_getdents64, sys_getdents64),
+   LINXY (__NR__llseek, sys_llseek),
+   GENXY (__NR_read, sys_read),
+   GENX_ (__NR_write, sys_write),
+   GENXY (__NR_readv, sys_readv),
+   GENX_ (__NR_writev, sys_writev),
+   GENXY (__NR_pread64, sys_pread64),
+   GENX_ (__NR_pwrite64, sys_pwrite64),
+   LINXY (__NR_preadv, sys_preadv),
+   LINX_ (__NR_pwritev, sys_pwritev),
+   LINXY (__NR_sendfile64, sys_sendfile64),
+   LINXY (__NR_pselect6, sys_pselect6),
+   LINXY (__NR_ppoll, sys_ppoll),
+   LINXY (__NR_signalfd4, sys_signalfd4),
+   LINX_ (__NR_vmsplice, sys_vmsplice),
+   LINX_ (__NR_splice, sys_splice),
+   LINX_ (__NR_tee, sys_tee),
+   LINX_ (__NR_readlinkat, sys_readlinkat),
+   GENX_ (__NR_sync, sys_sync),
+   GENX_ (__NR_fsync, sys_fsync),
+   GENX_ (__NR_fdatasync, sys_fdatasync),
+   LINX_ (__NR_sync_file_range2, sys_sync_file_range2),
+   LINXY (__NR_timerfd_create, sys_timerfd_create),
+   LINXY (__NR_timerfd_settime, sys_timerfd_settime),
+   LINXY (__NR_timerfd_gettime, sys_timerfd_gettime),
+   LINX_ (__NR_utimensat, sys_utimensat),
+   GENX_ (__NR_acct, sys_acct),
+   LINXY (__NR_capget, sys_capget),
+   LINX_ (__NR_capset, sys_capset),
+   LINX_ (__NR_personality, sys_personality),
+   GENX_ (__NR_exit, sys_exit),
+   LINX_ (__NR_exit_group, sys_exit_group),
+   LINXY (__NR_waitid, sys_waitid),
+   LINX_ (__NR_set_tid_address, sys_set_tid_address),
+   PLAX_ (__NR_unshare, sys_unshare),
+   LINXY (__NR_futex, sys_futex),
+   LINX_ (__NR_set_robust_list, sys_set_robust_list),
+   LINXY (__NR_get_robust_list, sys_get_robust_list),
+   GENXY (__NR_nanosleep, sys_nanosleep),
+   GENXY (__NR_getitimer, sys_getitimer),
+   GENXY (__NR_setitimer, sys_setitimer),
+   GENX_ (__NR_kexec_load, sys_ni_syscall),
+   LINX_ (__NR_init_module, sys_init_module),
+   LINX_ (__NR_delete_module, sys_delete_module),
+   LINXY (__NR_timer_create, sys_timer_create),
+   LINXY (__NR_timer_gettime, sys_timer_gettime),
+   LINX_ (__NR_timer_getoverrun, sys_timer_getoverrun),
+   LINXY (__NR_timer_settime, sys_timer_settime),
+   LINX_ (__NR_timer_delete, sys_timer_delete),
+   LINX_ (__NR_clock_settime, sys_clock_settime),
+   LINXY (__NR_clock_gettime, sys_clock_gettime),
+   LINXY (__NR_clock_getres, sys_clock_getres),
+   LINXY (__NR_clock_nanosleep, sys_clock_nanosleep),
+   LINXY (__NR_syslog, sys_syslog),
+   PLAXY (__NR_ptrace, sys_ptrace),
+   LINXY (__NR_sched_setparam, sys_sched_setparam),
+   LINX_ (__NR_sched_setscheduler, sys_sched_setscheduler),
+   LINX_ (__NR_sched_getscheduler, sys_sched_getscheduler),
+   LINXY (__NR_sched_getparam, sys_sched_getparam),
+   LINX_ (__NR_sched_setaffinity, sys_sched_setaffinity),
+   LINXY (__NR_sched_getaffinity, sys_sched_getaffinity),
+   LINX_ (__NR_sched_yield, sys_sched_yield),
+   LINX_ (__NR_sched_get_priority_max, sys_sched_get_priority_max),
+   LINX_ (__NR_sched_get_priority_min, sys_sched_get_priority_min),
+   LINX_ (__NR_sched_rr_get_interval, sys_sched_rr_get_interval),
+   GENX_ (__NR_kill, sys_kill),
+   LINXY (__NR_tkill, sys_tkill),
+   LINXY (__NR_tgkill, sys_tgkill),
+   GENXY (__NR_sigaltstack, sys_sigaltstack),
+   LINX_ (__NR_rt_sigsuspend, sys_rt_sigsuspend),
+   LINXY (__NR_rt_sigaction, sys_rt_sigaction),
+   LINXY (__NR_rt_sigprocmask, sys_rt_sigprocmask),
+   LINXY (__NR_rt_sigpending, sys_rt_sigpending),
+   LINXY (__NR_rt_sigtimedwait, sys_rt_sigtimedwait),
+   LINXY (__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo),
+   PLAX_ (__NR_rt_sigreturn, sys_rt_sigreturn),
+   GENX_ (__NR_setpriority, sys_setpriority),
+   GENX_ (__NR_getpriority, sys_getpriority),
+   PLAX_ (__NR_reboot, sys_reboot),
+   GENX_ (__NR_setregid, sys_setregid),
+   GENX_ (__NR_setgid, sys_setgid),
+   GENX_ (__NR_setreuid, sys_setreuid),
+   GENX_ (__NR_setuid, sys_setuid),
+   LINX_ (__NR_setresuid, sys_setresuid),
+   LINXY (__NR_getresuid, sys_getresuid),
+   LINX_ (__NR_setresgid, sys_setresgid),
+   LINXY (__NR_getresgid, sys_getresgid),
+   LINX_ (__NR_setfsuid, sys_setfsuid),
+   LINX_ (__NR_setfsgid, sys_setfsgid),
+   GENXY (__NR_times, sys_times),
+   GENX_ (__NR_setpgid, sys_setpgid),
+   GENX_ (__NR_getpgid, sys_getpgid),
+   GENX_ (__NR_getsid, sys_getsid),
+   GENX_ (__NR_setsid, sys_setsid),
+   GENXY (__NR_getgroups, sys_getgroups),
+   GENX_ (__NR_setgroups, sys_setgroups),
+   GENXY (__NR_uname, sys_newuname),
+   PLAX_ (__NR_sethostname, sys_sethostname),
+   PLAX_ (__NR_setdomainname, sys_setdomainname),
+   GENXY (__NR_getrusage, sys_getrusage),
+   GENX_ (__NR_umask, sys_umask),
+   LINXY (__NR_prctl, sys_prctl),
+   LINXY (__NR_getcpu, sys_getcpu),
+   GENXY (__NR_gettimeofday, sys_gettimeofday),
+   GENX_ (__NR_settimeofday, sys_settimeofday),
+   LINXY (__NR_adjtimex, sys_adjtimex),
+   GENX_ (__NR_getpid, sys_getpid),
+   GENX_ (__NR_getppid, sys_getppid),
+   GENX_ (__NR_getuid, sys_getuid),
+   GENX_ (__NR_geteuid, sys_geteuid),
+   GENX_ (__NR_getgid, sys_getgid),
+   GENX_ (__NR_getegid, sys_getegid),
+   LINX_ (__NR_gettid, sys_gettid),
+   LINXY (__NR_sysinfo, sys_sysinfo),
+   LINXY (__NR_mq_open, sys_mq_open),
+   LINX_ (__NR_mq_unlink, sys_mq_unlink),
+   LINX_ (__NR_mq_timedsend, sys_mq_timedsend),
+   LINXY (__NR_mq_timedreceive, sys_mq_timedreceive),
+   LINX_ (__NR_mq_notify, sys_mq_notify),
+   LINXY (__NR_mq_getsetattr, sys_mq_getsetattr),
+   LINX_ (__NR_msgget, sys_msgget),
+   LINXY (__NR_msgctl, sys_msgctl),
+   LINXY (__NR_msgrcv, sys_msgrcv),
+   LINX_ (__NR_msgsnd, sys_msgsnd),
+   LINX_ (__NR_semget, sys_semget),
+   LINXY (__NR_semctl, sys_semctl),
+   LINX_ (__NR_semtimedop, sys_semtimedop),
+   LINX_ (__NR_semop, sys_semop),
+   LINX_ (__NR_shmget, sys_shmget),
+   LINXY (__NR_shmctl, sys_shmctl),
+   LINXY (__NR_shmat, sys_shmat),
+   LINXY (__NR_shmdt, sys_shmdt),
+   LINXY (__NR_socket, sys_socket),
+   LINXY (__NR_socketpair, sys_socketpair),
+   LINX_ (__NR_bind, sys_bind),
+   LINX_ (__NR_listen, sys_listen),
+   LINXY (__NR_accept, sys_accept),
+   LINX_ (__NR_connect, sys_connect),
+   LINXY (__NR_getsockname, sys_getsockname),
+   LINXY (__NR_getpeername, sys_getpeername),
+   LINX_ (__NR_sendto, sys_sendto),
+   LINXY (__NR_recvfrom, sys_recvfrom),
+   LINX_ (__NR_setsockopt, sys_setsockopt),
+   LINXY (__NR_getsockopt, sys_getsockopt),
+   LINX_ (__NR_shutdown, sys_shutdown),
+   LINX_ (__NR_sendmsg, sys_sendmsg),
+   LINXY (__NR_recvmsg, sys_recvmsg),
+   LINX_ (__NR_readahead, sys_readahead),
+   GENX_ (__NR_brk, sys_brk),
+   GENXY (__NR_munmap, sys_munmap),
+   GENX_ (__NR_mremap, sys_mremap),
+   LINX_ (__NR_add_key, sys_add_key),
+   LINX_ (__NR_request_key, sys_request_key),
+   LINXY (__NR_keyctl, sys_keyctl),
+   LINX_ (__NR_clone, sys_clone),
+   GENX_ (__NR_execve, sys_execve),
+   PLAX_ (__NR_mmap2, sys_mmap2),
+   LINX_ (__NR_fadvise64_64, sys_fadvise64_64),
+   PLAX_ (__NR_swapon, sys_swapon),
+   PLAX_ (__NR_swapoff, sys_swapoff),
+   GENXY (__NR_mprotect, sys_mprotect),
+   GENX_ (__NR_msync, sys_msync),
+   GENX_ (__NR_mlock, sys_mlock),
+   GENX_ (__NR_munlock, sys_munlock),
+   GENX_ (__NR_mlockall, sys_mlockall),
+   LINX_ (__NR_munlockall, sys_munlockall),
+   GENXY (__NR_mincore, sys_mincore),
+   GENX_ (__NR_madvise, sys_madvise),
+   LINX_ (__NR_mbind, sys_mbind),
+   LINXY (__NR_get_mempolicy, sys_get_mempolicy),
+   LINX_ (__NR_set_mempolicy, sys_set_mempolicy),
+   LINXY (__NR_move_pages, sys_move_pages),
+   LINXY (__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo),
+   LINXY (__NR_perf_event_open, sys_perf_event_open),
+   LINXY (__NR_accept4, sys_accept4),
+   LINXY (__NR_recvmmsg, sys_recvmmsg),
+   PLAX_ (__NR_set_thread_area, sys_set_thread_area),
+   GENXY (__NR_wait4, sys_wait4),
+   LINXY (__NR_prlimit64, sys_prlimit64),
+   LINXY (__NR_fanotify_init, sys_fanotify_init),
+   LINX_ (__NR_fanotify_mark, sys_fanotify_mark),
+   LINXY (__NR_name_to_handle_at, sys_name_to_handle_at),
+   LINXY (__NR_open_by_handle_at, sys_open_by_handle_at),
+   LINXY (__NR_clock_adjtime, sys_clock_adjtime),
+   LINX_ (__NR_syncfs, sys_syncfs),
+   LINXY (__NR_sendmmsg, sys_sendmmsg),
+   LINXY (__NR_process_vm_readv, sys_process_vm_readv),
+   LINX_ (__NR_process_vm_writev, sys_process_vm_writev),
+   LINX_ (__NR_kcmp, sys_kcmp),
+   LINX_ (__NR_renameat2, sys_renameat2),
+   LINXY (__NR_getrandom, sys_getrandom),
+   LINXY (__NR_memfd_create, sys_memfd_create),
+   LINXY (__NR_statx, sys_statx),
+   // Not yet handled; left unwrapped on purpose:
+   //   (__NR_bpf, sys_ni_syscall),
+   //   (__NR_execveat, sys_ni_syscall),
+   //   (__NR_userfaultfd, sys_ni_syscall),
+   //   (__NR_membarrier, sys_ni_syscall),
+   //   (__NR_mlock2, sys_ni_syscall),
+   //   (__NR_copy_file_range, sys_ni_syscall),
+   //   (__NR_preadv2, sys_ni_syscall),
+   //   (__NR_pwritev2, sys_ni_syscall),
+   //   (__NR_pkey_mprotect, sys_ni_syscall),
+   //   (__NR_pkey_alloc, sys_ni_syscall),
+   //   (__NR_pkey_free, sys_ni_syscall),
+};
+
+/* Look up the wrapper entry for 'sysno'.  Returns NULL for numbers
+   outside the table or for slots with no PRE handler installed. */
+SyscallTableEntry* ML_(get_linux_syscall_entry) (UInt sysno)
+{
+   const UInt syscall_main_table_size
+      = sizeof (syscall_main_table) / sizeof (syscall_main_table[0]);
+
+   /* Is it in the contiguous initial section of the table? */
+   if (sysno < syscall_main_table_size) {
+      SyscallTableEntry * sys = &syscall_main_table[sysno];
+
+      /* An entry with no PRE handler means "unwrapped". */
+      if (sys->before == NULL)
+         return NULL;  /* No entry. */
+      else
+         return sys;
+   }
+
+   /* Can't find a wrapper. */
+   return NULL;
+}
+
+#endif // defined(VGP_nanomips_linux)
+
+/*--------------------------------------------------------------------*/
+/*--- end syswrap-nanomips-linux.c ---*/
+/*--------------------------------------------------------------------*/
# undef UD2_1024
# undef UD2_PAGE
+/*---------------------- nanomips-linux --------------------*/
+#else
+#if defined(VGP_nanomips_linux)
+
+.global VG_(trampoline_stuff_start)
+VG_(trampoline_stuff_start):
+
+.set push
+.set noreorder
+
+/* Substitute sigreturn trampoline: load the rt_sigreturn number into
+   $t4 (the nanoMIPS syscall-number register) and trap into the kernel.
+   Control never returns here; the trailing 0 word is padding. */
+.global VG_(nanomips_linux_SUBST_FOR_rt_sigreturn)
+VG_(nanomips_linux_SUBST_FOR_rt_sigreturn):
+	li	$t4,__NR_rt_sigreturn
+	syscall	1
+	.long 0
+
+/* index(3) replacement: return a pointer to the first occurrence of
+   the char in $a1 within the string at $a0, or NULL if not found.
+   Note the match branch is taken before $a0 is advanced, so the
+   returned pointer addresses the matching byte itself. */
+.global VG_(nanomips_linux_REDIR_FOR_index)
+.type VG_(nanomips_linux_REDIR_FOR_index), @function
+VG_(nanomips_linux_REDIR_FOR_index):
+   index_loop:
+      lbu   $t0, 0($a0)
+      beqc  $t0, $a1, index_end
+      addiu $a0, $a0, 1
+      bnec  $t0, $zero, index_loop
+      move  $a0, $zero
+   index_end:
+      jrc  $ra
+.size VG_(nanomips_linux_REDIR_FOR_index), .-VG_(nanomips_linux_REDIR_FOR_index)
+
+/* strlen(3) replacement: $a0 advances one past the NUL by loop exit,
+   so the length is (end - start) - 1. */
+.global VG_(nanomips_linux_REDIR_FOR_strlen)
+.type VG_(nanomips_linux_REDIR_FOR_strlen), @function
+VG_(nanomips_linux_REDIR_FOR_strlen):
+      move  $t1, $a0
+   strlen_loop:
+      lbu   $t0, 0($a0)
+      addiu $a0, $a0, 1
+      bnec  $t0, $zero, strlen_loop
+      subu  $a0, $a0, $t1
+      addiu $a0, $a0, -1
+      jrc   $ra
+
+.size VG_(nanomips_linux_REDIR_FOR_strlen), .-VG_(nanomips_linux_REDIR_FOR_strlen)
+
+.set pop
+
+.global VG_(trampoline_stuff_end)
+VG_(trampoline_stuff_end):
+
+
/*---------------------- mips64-linux ----------------------*/
#else
#if defined(VGP_mips64_linux)
#endif
#endif
#endif
+#endif
/* Let the linker know we don't need an executable stack */
MARK_STACK_NO_EXEC
static Bool translations_allowable_from_seg ( NSegment const* seg, Addr addr )
{
# if defined(VGA_x86) || defined(VGA_s390x) || defined(VGA_mips32) \
- || defined(VGA_mips64)
+ || defined(VGA_mips64) || defined(VGA_nanomips)
Bool allowR = True;
# else
Bool allowR = False;
)
);
// t9 needs to be set to point to the start of the redirected function.
-# if defined(VGP_mips32_linux)
+# if defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
VgCallbackClosure* closure = (VgCallbackClosure*)closureV;
Int offB_GPR25 = offsetof(VexGuestMIPS32State, guest_r25);
addStmtToIRSB(bb, IRStmt_Put(offB_GPR25, mkU32(closure->readdr)));
)
);
// t9 needs to be set to point to the start of the redirected function.
-# if defined(VGP_mips32_linux)
+# if defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
Int offB_GPR25 = offsetof(VexGuestMIPS32State, guest_r25);
addStmtToIRSB(bb, IRStmt_Put(offB_GPR25, mkU32(closure->readdr)));
# endif
}
# endif
+#if defined(VGP_nanomips_linux)
+ vex_abiinfo.guest__use_fallback_LLSC
+ = SimHintiS(SimHint_fallback_llsc, VG_(clo_sim_hints));
+#endif
+
# if defined(VGP_arm64_linux)
vex_abiinfo.guest__use_fallback_LLSC
= /* The user asked explicitly */
vg_assert(sizeof(TTEntryH) <= 20);
# if defined(VGP_ppc32_linux) || defined(VGP_mips32_linux) \
|| (defined(VGP_mips64_linux) && defined(VGABI_N32)) \
- || defined(VGP_arm_linux)
+ || defined(VGP_nanomips_linux) || defined(VGP_arm_linux)
/* On PPC32, MIPS32, ARM32 platforms, alignof(ULong) == 8, so the
structure is larger than on other 32 bit targets. */
vg_assert(sizeof(TTEntryC) <= 96);
#if defined(VGP_ppc32_linux) || defined(VGP_ppc64be_linux) \
|| defined(VGP_ppc64le_linux) || defined(VGP_arm64_linux) \
- || defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
+ || defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+ || defined(VGP_nanomips_linux)
unsigned long VKI_PAGE_SHIFT = 12;
unsigned long VKI_PAGE_SIZE = 1UL << 12;
#endif
#if defined(VGP_ppc32_linux) \
|| defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
- || defined(VGP_arm64_linux)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
# define VG_STACK_GUARD_SZB 65536 // 1 or 16 pages
#else
# define VG_STACK_GUARD_SZB 8192 // 2 pages
Addr f0; Addr f1; Addr f2; Addr f3;
Addr f4; Addr f5; Addr f6; Addr f7; }
D3UnwindRegs;
-#elif defined(VGA_mips32) || defined(VGA_mips64)
+#elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
typedef
struct { Addr pc; Addr sp; Addr fp; Addr ra; }
D3UnwindRegs;
# error Unknown mips64 abi
# endif
# undef VG_PLAT_USES_PPCTOC
+#elif defined(VGP_nanomips_linux)
+# if defined (VG_LITTLEENDIAN)
+# define VG_ELF_DATA2XXX ELFDATA2LSB
+# elif defined (VG_BIGENDIAN)
+# define VG_ELF_DATA2XXX ELFDATA2MSB
+# else
+# error "Unknown endianness"
+# endif
+# if !defined(EM_NANOMIPS)
+# define EM_NANOMIPS 249 /* MIPS Tech nanoMIPS */
+# endif
+# define VG_ELF_MACHINE EM_NANOMIPS
+# define VG_ELF_CLASS ELFCLASS32
+# undef VG_PLAT_USES_PPCTOC
#else
# error Unknown platform
#endif
# define VG_STACK_PTR guest_SP
# define VG_FRAME_PTR guest_FP
# define VG_FPC_REG guest_fpc
-#elif defined(VGA_mips32)
-# define VG_INSTR_PTR guest_PC
-# define VG_STACK_PTR guest_r29
-# define VG_FRAME_PTR guest_r30
-#elif defined(VGA_mips64)
+#elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
# define VG_INSTR_PTR guest_PC
# define VG_STACK_PTR guest_r29
# define VG_FRAME_PTR guest_r30
defined(VGP_arm_linux) || \
defined(VGP_mips32_linux) || \
(defined(VGP_mips64_linux) && defined(VGABI_N32)) || \
+ defined(VGP_nanomips_linux) || \
defined(VGP_x86_solaris)
# define VG_MIN_MALLOC_SZB 8
// Nb: We always use 16 bytes for Darwin, even on 32-bits, so it can be used
UWord a3 );
extern SysRes VG_(mk_SysRes_mips64_linux)( ULong v0, ULong v1,
ULong a3 );
+extern SysRes VG_(mk_SysRes_nanomips_linux)( UWord a0, UWord a1 );
extern SysRes VG_(mk_SysRes_x86_solaris) ( Bool isErr, UInt val, UInt val2 );
extern SysRes VG_(mk_SysRes_amd64_solaris) ( Bool isErr, ULong val, ULong val2 );
extern SysRes VG_(mk_SysRes_Error) ( UWord val );
extern SysRes VG_(mk_SysRes_Success) ( UWord val );
-#if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
+#if defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
+ || defined(VGP_nanomips_linux)
/* On Linux/MIPS, VG_(mk_SysRes_Success) sets the second result word
to zero. Here is a version that allows setting both values. */
extern SysRes VG_(mk_SysRes_SuccessEx) ( UWord val, UWord valEx );
extern UInt VG_(mips64_linux_REDIR_FOR_strlen)( void* );
#endif
+#if defined(VGP_nanomips_linux)
+extern Addr VG_(nanomips_linux_SUBST_FOR_rt_sigreturn);
+extern Char* VG_(nanomips_linux_REDIR_FOR_index)( const Char*, Int );
+extern UInt VG_(nanomips_linux_REDIR_FOR_strlen)( void* );
+#endif
+
#if defined(VGP_x86_solaris)
extern SizeT VG_(x86_solaris_REDIR_FOR_strcmp)(const HChar *, const HChar *);
extern SizeT VG_(x86_solaris_REDIR_FOR_strlen)(const HChar *);
return merged & VG_TT_FAST_MASK;
}
-#elif defined(VGA_s390x) || defined(VGA_arm)
+#elif defined(VGA_s390x) || defined(VGA_arm) || defined(VGA_nanomips)
static inline UWord VG_TT_FAST_HASH ( Addr guest ) {
// Instructions are 2-byte aligned.
UWord merged = ((UWord)guest) >> 1;
# define FCS_h3 56
#elif defined(VGA_x86) || defined(VGA_arm) || defined(VGA_ppc32) \
- || defined(VGA_mips32)
+ || defined(VGA_mips32) || defined(VGA_nanomips)
// And all other 32-bit hosts
# define VG_FAST_CACHE_SET_BITS 5
# define FCS_g0 0
sp = user_mod.regs.gpr[1];
#elif defined(VGA_s390x)
sp = user_mod.regs.gprs[15];
-#elif defined(VGA_mips32)
+#elif defined(VGA_mips32) || defined(VGA_nanomips)
long long *p = (long long *)user_mod.regs;
sp = p[29];
#elif defined(VGA_mips64)
#elif defined(VGA_s390x)
XERROR(0, "(fn32) s390x has no 32bits implementation");
-#elif defined(VGA_mips32)
+#elif defined(VGA_mips32) || defined(VGA_nanomips)
/* put check arg in register 4 */
p[4] = check;
/* put NULL return address in ra */
user_mod.regs.gprs[15] = sp;
/* set program counter */
user_mod.regs.psw.addr = shared64->invoke_gdbserver;
-#elif defined(VGA_mips32)
+#elif defined(VGA_mips32) || defined(VGA_nanomips)
assert(0); // cannot vgdb a 64 bits executable with a 32 bits exe
#elif defined(VGA_mips64)
/* put check arg in register 4 */