the changes to do with reading and using ELF and DWARF3 info.
This breaks all targets except amd64-linux and x86-linux.
git-svn-id: svn://svn.valgrind.org/valgrind/trunk@10982
# automake, but this does not really matter and seems hard to avoid.
AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@ = \
- -I$(top_srcdir) \
- -I$(top_srcdir)/include \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/include \
-I$(top_srcdir)/VEX/pub \
-DVGA_@VGCONF_ARCH_PRI@=1 \
-DVGO_@VGCONF_OS@=1 \
-DVGP_@VGCONF_ARCH_PRI@_@VGCONF_OS@=1
if VGCONF_HAVE_PLATFORM_SEC
AM_CPPFLAGS_@VGCONF_PLATFORM_SEC_CAPS@ = \
- -I$(top_srcdir) \
- -I$(top_srcdir)/include \
+ -I$(top_srcdir) \
+ -I$(top_srcdir)/include \
-I$(top_srcdir)/VEX/pub \
-DVGA_@VGCONF_ARCH_SEC@=1 \
-DVGO_@VGCONF_OS@=1 \
AM_CFLAGS_PPC64_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE)
AM_CCASFLAGS_PPC64_LINUX = $(AM_CPPFLAGS_PPC64_LINUX) @FLAG_M64@ -g
+AM_FLAG_M3264_ARM_LINUX = @FLAG_M32@
+AM_CFLAGS_ARM_LINUX = @FLAG_M32@ @PREFERRED_STACK_BOUNDARY@ \
+ $(AM_CFLAGS_BASE)
+AM_CCASFLAGS_ARM_LINUX = $(AM_CPPFLAGS_ARM_LINUX) @FLAG_M32@ -g
+
AM_FLAG_M3264_PPC32_AIX5 = @FLAG_MAIX32@
AM_CFLAGS_PPC32_AIX5 = @FLAG_MAIX32@ -mcpu=powerpc $(AM_CFLAGS_BASE)
AM_CCASFLAGS_PPC32_AIX5 = $(AM_CPPFLAGS_PPC32_AIX5) \
PRELOAD_LDFLAGS_AMD64_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@
PRELOAD_LDFLAGS_PPC32_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@
PRELOAD_LDFLAGS_PPC64_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@
+PRELOAD_LDFLAGS_ARM_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@
PRELOAD_LDFLAGS_PPC32_AIX5 = $(PRELOAD_LDFLAGS_COMMON_AIX5) @FLAG_MAIX32@
PRELOAD_LDFLAGS_PPC64_AIX5 = $(PRELOAD_LDFLAGS_COMMON_AIX5) @FLAG_MAIX64@
PRELOAD_LDFLAGS_X86_DARWIN = $(PRELOAD_LDFLAGS_COMMON_DARWIN) -arch i386
$(TOOL_LDFLAGS_COMMON_LINUX) @FLAG_M64@ \
-Wl,-T,$(top_builddir)/valt_load_address_ppc64_linux.lds
+TOOL_LDFLAGS_ARM_LINUX = \
+ $(TOOL_LDFLAGS_COMMON_LINUX) @FLAG_M32@ \
+ -Wl,-T,$(top_builddir)/valt_load_address_arm_linux.lds
+
TOOL_LDFLAGS_PPC32_AIX5 = \
$(TOOL_LDFLAGS_COMMON_AIX5) @FLAG_MAIX32@
BUILT_SOURCES += $(top_builddir)/valt_load_address_ppc64_linux.lds
CLEANFILES += $(top_builddir)/valt_load_address_ppc64_linux.lds
endif
+if VGCONF_PLATFORMS_INCLUDE_ARM_LINUX
+BUILT_SOURCES += $(top_builddir)/valt_load_address_arm_linux.lds
+CLEANFILES += $(top_builddir)/valt_load_address_arm_linux.lds
+endif
if VGCONF_PLATFORMS_INCLUDE_PPC32_AIX5
# No need to generate $(top_builddir)/valt_load_address*.lds; the final
# executables can be linked to be at any address. They will be relocated by
-e '/\. = \(0x[0-9A-Fa-f]\+\|SEGMENT_START("[^"]\+", 0x[0-9A-Fa-f]\+)\) + SIZEOF_HEADERS/s/0x[0-9A-Fa-f]\+/valt_load_address/g' > $@ \
|| rm -f $@
+$(top_builddir)/valt_load_address_arm_linux.lds: Makefile
+ $(CC) @FLAG_M32@ -Wl,--verbose -nostdlib 2>&1 | sed \
+ -e '1,/^=====\+$$/d' \
+ -e '/^=====\+$$/,/.\*/d' \
+ -e '/\. = \(0x[0-9A-Fa-f]\+\|SEGMENT_START("[^"]\+", 0x[0-9A-Fa-f]\+)\) + SIZEOF_HEADERS/s/0x[0-9A-Fa-f]\+/valt_load_address/g' > $@ \
+ || rm -f $@
+
#----------------------------------------------------------------------------
# vgpreload_<tool>-<platform>.a stuff
#----------------------------------------------------------------------------
LIBREPLACEMALLOC_PPC64_LINUX = \
$(top_builddir)/coregrind/libreplacemalloc_toolpreload-ppc64-linux.a
+LIBREPLACEMALLOC_ARM_LINUX = \
+ $(top_builddir)/coregrind/libreplacemalloc_toolpreload-arm-linux.a
+
LIBREPLACEMALLOC_PPC32_AIX5 = \
$(top_builddir)/coregrind/libreplacemalloc_toolpreload-ppc32-aix5.a
$(LIBREPLACEMALLOC_PPC64_LINUX) \
-Wl,--no-whole-archive
+LIBREPLACEMALLOC_LDFLAGS_ARM_LINUX = \
+ -Wl,--whole-archive \
+ $(LIBREPLACEMALLOC_ARM_LINUX) \
+ -Wl,--no-whole-archive
+
LIBREPLACEMALLOC_LDFLAGS_PPC32_AIX5 = \
$(LIBREPLACEMALLOC_PPC32_AIX5)
$(CC) $(LIBVEX_CFLAGS) -O -S -o auxprogs/genoffsets.s \
auxprogs/genoffsets.c
grep xyzzy auxprogs/genoffsets.s | grep define \
- | sed "s/xyzzy\\$$//g" | sed "s/xyzzy//g" \
+ | sed "s/xyzzy\\$$//g" \
+ | sed "s/xyzzy#//g" \
+ | sed "s/xyzzy//g" \
> pub/libvex_guest_offsets.h
rm -f auxprogs/genoffsets.s
-#!/bin/sh
+#!/bin/bash
# Do an automated test which involves building and regtesting version
# 1.6 of the GNU Scientific Library (gsl). This has proven to be a
cg_main.c \
cg-x86-amd64.c \
cg-ppc32.c \
- cg-ppc64.c
+ cg-ppc64.c \
+ cg-arm.c
cachegrind_@VGCONF_ARCH_PRI@_@VGCONF_OS@_SOURCES = \
$(CACHEGRIND_SOURCES_COMMON)
--- /dev/null
+
+/*--------------------------------------------------------------------*/
+/*--- ARM-specific definitions. cg-arm.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Cachegrind, a Valgrind tool for cache
+ profiling programs.
+
+ Copyright (C) 2005-2009 Johan Bjork
+ jbjoerk@gmail.com
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGA_arm)
+
+#include "pub_tool_basics.h"
+#include "pub_tool_libcbase.h"
+#include "pub_tool_libcassert.h"
+#include "pub_tool_libcprint.h"
+
+#include "cg_arch.h"
+
+void VG_(configure_caches)(cache_t* I1c, cache_t* D1c, cache_t* L2c,
+ Bool all_caches_clo_defined)
+{
+ // Set caches to default values (approximately a Cortex-A8; not auto-detected)
+ *I1c = (cache_t) { 16384, 4, 64 };
+ *D1c = (cache_t) { 16384, 4, 64 };
+ *L2c = (cache_t) { 262144, 8, 64 };
+
+ if (!all_caches_clo_defined) {
+ VG_(message)(Vg_DebugMsg,
+ "Warning: Cannot auto-detect cache config on ARM, using one "
+ "or more defaults ");
+ }
+}
+
+#endif // #if defined(VGA_arm)
+
+/*--------------------------------------------------------------------*/
+/*--- end cg-arm.c ---*/
+/*--------------------------------------------------------------------*/
/* How many bits at the bottom of an instruction address are
guaranteed to be zero? */
-#if defined(VGA_ppc32) || defined(VGA_ppc64)
+#if defined(VGA_ppc32) || defined(VGA_ppc64) || defined(VGA_arm)
# define N_IADDR_LO_ZERO_BITS 2
#elif defined(VGA_x86) || defined(VGA_amd64)
# define N_IADDR_LO_ZERO_BITS 0
esac
;;
+ armv7*)
+ AC_MSG_RESULT([ok (${host_cpu})])
+ ARCH_MAX="arm"
+ ;;
+
*)
AC_MSG_RESULT([no (${host_cpu})])
AC_MSG_ERROR([Unsupported host architecture. Sorry])
valt_load_address_inner="0x0"
AC_MSG_RESULT([ok (${ARCH_MAX}-${VGCONF_OS})])
;;
+ arm-linux)
+ VGCONF_ARCH_PRI="arm"
+ VGCONF_PLATFORM_PRI_CAPS="ARM_LINUX"
+ VGCONF_PLATFORM_SEC_CAPS=""
+ valt_load_address_normal="0x38000000"
+ valt_load_address_inner="0x28000000"
+ AC_MSG_RESULT([ok (${host_cpu}-${host_os})])
+ ;;
*)
VGCONF_ARCH_PRI="unknown"
VGCONF_ARCH_SEC="unknown"
AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_PPC64,
test x$VGCONF_PLATFORM_PRI_CAPS = xPPC64_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xPPC64_AIX5 )
+AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_ARM,
+ test x$VGCONF_PLATFORM_PRI_CAPS = xARM_LINUX )
# Set up VGCONF_PLATFORMS_INCLUDE_<platform>. Either one or two of these
# become defined.
-o x$VGCONF_PLATFORM_SEC_CAPS = xPPC32_LINUX)
AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_PPC64_LINUX,
test x$VGCONF_PLATFORM_PRI_CAPS = xPPC64_LINUX)
+AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_ARM_LINUX,
+ test x$VGCONF_PLATFORM_PRI_CAPS = xARM_LINUX)
AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_PPC32_AIX5,
test x$VGCONF_PLATFORM_PRI_CAPS = xPPC32_AIX5 \
test x$VGCONF_PLATFORM_PRI_CAPS = xX86_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xPPC32_LINUX \
- -o x$VGCONF_PLATFORM_PRI_CAPS = xPPC64_LINUX)
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xPPC64_LINUX \
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xARM_LINUX )
AM_CONDITIONAL(VGCONF_OS_IS_AIX5,
test x$VGCONF_PLATFORM_PRI_CAPS = xPPC32_AIX5 \
-o x$VGCONF_PLATFORM_PRI_CAPS = xPPC64_AIX5)
none/tests/ppc32/Makefile
none/tests/ppc64/Makefile
none/tests/x86/Makefile
+ none/tests/arm/Makefile
none/tests/linux/Makefile
none/tests/darwin/Makefile
none/tests/x86-linux/Makefile
#----------------------------------------------------------------------------
AM_CPPFLAGS_@VGCONF_PLATFORM_PRI_CAPS@ += \
- -I$(top_srcdir)/coregrind \
+ -I$(top_srcdir)/coregrind \
-DVG_LIBDIR="\"$(pkglibdir)"\" \
-DVG_PLATFORM="\"@VGCONF_ARCH_PRI@-@VGCONF_OS@\""
if VGCONF_HAVE_PLATFORM_SEC
AM_CPPFLAGS_@VGCONF_PLATFORM_SEC_CAPS@ += \
- -I$(top_srcdir)/coregrind \
+ -I$(top_srcdir)/coregrind \
-DVG_LIBDIR="\"$(pkglibdir)"\" \
-DVG_PLATFORM="\"@VGCONF_ARCH_SEC@-@VGCONF_OS@\""
endif
m_dispatch/dispatch-amd64-linux.S \
m_dispatch/dispatch-ppc32-linux.S \
m_dispatch/dispatch-ppc64-linux.S \
+ m_dispatch/dispatch-arm-linux.S \
m_dispatch/dispatch-ppc32-aix5.S \
m_dispatch/dispatch-ppc64-aix5.S \
m_dispatch/dispatch-x86-darwin.S \
m_sigframe/sigframe-amd64-linux.c \
m_sigframe/sigframe-ppc32-linux.c \
m_sigframe/sigframe-ppc64-linux.c \
+ m_sigframe/sigframe-arm-linux.c \
m_sigframe/sigframe-ppc32-aix5.c \
m_sigframe/sigframe-ppc64-aix5.c \
m_sigframe/sigframe-x86-darwin.c \
m_syswrap/syscall-amd64-linux.S \
m_syswrap/syscall-ppc32-linux.S \
m_syswrap/syscall-ppc64-linux.S \
+ m_syswrap/syscall-arm-linux.S \
m_syswrap/syscall-ppc32-aix5.S \
m_syswrap/syscall-ppc64-aix5.S \
m_syswrap/syscall-x86-darwin.S \
m_syswrap/syswrap-amd64-linux.c \
m_syswrap/syswrap-ppc32-linux.c \
m_syswrap/syswrap-ppc64-linux.c \
+ m_syswrap/syswrap-arm-linux.c \
m_syswrap/syswrap-ppc32-aix5.c \
m_syswrap/syswrap-ppc64-aix5.c \
m_syswrap/syswrap-x86-darwin.c \
ehdr->e_ident[EI_OSABI] == ELFOSABI_LINUX)) {
platform = "x86-linux";
}
+ else
+ if (ehdr->e_machine == EM_ARM &&
+ (ehdr->e_ident[EI_OSABI] == ELFOSABI_SYSV ||
+ ehdr->e_ident[EI_OSABI] == ELFOSABI_LINUX)) {
+ platform = "arm-linux";
+ }
}
else if (header[EI_DATA] == ELFDATA2MSB) {
if (ehdr->e_machine == EM_PPC &&
platform = "ppc32-linux";
}
}
+
} else if (n_bytes >= sizeof(Elf64_Ehdr) && header[EI_CLASS] == ELFCLASS64) {
const Elf64_Ehdr *ehdr = (Elf64_Ehdr *)header;
if ((0==strcmp(VG_PLATFORM,"x86-linux")) ||
(0==strcmp(VG_PLATFORM,"amd64-linux")) ||
(0==strcmp(VG_PLATFORM,"ppc32-linux")) ||
- (0==strcmp(VG_PLATFORM,"ppc64-linux")))
+ (0==strcmp(VG_PLATFORM,"ppc64-linux")) ||
+ (0==strcmp(VG_PLATFORM,"arm-linux")))
default_platform = VG_PLATFORM;
else
barf("Unknown VG_PLATFORM '%s'", VG_PLATFORM);
{
SysRes res;
aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
-# if defined(VGP_x86_linux) || defined(VGP_ppc32_linux)
+# if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
+ || defined(VGP_arm_linux)
/* mmap2 uses 4096 chunks even if actual page size is bigger. */
aspacem_assert((offset % 4096) == 0);
res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
parse_procselfmaps( read_maps_callback, NULL );
+#if defined(VGP_arm_linux)
+ /* ARM puts code at the end of memory that contains processor
+ specific stuff (cmpxchg, getting the thread local storage, etc.)
+ This isn't specified in /proc/self/maps, so do it here.
+
+ EAZG: Is this the proper place for this? It seems like this is
+ one of the few contexts in which we can punch holes in the map.
+ */
+ init_nsegment( &seg );
+ seg.kind = SkFileC;
+ seg.start = 0xFFFF0000;
+ seg.end = 0xFFFFEFFF;
+ seg.hasR = toBool(1);
+ seg.hasW = toBool(0);
+ seg.hasX = toBool(1);
+ seg.fnIdx = allocate_segname( "arm_commpage" );
+ add_segment( &seg );
+#endif
+
VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");
AM_SANITY_CHECK;
regs->dsisr = 0;
regs->result = 0;
+#elif defined(VGP_arm_linux)
+ regs->ARM_r0 = arch->vex.guest_R0;
+ regs->ARM_r1 = arch->vex.guest_R1;
+ regs->ARM_r2 = arch->vex.guest_R2;
+ regs->ARM_r3 = arch->vex.guest_R3;
+ regs->ARM_r4 = arch->vex.guest_R4;
+ regs->ARM_r5 = arch->vex.guest_R5;
+ regs->ARM_r6 = arch->vex.guest_R6;
+ regs->ARM_r7 = arch->vex.guest_R7;
+ regs->ARM_r8 = arch->vex.guest_R8;
+ regs->ARM_r9 = arch->vex.guest_R9;
+ regs->ARM_r10 = arch->vex.guest_R10;
+ regs->ARM_fp = arch->vex.guest_R11;
+ regs->ARM_ip = arch->vex.guest_R12;
+ regs->ARM_sp = arch->vex.guest_R13;
+ regs->ARM_lr = arch->vex.guest_R14;
+ regs->ARM_pc = arch->vex.guest_R15;
+ regs->ARM_cpsr = LibVEX_GuestARM_get_cpsr( &((ThreadArchState*)arch)->vex );
+
#else
# error Unknown ELF platform
#endif
DO(24); DO(25); DO(26); DO(27); DO(28); DO(29); DO(30); DO(31);
# undef DO
+#elif defined(VGP_arm_linux)
+ // TODO(review): nothing is dumped for ARM here yet -- confirm no regs are needed
+
#else
# error Unknown ELF platform
#endif
/*
Bool VG_(has_cpuid)(void)
*/
-.globl VG_(has_cpuid)
#if defined(VGA_x86)
+.text
+.globl VG_(has_cpuid)
VG_(has_cpuid):
pushl %ebp
movl %esp, %ebp
popl %ebp
ret
#elif defined(VGA_amd64)
+.text
+.globl VG_(has_cpuid)
VG_(has_cpuid):
movq $1, %rax
ret
void VG_(cpuid)(UInt eax,
UInt* eax_ret, UInt* ebx_ret, UInt* ecx_ret, UInt* edx_ret)
*/
-.globl VG_(cpuid)
#if defined(VGA_x86)
+.text
+.globl VG_(cpuid)
VG_(cpuid):
pushl %ebp
movl %esp, %ebp
popl %ebp
ret
#elif defined(VGA_amd64)
+.text
+.globl VG_(cpuid)
VG_(cpuid):
pushq %rbp
movq %rsp, %rbp
ret
#endif
-#if defined(VGO_linux)
+#if defined(VGA_x86) || defined(VGA_amd64)
/* Let the linker know we don't need an executable stack */
.section .note.GNU-stack,"",@progbits
#endif
(void*)(long)LibVEX_GuestPPC64_get_XER(vex));
return rc;
+#elif defined(VGP_arm_linux)
+ struct vki_user_regs_struct uregs;
+ VG_(memset)(&uregs, 0, sizeof(uregs));
+ uregs.ARM_r0 = vex->guest_R0;
+ uregs.ARM_r1 = vex->guest_R1;
+ uregs.ARM_r2 = vex->guest_R2;
+ uregs.ARM_r3 = vex->guest_R3;
+ uregs.ARM_r4 = vex->guest_R4;
+ uregs.ARM_r5 = vex->guest_R5;
+ uregs.ARM_r6 = vex->guest_R6;
+ uregs.ARM_r7 = vex->guest_R7;
+ uregs.ARM_r8 = vex->guest_R8;
+ uregs.ARM_r9 = vex->guest_R9;
+ uregs.ARM_r10 = vex->guest_R10;
+ uregs.ARM_fp = vex->guest_R11;
+ uregs.ARM_ip = vex->guest_R12;
+ uregs.ARM_sp = vex->guest_R13;
+ uregs.ARM_lr = vex->guest_R14;
+ uregs.ARM_pc = vex->guest_R15;
+ uregs.ARM_cpsr = LibVEX_GuestARM_get_cpsr(vex);
+ return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &uregs);
+
#elif defined(VGP_ppc32_aix5)
I_die_here;
if (regno == 1/*SP*/) { *a = regs->sp; return True; }
# elif defined(VGP_ppc64_linux)
if (regno == 1/*SP*/) { *a = regs->sp; return True; }
+# elif defined(VGP_arm_linux)
+ if (regno == 13) { *a = regs->sp; return True; }
+ if (regno == 11) { *a = regs->fp; return True; }
# elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
vg_assert(0); /* this function should never be called */
# else
return (UInt)__res;
}
+#elif defined(VGP_arm_linux)
+
+static UInt local_sys_write_stderr ( HChar* buf, Int n )
+{
+ volatile Int block[2];
+ block[0] = (Int)buf;
+ block[1] = n;
+ __asm__ volatile (
+ "mov r0, #1\n\t"
+ "ldr r1, [%0]\n\t"
+ "ldr r2, [%0, #4]\n\t"
+ "mov r7, #"VG_STRINGIFY(__NR_write)"\n\t"
+ "svc 0x0\n" /* write() */
+ "str r0, [%0]\n\t"
+ :
+ : "r" (block)
+ : "r0","r1","r2","r7"
+ );
+ if (block[0] < 0)
+ block[0] = -1;
+ return (UInt)block[0];
+}
+
+static UInt local_sys_getpid ( void )
+{
+ UInt __res;
+ __asm__ volatile (
+ "mov r7, #"VG_STRINGIFY(__NR_getpid)"\n"
+ "svc 0x0\n" /* getpid() */
+ "mov %0, r0\n"
+ : "=r" (__res)
+ :
+ : "r0", "r7" );
+ return __res;
+}
+
#elif defined(VGP_ppc32_aix5)
static UInt local_sys_write_stderr ( HChar* buf, Int n )
--- /dev/null
+
+/*--------------------------------------------------------------------*/
+/*--- The core dispatch loop, for jumping to a code address. ---*/
+/*--- dispatch-arm-linux.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2008-2009 Evan Geller
+ gaze@bea.ms
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_arm_linux)
+
+#include "pub_core_basics_asm.h"
+#include "pub_core_dispatch_asm.h"
+#include "pub_core_transtab_asm.h"
+#include "libvex_guest_offsets.h" /* for OFFSET_arm_R* */
+
+
+/*------------------------------------------------------------*/
+/*--- ---*/
+/*--- The dispatch loop. VG_(run_innerloop) is used to ---*/
+/*--- run all translations except no-redir ones. ---*/
+/*--- ---*/
+/*------------------------------------------------------------*/
+
+/*----------------------------------------------------*/
+/*--- Preamble (set everything up) ---*/
+/*----------------------------------------------------*/
+
+/* signature:
+UWord VG_(run_innerloop) ( void* guest_state, UWord do_profiling );
+*/
+.text
+.globl VG_(run_innerloop)
+VG_(run_innerloop):
+ push {r0, r1, r4, r5, r6, r7, r8, r9, fp, lr}
+
+ /* set FPSCR to vex-required default value */
+ mov r4, #0
+ fmxr fpscr, r4
+
+ /* r0 (hence also [sp,#0]) holds guest_state */
+ /* r1 holds do_profiling */
+ mov r8, r0
+ ldr r0, [r8, #OFFSET_arm_R15]
+
+ /* fall into main loop (the right one) */
+ cmp r1, #0 /* do_profiling */
+ beq VG_(run_innerloop__dispatch_unprofiled)
+ b VG_(run_innerloop__dispatch_profiled)
+
+
+/*----------------------------------------------------*/
+/*--- NO-PROFILING (standard) dispatcher ---*/
+/*----------------------------------------------------*/
+
+.global VG_(run_innerloop__dispatch_unprofiled)
+VG_(run_innerloop__dispatch_unprofiled):
+
+ /* AT ENTRY: r0 is next guest addr, r8 is possibly
+ modified guest state ptr */
+
+ /* Has the guest state pointer been messed with? If yes, exit. */
+ ldr r1, [sp, #0]
+ cmp r8, r1
+ bne gsp_changed
+
+ /* save the jump address in the guest state */
+ str r0, [r8, #OFFSET_arm_R15]
+
+ /* Are we out of timeslice? If yes, defer to scheduler. */
+ ldr r1, =VG_(dispatch_ctr)
+ ldr r2, [r1]
+ subs r2, r2, #1
+ str r2, [r1]
+ beq counter_is_zero
+
+ /* try a fast lookup in the translation cache */
+ // r0 = next guest, r1,r2,r3 scratch
+ ldr r1, =VG_TT_FAST_MASK // r1 = VG_TT_FAST_MASK
+ and r2, r1, r0, LSR #2 // r2 = entry #
+ ldr r1, =VG_(tt_fast) // r1 = &tt_fast[0]
+ add r1, r1, r2, LSL #3 // r1 = &tt_fast[entry#]
+ ldr r3, [r1, #0] /* .guest */
+ ldr r1, [r1, #4] /* .host */
+ cmp r0, r3
+ bne fast_lookup_failed
+ // r1: live, next-host r8: live, gsp
+ // r2: entry # (but not live)
+ // r0, r3: dead
+
+ /* Found a match. Jump to .host. */
+ blx r1
+ b VG_(run_innerloop__dispatch_unprofiled)
+.ltorg
+ /*NOTREACHED*/
+
+/*----------------------------------------------------*/
+/*--- PROFILING dispatcher (can be much slower) ---*/
+/*----------------------------------------------------*/
+
+.global VG_(run_innerloop__dispatch_profiled)
+VG_(run_innerloop__dispatch_profiled):
+
+ /* AT ENTRY: r0 is next guest addr, r8 is possibly
+ modified guest state ptr */
+
+ /* Has the guest state pointer been messed with? If yes, exit. */
+ ldr r1, [sp, #0]
+ cmp r8, r1
+ bne gsp_changed
+
+ /* save the jump address in the guest state */
+ str r0, [r8, #OFFSET_arm_R15]
+
+ /* Are we out of timeslice? If yes, defer to scheduler. */
+ ldr r1, =VG_(dispatch_ctr)
+ ldr r2, [r1]
+ subs r2, r2, #1
+ str r2, [r1]
+ beq counter_is_zero
+
+ /* try a fast lookup in the translation cache */
+ // r0 = next guest, r1,r2,r3 scratch
+ ldr r1, =VG_TT_FAST_MASK // r1 = VG_TT_FAST_MASK
+ and r2, r1, r0, LSR #2 // r2 = entry #
+ ldr r1, =VG_(tt_fast) // r1 = &tt_fast[0]
+ add r1, r1, r2, LSL #3 // r1 = &tt_fast[entry#]
+ ldr r3, [r1, #0] /* .guest */
+ ldr r1, [r1, #4] /* .host */
+ cmp r0, r3
+ bne fast_lookup_failed
+ // r1: live, next-host r8: live, gsp
+ // r2: entry # (but not live)
+ // r0, r3: dead
+
+ /* increment bb profile counter */
+ ldr r0, =VG_(tt_fastN) // r0 = &tt_fastN[0]
+ ldr r0, [r0, r2, LSL #2] // r0 = tt_fast[entry #]
+ ldr r3, [r0] // *r0 ++
+ add r3, r3, #1
+ str r3, [r0]
+
+ /* Found a match. Jump to .host. */
+ blx r1
+ b VG_(run_innerloop__dispatch_profiled)
+ /*NOTREACHED*/
+
+/*----------------------------------------------------*/
+/*--- exit points ---*/
+/*----------------------------------------------------*/
+
+gsp_changed:
+ // r0 = next guest addr (R15), r8 = modified gsp
+ /* Someone messed with the gsp. Have to
+ defer to scheduler to resolve this. dispatch ctr
+ is not yet decremented, so no need to increment. */
+ /* R15 is NOT up to date here. First, need to write
+ r0 back to R15, but without trashing r8 since
+ that holds the value we want to return to the scheduler.
+ Hence use r1 transiently for the guest state pointer. */
+ ldr r1, [sp, #0]
+ str r0, [r1, #OFFSET_arm_R15]
+ mov r0, r8 // "return modified gsp"
+ b run_innerloop_exit
+ /*NOTREACHED*/
+
+counter_is_zero:
+ /* R15 is up to date here */
+ /* Back out the decrement of the dispatch ctr */
+ ldr r1, =VG_(dispatch_ctr)
+ ldr r2, [r1]
+ add r2, r2, #1
+ str r2, [r1]
+ mov r0, #VG_TRC_INNER_COUNTERZERO
+ b run_innerloop_exit
+ /*NOTREACHED*/
+
+fast_lookup_failed:
+ /* R15 is up to date here */
+ /* Back out the decrement of the dispatch ctr */
+ ldr r1, =VG_(dispatch_ctr)
+ ldr r2, [r1]
+ add r2, r2, #1
+ str r2, [r1]
+ mov r0, #VG_TRC_INNER_FASTMISS
+ b run_innerloop_exit
+ /*NOTREACHED*/
+
+/* All exits from the dispatcher go through here. %r0 holds
+ the return value.
+*/
+run_innerloop_exit:
+ /* We're leaving. Check that nobody messed with
+ FPSCR in ways we don't expect. */
+ fmrx r4, fpscr
+ bic r4, #0xF0000000 /* mask out NZCV */
+ bic r4, #0x0000001F /* mask out IXC,UFC,OFC,DZC,IOC */
+ cmp r4, #0
+ bne invariant_violation
+ b run_innerloop_exit_REALLY
+
+invariant_violation:
+ mov r0, #VG_TRC_INVARIANT_FAILED
+ b run_innerloop_exit_REALLY
+
+run_innerloop_exit_REALLY:
+ add sp, sp, #8
+ pop {r4, r5, r6, r7, r8, r9, fp, pc}
+
+.size VG_(run_innerloop), .-VG_(run_innerloop)
+
+
+/*------------------------------------------------------------*/
+/*--- ---*/
+/*--- A special dispatcher, for running no-redir ---*/
+/*--- translations. Just runs the given translation once. ---*/
+/*--- ---*/
+/*------------------------------------------------------------*/
+
+/* signature:
+void VG_(run_a_noredir_translation) ( UWord* argblock );
+*/
+
+/* Run a no-redir translation. argblock points to 4 UWords, 2 to carry args
+ and 2 to carry results:
+ 0: input: ptr to translation
+ 1: input: ptr to guest state
+ 2: output: next guest PC
+ 3: output: guest state pointer afterwards (== thread return code)
+*/
+.global VG_(run_a_noredir_translation)
+VG_(run_a_noredir_translation):
+ push {r0,r1 /* EABI compliance */, r4-r12, lr}
+ ldr r8, [r0, #4]
+ mov lr, pc
+ ldr pc, [r0, #0]
+
+ pop {r1}
+ str r0, [r1, #8]
+ str r8, [r1, #12]
+ pop {r1/*EABI compliance*/,r4-r12, pc}
+
+.size VG_(run_a_noredir_translation), .-VG_(run_a_noredir_translation)
+
+/* Let the linker know we don't need an executable stack */
+.section .note.GNU-stack,"",%progbits
+
+#endif // defined(VGP_arm_linux)
+
+/*--------------------------------------------------------------------*/
+/*--- end dispatch-arm-linux.S ---*/
+/*--------------------------------------------------------------------*/
arch->vex.guest_GPR2 = iifii.initial_client_TOC;
arch->vex.guest_CIA = iifii.initial_client_IP;
+# elif defined(VGP_arm_linux)
+ /* Zero out the initial state, and set up the simulated FPU in a
+ sane way. */
+ LibVEX_GuestARM_initialise(&arch->vex);
+
+ /* Zero out the shadow areas. */
+ VG_(memset)(&arch->vex_shadow1, 0, sizeof(VexGuestARMState));
+ VG_(memset)(&arch->vex_shadow2, 0, sizeof(VexGuestARMState));
+
+ arch->vex.guest_R13 = iifii.initial_client_SP;
+ arch->vex.guest_R15 = iifii.initial_client_IP;
+
+ /* This is just EABI stuff. */
+ // FIXME jrs: what's this for?
+ arch->vex.guest_R1 = iifii.initial_client_SP;
+
# else
# error Unknown platform
# endif
Assertery.
------------------------------------------------------------------ */
-#if defined(VGP_x86_linux) || defined(VGP_x86_darwin)
-# define GET_REAL_PC_SP_AND_FP(pc, sp, fp) \
- asm("call 0f;" \
- "0: popl %0;" \
- "movl %%esp, %1;" \
- "movl %%ebp, %2;" \
- : "=r" (pc),\
- "=r" (sp),\
- "=r" (fp));
-#elif defined(VGP_amd64_linux) || defined(VGP_amd64_darwin)
-# define GET_REAL_PC_SP_AND_FP(pc, sp, fp) \
- asm("leaq 0(%%rip), %0;" \
- "movq %%rsp, %1;" \
- "movq %%rbp, %2;" \
- : "=r" (pc),\
- "=r" (sp),\
- "=r" (fp));
+#if defined(VGP_x86_linux) || defined(VGP_x86_darwin)
+# define GET_STARTREGS(srP) \
+ { UInt eip, esp, ebp; \
+ __asm__ __volatile__( \
+ "call 0f;" \
+ "0: popl %0;" \
+ "movl %%esp, %1;" \
+ "movl %%ebp, %2;" \
+ : "=r" (eip), "=r" (esp), "=r" (ebp) \
+ : /* reads none */ \
+ : "memory" \
+ ); \
+ (srP)->r_pc = (ULong)eip; \
+ (srP)->r_sp = (ULong)esp; \
+ (srP)->misc.X86.r_ebp = ebp; \
+ }
+#elif defined(VGP_amd64_linux) || defined(VGP_amd64_darwin)
+# define GET_STARTREGS(srP) \
+ { ULong rip, rsp, rbp; \
+ __asm__ __volatile__( \
+ "leaq 0(%%rip), %0;" \
+ "movq %%rsp, %1;" \
+ "movq %%rbp, %2;" \
+ : "=r" (rip), "=r" (rsp), "=r" (rbp) \
+ : /* reads none */ \
+ : "memory" \
+ ); \
+ (srP)->r_pc = rip; \
+ (srP)->r_sp = rsp; \
+ (srP)->misc.AMD64.r_rbp = rbp; \
+ }
#elif defined(VGP_ppc32_linux) || defined(VGP_ppc32_aix5)
-# define GET_REAL_PC_SP_AND_FP(pc, sp, fp) \
- asm("mflr 0;" /* r0 = lr */ \
- "bl m_libcassert_get_ip;" /* lr = pc */ \
- "m_libcassert_get_ip:\n" \
- "mflr %0;" \
- "mtlr 0;" /* restore lr */ \
- "mr %1,1;" \
- "mr %2,1;" \
- : "=r" (pc), \
- "=r" (sp), \
- "=r" (fp) \
- : /* reads none */ \
- : "r0" /* trashed */ );
+# define GET_STARTREGS(srP) \
+ { UInt cia, r1, lr; \
+ __asm__ __volatile__( \
+ "mflr 0;" /* r0 = lr */ \
+ "bl m_libcassert_get_ip;" /* lr = pc */ \
+ "m_libcassert_get_ip:\n" \
+ "mflr %0;" /* %0 = pc */ \
+ "mtlr 0;" /* restore lr */ \
+ "mr %1,1;" /* %1 = r1 */ \
+ "mr %2,0;" /* %2 = lr */ \
+ : "=r" (cia), "=r" (r1), "=r" (lr) \
+ : /* reads none */ \
+ : "r0" /* trashed */ \
+ ); \
+ srP->r_pc = (ULong)cia; \
+ srP->r_sp = (ULong)r1; \
+ srP->misc.PPC32.lr = lr; \
+ }
#elif defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5)
-# define GET_REAL_PC_SP_AND_FP(pc, sp, fp) \
- asm("mflr 0;" /* r0 = lr */ \
- "bl .m_libcassert_get_ip;" /* lr = pc */ \
- ".m_libcassert_get_ip:\n" \
- "mflr %0;" \
- "mtlr 0;" /* restore lr */ \
- "mr %1,1;" \
- "mr %2,1;" \
- : "=r" (pc), \
- "=r" (sp), \
- "=r" (fp) \
- : /* reads none */ \
- : "r0" /* trashed */ );
+# define GET_STARTREGS(srP) \
+ { ULong cia, r1, lr; \
+ __asm__ __volatile__( \
+ "mflr 0;" /* r0 = lr */ \
+ "bl .m_libcassert_get_ip;" /* lr = pc */ \
+ ".m_libcassert_get_ip:\n" \
+ "mflr %0;" /* %0 = pc */ \
+ "mtlr 0;" /* restore lr */ \
+ "mr %1,1;" /* %1 = r1 */ \
+ "mr %2,0;" /* %2 = lr */ \
+ : "=r" (cia), "=r" (r1), "=r" (lr) \
+ : /* reads none */ \
+ : "r0" /* trashed */ \
+ ); \
+ srP->r_pc = cia; \
+ srP->r_sp = r1; \
+ srP->misc.PPC64.lr = lr; \
+ }
+#elif defined(VGP_arm_linux)
+# define GET_STARTREGS(srP) \
+ { UInt block[5]; \
+ __asm__ __volatile__( \
+ "str r15, [%0, #+0];" \
+ "str r14, [%0, #+4];" \
+ "str r13, [%0, #+8];" \
+ "str r12, [%0, #+12];" \
+ "str r11, [%0, #+16];" \
+ : /* out */ \
+ : /* in */ "r"(&block[0]) \
+ : /* trash */ "memory" \
+ ); \
+ (srP)->r_pc = block[0] - 8; \
+ (srP)->r_sp = block[1]; \
+ (srP)->misc.ARM.r14 = block[2]; \
+ (srP)->misc.ARM.r12 = block[3]; \
+ (srP)->misc.ARM.r11 = block[4]; \
+ }
#else
# error Unknown platform
#endif
}
__attribute__ ((noreturn))
-static void report_and_quit ( const Char* report,
- Addr ip, Addr sp, Addr fp, Addr lr )
+static void report_and_quit ( const Char* report,
+ UnwindStartRegs* startRegsIN )
{
Addr stacktop;
Addr ips[BACKTRACE_DEPTH];
// If necessary, fake up an ExeContext which is of our actual real CPU
// state. Could cause problems if we got the panic/exception within the
// execontext/stack dump/symtab code. But it's better than nothing.
- if (0 == ip && 0 == sp && 0 == fp) {
- GET_REAL_PC_SP_AND_FP(ip, sp, fp);
+ UnwindStartRegs startRegs;
+ VG_(memset)(&startRegs, 0, sizeof(startRegs));
+
+ if (startRegsIN == NULL) {
+ GET_STARTREGS(&startRegs);
+ } else {
+ startRegs = *startRegsIN;
}
stacktop = tst->os_state.valgrind_stack_init_SP;
ips, BACKTRACE_DEPTH,
NULL/*array to dump SP values in*/,
NULL/*array to dump FP values in*/,
- ip, sp, fp, lr, sp, stacktop
+ &startRegs, stacktop
);
VG_(clo_xml) = False;
VG_(pp_StackTrace) (ips, n_ips);
if (!VG_STREQ(buf, ""))
VG_(printf)("%s: %s\n", component, buf );
- report_and_quit(bugs_to, 0,0,0,0);
+ report_and_quit(bugs_to, NULL);
}
__attribute__ ((noreturn))
static void panic ( Char* name, Char* report, Char* str,
- Addr ip, Addr sp, Addr fp, Addr lr )
+ UnwindStartRegs* startRegs )
{
if (VG_(clo_xml))
VG_(printf_xml)("</valgrindoutput>\n");
VG_(printf)("\n%s: the 'impossible' happened:\n %s\n", name, str);
- report_and_quit(report, ip, sp, fp, lr);
+ report_and_quit(report, startRegs);
}
-void VG_(core_panic_at) ( Char* str, Addr ip, Addr sp, Addr fp, Addr lr )
+void VG_(core_panic_at) ( Char* str, UnwindStartRegs* startRegs )
{
- panic("valgrind", VG_BUGS_TO, str, ip, sp, fp, lr);
+ panic("valgrind", VG_BUGS_TO, str, startRegs);
}
void VG_(core_panic) ( Char* str )
{
- VG_(core_panic_at)(str, 0,0,0,0);
+ VG_(core_panic_at)(str, NULL);
}
void VG_(tool_panic) ( Char* str )
{
- panic(VG_(details).name, VG_(details).bug_reports_to, str, 0,0,0,0);
+ panic(VG_(details).name, VG_(details).bug_reports_to, str, NULL);
}
/* Print some helpful-ish text about unimplemented things, and give up. */
res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_SOCKET, (UWord)&args);
return sr_isError(res) ? -1 : sr_Res(res);
-# elif defined(VGP_amd64_linux)
+# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux)
SysRes res;
res = VG_(do_syscall3)(__NR_socket, domain, type, protocol );
return sr_isError(res) ? -1 : sr_Res(res);
res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_CONNECT, (UWord)&args);
return sr_isError(res) ? -1 : sr_Res(res);
-# elif defined(VGP_amd64_linux)
+# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux)
SysRes res;
res = VG_(do_syscall3)(__NR_connect, sockfd, (UWord)serv_addr, addrlen);
return sr_isError(res) ? -1 : sr_Res(res);
res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_SEND, (UWord)&args);
return sr_isError(res) ? -1 : sr_Res(res);
-# elif defined(VGP_amd64_linux)
+# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux)
SysRes res;
res = VG_(do_syscall6)(__NR_sendto, sd, (UWord)msg,
count, VKI_MSG_NOSIGNAL, 0,0);
res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_GETSOCKNAME, (UWord)&args);
return sr_isError(res) ? -1 : sr_Res(res);
-# elif defined(VGP_amd64_linux)
+# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux)
SysRes res;
res = VG_(do_syscall3)( __NR_getsockname,
(UWord)sd, (UWord)name, (UWord)namelen );
res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_GETPEERNAME, (UWord)&args);
return sr_isError(res) ? -1 : sr_Res(res);
-# elif defined(VGP_amd64_linux)
+# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux)
SysRes res;
res = VG_(do_syscall3)( __NR_getpeername,
(UWord)sd, (UWord)name, (UWord)namelen );
res = VG_(do_syscall2)(__NR_socketcall, VKI_SYS_GETSOCKOPT, (UWord)&args);
return sr_isError(res) ? -1 : sr_Res(res);
-# elif defined(VGP_amd64_linux)
+# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux)
SysRes res;
res = VG_(do_syscall5)( __NR_getsockopt,
(UWord)sd, (UWord)level, (UWord)optname,
list[i] = (UInt)list16[i];
return sr_Res(sres);
-# elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
+# elif defined(VGP_amd64_linux) || defined(VGP_ppc64_linux) \
+ || defined(VGP_arm_linux) \
|| defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5) \
|| defined(VGO_darwin)
SysRes sres;
return VG_(threads)[tid].arch.vex.guest_LR;
# elif defined(VGA_x86) || defined(VGA_amd64)
return 0;
+# elif defined(VGA_arm)
+ return VG_(threads)[tid].arch.vex.guest_R14;
# else
# error "Unknown arch"
# endif
INSTR_PTR( VG_(threads)[tid].arch ) = ip;
}
+
+void VG_(get_UnwindStartRegs) ( /*OUT*/UnwindStartRegs* regs,
+ ThreadId tid )
+{
+# if defined(VGA_x86)
+ regs->r_pc = (ULong)VG_(threads)[tid].arch.vex.guest_EIP;
+ regs->r_sp = (ULong)VG_(threads)[tid].arch.vex.guest_ESP;
+ regs->misc.X86.r_ebp
+ = VG_(threads)[tid].arch.vex.guest_EBP;
+# elif defined(VGA_amd64)
+ regs->r_pc = VG_(threads)[tid].arch.vex.guest_RIP;
+ regs->r_sp = VG_(threads)[tid].arch.vex.guest_RSP;
+ regs->misc.AMD64.r_rbp
+ = VG_(threads)[tid].arch.vex.guest_RBP;
+# elif defined(VGA_ppc32)
+ regs->r_pc = (ULong)VG_(threads)[tid].arch.vex.guest_CIA;
+ regs->r_sp = (ULong)VG_(threads)[tid].arch.vex.guest_R1;
+ regs->misc.PPC32.r_lr
+ = VG_(threads)[tid].arch.vex.guest_LR;
+# elif defined(VGA_ppc64)
+ regs->r_pc = VG_(threads)[tid].arch.vex.guest_CIA;
+ regs->r_sp = VG_(threads)[tid].arch.vex.guest_R1;
+ regs->misc.PPC64.r_lr
+ = VG_(threads)[tid].arch.vex.guest_LR;
+# elif defined(VGA_arm)
+ regs->r_pc = (ULong)VG_(threads)[tid].arch.vex.guest_R15;
+ regs->r_sp = (ULong)VG_(threads)[tid].arch.vex.guest_R13;
+ regs->misc.ARM.r14
+ = VG_(threads)[tid].arch.vex.guest_R14;
+ regs->misc.ARM.r12
+ = VG_(threads)[tid].arch.vex.guest_R12;
+ regs->misc.ARM.r11
+ = VG_(threads)[tid].arch.vex.guest_R11;
+# else
+# error "Unknown arch"
+# endif
+}
+
+
void VG_(set_syscall_return_shadows) ( ThreadId tid,
/* shadow vals for the result */
UWord s1res, UWord s2res,
# elif defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
VG_(threads)[tid].arch.vex_shadow1.guest_GPR3 = s1res;
VG_(threads)[tid].arch.vex_shadow2.guest_GPR3 = s2res;
+# elif defined(VGP_arm_linux)
+ VG_(threads)[tid].arch.vex_shadow1.guest_R0 = s1res;
+ VG_(threads)[tid].arch.vex_shadow2.guest_R0 = s2res;
# elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
VG_(threads)[tid].arch.vex_shadow1.guest_GPR3 = s1res;
VG_(threads)[tid].arch.vex_shadow2.guest_GPR3 = s2res;
(*f)(vex->guest_R14);
(*f)(vex->guest_R15);
#elif defined(VGA_ppc32) || defined(VGA_ppc64)
- /* XXX ask tool about validity? */
(*f)(vex->guest_GPR0);
(*f)(vex->guest_GPR1);
(*f)(vex->guest_GPR2);
(*f)(vex->guest_GPR31);
(*f)(vex->guest_CTR);
(*f)(vex->guest_LR);
-
+#elif defined(VGA_arm)
+ (*f)(vex->guest_R0);
+ (*f)(vex->guest_R1);
+ (*f)(vex->guest_R2);
+ (*f)(vex->guest_R3);
+ (*f)(vex->guest_R4);
+ (*f)(vex->guest_R5);
+ (*f)(vex->guest_R6);
+ (*f)(vex->guest_R8);
+ (*f)(vex->guest_R9);
+ (*f)(vex->guest_R10);
+ (*f)(vex->guest_R11);
+ (*f)(vex->guest_R12);
+ (*f)(vex->guest_R13);
+ (*f)(vex->guest_R14);
#else
# error Unknown arch
#endif
*/
/* --------- State --------- */
-static Bool hwcaps_done = False;
+static Bool hwcaps_done = False;
/* --- all archs --- */
static VexArch va;
return True;
}
+#elif defined(VGA_arm)
+ {
+ va = VexArchARM;
+ vai.hwcaps = 0;
+ return True;
+ }
+
#else
# error "Unknown arch"
#endif
// produce a pointer to the actual entry point for the function.
void* VG_(fnptr_to_fnentry)( void* f )
{
-#if defined(VGP_x86_linux) || defined(VGP_amd64_linux) || \
- defined(VGP_ppc32_linux) || defined(VGO_darwin)
+#if defined(VGP_x86_linux) || defined(VGP_amd64_linux) \
+ || defined(VGP_arm_linux) \
+ || defined(VGP_ppc32_linux) || defined(VGO_darwin)
return f;
#elif defined(VGP_ppc64_linux) || defined(VGP_ppc32_aix5) \
|| defined(VGP_ppc64_aix5)
iters = 10;
# elif defined(VGP_ppc32_linux)
iters = 5;
+# elif defined(VGP_arm_linux)
+ iters = 1;
# elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
iters = 4;
# elif defined(VGO_darwin)
return VG_(memset)(s,c,n);
}
+/* EAZG: ARM's EABI will call floating point exception handlers in
+ libgcc which boil down to an abort or raise, that's usually defined
+ in libc. Instead, define them here. */
+#if defined(VGP_arm_linux)
+void raise(void);
+void raise(void){
+ VG_(printf)("Something called raise().\n");
+ vg_assert(0);
+}
+
+void abort(void);
+void abort(void){
+ VG_(printf)("Something called abort().\n");
+ vg_assert(0);
+}
+
+void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr0(void){
+ VG_(printf)("Something called __aeabi_unwind_cpp_pr0()\n");
+ vg_assert(0);
+}
+#endif
+
/* ---------------- Requirement 2 ---------------- */
/* Glibc's sysdeps/i386/elf/start.S has the following gem of a
"\tnop\n"
"\ttrap\n"
);
+#elif defined(VGP_arm_linux)
+asm("\n"
+ "\t.align 2\n"
+ "\t.global _start\n"
+ "_start:\n"
+ "\tldr r0, [pc, #36]\n"
+ "\tldr r1, [pc, #36]\n"
+ "\tadd r0, r1, r0\n"
+ "\tldr r1, [pc, #32]\n"
+ "\tadd r0, r1, r0\n"
+ "\tmvn r1, #15\n"
+ "\tand r0, r0, r1\n"
+ "\tmov r1, sp\n"
+ "\tmov sp, r0\n"
+ "\tmov r0, r1\n"
+ "\tb _start_in_C_linux\n"
+ "\t.word vgPlain_interim_stack\n"
+ "\t.word "VG_STRINGIFY(VG_STACK_GUARD_SZB)"\n"
+ "\t.word "VG_STRINGIFY(VG_STACK_ACTIVE_SZB)"\n"
+);
#else
# error "Unknown linux platform"
#endif
);
}
+# elif defined(VGP_arm_linux)
+ /* If we're using memcheck, use these intercepts right from
+ the start, otherwise ld.so makes a lot of noise. */
+ if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+ add_hardwired_spec(
+ "ld-linux.so.3", "strlen",
+ (Addr)&VG_(arm_linux_REDIR_FOR_strlen),
+ NULL
+ );
+ //add_hardwired_spec(
+ // "ld-linux.so.3", "index",
+ // (Addr)&VG_(arm_linux_REDIR_FOR_index),
+ // NULL
+ //);
+ add_hardwired_spec(
+ "ld-linux.so.3", "memcpy",
+ (Addr)&VG_(arm_linux_REDIR_FOR_memcpy),
+ NULL
+ );
+ }
+ /* nothing so far */
+
# elif defined(VGP_ppc32_aix5)
/* nothing so far */
It is called vg_replace_malloc.c because this filename appears in stack
traces, so we want the name to be (hopefully!) meaningful to users.
+
+ IMPORTANT: this file must not contain any floating point code, nor
+ any integer division. This is because on ARM these can cause calls
+ to helper functions, which will be unresolved within this .so.
+ Although it is usually the case that the client's ld.so instance
+ can bind them at runtime to the relevant functions in the client
+ executable, there is no guarantee of this; and so the client may
+ die via a runtime link failure. Hence the only safe approach is to
+ avoid such function calls in the first place. See "#define CALLOC"
+ below for a specific example.
+
+ A useful command is
+ for f in `find . -name "*preload*.so*"` ; \
+ do nm -A $f | grep " U " ; \
+ done
+
+ to see all the undefined symbols in all the preload shared objects.
------------------------------------------------------------------ */
#include "pub_core_basics.h"
#endif
+/* Compute the high word of the double-length unsigned product of U
+ and V. This is for calloc argument overflow checking; see comments
+ below. Algorithm as described in Hacker's Delight, chapter 8. */
+static UWord umulHW ( UWord u, UWord v )
+{
+ UWord u0, v0, w0, rHi;
+ UWord u1, v1, w1,w2,t;
+ UWord halfMask = sizeof(UWord)==4 ? (UWord)0xFFFF
+ : (UWord)0xFFFFFFFFULL;
+ UWord halfShift = sizeof(UWord)==4 ? 16 : 32;
+ u0 = u & halfMask;
+ u1 = u >> halfShift;
+ v0 = v & halfMask;
+ v1 = v >> halfShift;
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> halfShift);
+ w1 = t & halfMask;
+ w2 = t >> halfShift;
+ w1 = u0 * v1 + w1;
+ rHi = u1 * v1 + w2 + (w1 >> halfShift);
+ return rHi;
+}
+
+
/*------------------------------------------------------------*/
/*--- Replacing malloc() et al ---*/
/*------------------------------------------------------------*/
if (!init_done) init(); \
MALLOC_TRACE("calloc(%llu,%llu)", (ULong)nmemb, (ULong)size ); \
\
- /* Protect against overflow. See bug 24078. */ \
- if (size && nmemb > (SizeT)-1 / size) return NULL; \
+ /* Protect against overflow. See bug 24078. (that bug number is
+ invalid. Which one really?) */ \
+ /* But don't use division, since that produces an external symbol
+ reference on ARM, in the form of a call to __aeabi_uidiv. It's
+ normally OK, because ld.so manages to resolve it to something in the
+ executable, or one of its shared objects. But that isn't guaranteed
+ to be the case, and it has been observed to fail in rare cases, eg:
+ echo x | valgrind /bin/sed -n "s/.*-\>\ //p"
+ So instead compute the high word of the product and check it is zero. */ \
+ if (umulHW(size, nmemb) != 0) return NULL; \
v = (void*)VALGRIND_NON_SIMD_CALL2( info.tl_calloc, nmemb, size ); \
MALLOC_TRACE(" = %p\n", v ); \
return v; \
vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VR1));
vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VR1));
# endif
+
+# if defined(VGA_arm)
+ /* arm guest_state VFP regs must be 8 byte aligned for
+ loads/stores. */
+ vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_D0));
+ vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_D0));
+ vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_D0));
+ /* be extra paranoid .. */
+ vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex.guest_D1));
+ vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow1.guest_D1));
+ vg_assert(VG_IS_8_ALIGNED(& tst->arch.vex_shadow2.guest_D1));
+# endif
}
be in on entry to Vex-generated code, and they should be
unchanged on exit from it. Failure of this assertion
usually means a bug in Vex's code generation. */
+ //{ UInt xx;
+ // __asm__ __volatile__ (
+ // "\t.word 0xEEF12A10\n" // fmrx r2,fpscr
+ // "\tmov %0, r2" : "=r"(xx) : : "r2" );
+ // VG_(printf)("QQQQ new fpscr = %08x\n", xx);
+ //}
vg_assert2(0, "VG_(scheduler), phase 3: "
"run_innerloop detected host "
"state invariant failure", trc);
#elif defined(VGA_ppc32) || defined(VGA_ppc64)
# define VG_CLREQ_ARGS guest_GPR4
# define VG_CLREQ_RET guest_GPR3
+#elif defined(VGA_arm)
+# define VG_CLREQ_ARGS guest_R4
+# define VG_CLREQ_RET guest_R3
#else
# error Unknown arch
#endif
--- /dev/null
+
+/*--------------------------------------------------------------------*/
+/*--- Create/destroy signal delivery frames. ---*/
+/*--- sigframe-arm-linux.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Nicholas Nethercote
+ njn@valgrind.org
+ Copyright (C) 2004-2009 Paul Mackerras
+ paulus@samba.org
+ Copyright (C) 2008-2009 Evan Geller
+ gaze@bea.ms
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_arm_linux)
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_machine.h"
+#include "pub_core_options.h"
+#include "pub_core_sigframe.h"
+#include "pub_core_signals.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_trampoline.h"
+#include "pub_core_transtab.h" // VG_(discard_translations)
+
+
+struct vg_sig_private {
+ UInt magicPI;
+ UInt sigNo_private;
+ VexGuestARMState vex_shadow1;
+ VexGuestARMState vex_shadow2;
+};
+
+struct sigframe {
+ struct vki_ucontext uc;
+ unsigned long retcode[2];
+ struct vg_sig_private vp;
+};
+
+struct rt_sigframe {
+ vki_siginfo_t info;
+ struct sigframe sig;
+};
+
+static Bool extend ( ThreadState *tst, Addr addr, SizeT size )
+{
+ ThreadId tid = tst->tid;
+ NSegment const* stackseg = NULL;
+
+ if (VG_(extend_stack)(addr, tst->client_stack_szB)) {
+ stackseg = VG_(am_find_nsegment)(addr);
+ if (0 && stackseg)
+ VG_(printf)("frame=%#lx seg=%#lx-%#lx\n",
+ addr, stackseg->start, stackseg->end);
+ }
+
+ if (stackseg == NULL || !stackseg->hasR || !stackseg->hasW) {
+ VG_(message)(
+ Vg_UserMsg,
+ "Can't extend stack to %#lx during signal delivery for thread %d:",
+ addr, tid);
+ if (stackseg == NULL)
+ VG_(message)(Vg_UserMsg, " no stack segment");
+ else
+ VG_(message)(Vg_UserMsg, " too small or bad protection modes");
+
+ /* set SIGSEGV to default handler */
+ VG_(set_default_handler)(VKI_SIGSEGV);
+ VG_(synth_fault_mapping)(tid, addr);
+
+ /* The whole process should be about to die, since the default
+ action of SIGSEGV to kill the whole process. */
+ return False;
+ }
+
+ /* For tracking memory events, indicate the entire frame has been
+ allocated. */
+ VG_TRACK( new_mem_stack_signal, addr - VG_STACK_REDZONE_SZB,
+ size + VG_STACK_REDZONE_SZB, tid );
+
+ return True;
+}
+
+static void synth_ucontext( ThreadId tid, const vki_siginfo_t *si,
+ UWord trapno, UWord err, const vki_sigset_t *set,
+ struct vki_ucontext *uc){
+
+ ThreadState *tst = VG_(get_ThreadState)(tid);
+ struct vki_sigcontext *sc = &uc->uc_mcontext;
+
+ VG_(memset)(uc, 0, sizeof(*uc));
+
+ uc->uc_flags = 0;
+ uc->uc_link = 0;
+ uc->uc_sigmask = *set;
+ uc->uc_stack = tst->altstack;
+
+# define SC2(reg,REG) sc->arm_##reg = tst->arch.vex.guest_##REG
+ SC2(r0,R0);
+ SC2(r1,R1);
+ SC2(r2,R2);
+ SC2(r3,R3);
+ SC2(r4,R4);
+ SC2(r5,R5);
+ SC2(r6,R6);
+ SC2(r7,R7);
+ SC2(r8,R8);
+ SC2(r9,R9);
+ SC2(r10,R10);
+ SC2(fp,R11);
+ SC2(ip,R12);
+ SC2(sp,R13);
+ SC2(lr,R14);
+ SC2(pc,R15);
+# undef SC2
+
+ sc->trap_no = trapno;
+ sc->error_code = err;
+ sc->fault_address = (UInt)si->_sifields._sigfault._addr;
+}
+
+
+static void build_sigframe(ThreadState *tst,
+ struct sigframe *frame,
+ const vki_siginfo_t *siginfo,
+ const struct vki_ucontext *siguc,
+ void *handler, UInt flags,
+ const vki_sigset_t *mask,
+ void *restorer){
+
+ UWord trapno;
+ UWord err;
+ Int sigNo = siginfo->si_signo;
+ struct vg_sig_private *priv = &frame->vp;
+
+ VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, "signal handler frame",
+ (Addr)frame, offsetof(struct sigframe, vp));
+
+ if(siguc) {
+ trapno = siguc->uc_mcontext.trap_no;
+ err = siguc->uc_mcontext.error_code;
+ } else {
+ trapno = 0;
+ err = 0;
+ }
+
+ synth_ucontext(tst->tid, siginfo, trapno, err, mask, &frame->uc);
+
+ VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid,
+ (Addr)frame, offsetof(struct sigframe, vp));
+
+ priv->magicPI = 0x31415927;
+ priv->sigNo_private = sigNo;
+ priv->vex_shadow1 = tst->arch.vex_shadow1;
+ priv->vex_shadow2 = tst->arch.vex_shadow2;
+
+}
+
+
+
+/* EXPORTED */
+void VG_(sigframe_create)( ThreadId tid,
+ Addr sp_top_of_frame,
+ const vki_siginfo_t *siginfo,
+ const struct vki_ucontext *siguc,
+ void *handler,
+ UInt flags,
+ const vki_sigset_t *mask,
+ void *restorer )
+{
+// struct vg_sig_private *priv;
+ Addr sp = sp_top_of_frame;
+ ThreadState *tst;
+ Int sigNo = siginfo->si_signo;
+// Addr faultaddr;
+ UInt size;
+
+ tst = VG_(get_ThreadState)(tid);
+
+ size = flags & VKI_SA_SIGINFO ? sizeof(struct rt_sigframe) :
+ sizeof(struct sigframe);
+
+ sp -= size;
+ sp = VG_ROUNDDN(sp, 16);
+
+ if(!extend(tst, sp, size))
+ I_die_here; // XXX Incorrect behavior
+
+
+ if (flags & VKI_SA_SIGINFO){
+ struct rt_sigframe *rsf = (struct rt_sigframe *)sp;
+
+ /* Track our writes to siginfo */
+ VG_TRACK( pre_mem_write, Vg_CoreSignal, tst->tid, /* VVVVV */
+ "signal handler siginfo", (Addr)rsf,
+ offsetof(struct rt_sigframe, sig));
+
+ VG_(memcpy)(&rsf->info, siginfo, sizeof(vki_siginfo_t));
+
+ if(sigNo == VKI_SIGILL && siginfo->si_code > 0) {
+ rsf->info._sifields._sigfault._addr = (Addr *) (tst)->arch.vex.guest_R12; /* IP */
+ }
+ VG_TRACK( post_mem_write, Vg_CoreSignal, tst->tid, /* ^^^^^ */
+ (Addr)rsf, offsetof(struct rt_sigframe, sig));
+
+ build_sigframe(tst, &rsf->sig, siginfo, siguc,
+ handler, flags, mask, restorer);
+ tst->arch.vex.guest_R1 = (Addr)&rsf->info;
+ tst->arch.vex.guest_R2 = (Addr)&rsf->sig.uc;
+ }
+ else{
+ build_sigframe(tst, (struct sigframe *)sp, siginfo, siguc,
+ handler, flags, mask, restorer);
+ }
+
+ VG_(set_SP)(tid, sp);
+ VG_TRACK( post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR,
+ sizeof(Addr));
+ tst->arch.vex.guest_R0 = sigNo;
+
+ if(flags & VKI_SA_RESTORER)
+ tst->arch.vex.guest_R14 = (Addr) restorer;
+
+ tst->arch.vex.guest_R15 = (Addr) handler; /* R15 == PC */
+}
+
+
+/*------------------------------------------------------------*/
+/*--- Destroying signal frames ---*/
+/*------------------------------------------------------------*/
+
+/* EXPORTED */
+void VG_(sigframe_destroy)( ThreadId tid, Bool isRT )
+{
+ ThreadState *tst;
+ struct vg_sig_private *priv;
+ Addr sp;
+ UInt frame_size;
+ struct vki_sigcontext *mc;
+ Int sigNo;
+ Bool has_siginfo = isRT;
+
+ vg_assert(VG_(is_valid_tid)(tid));
+ tst = VG_(get_ThreadState)(tid);
+ sp = tst->arch.vex.guest_R13;
+
+ if (has_siginfo) {
+ struct rt_sigframe *frame = (struct rt_sigframe *)sp;
+ frame_size = sizeof(*frame);
+ mc = &frame->sig.uc.uc_mcontext;
+ priv = &frame->sig.vp;
+ vg_assert(priv->magicPI == 0x31415927);
+ tst->sig_mask = frame->sig.uc.uc_sigmask;
+ } else {
+ struct sigframe *frame = (struct sigframe *)sp;
+ frame_size = sizeof(*frame);
+ mc = &frame->uc.uc_mcontext;
+ priv = &frame->vp;
+ vg_assert(priv->magicPI == 0x31415927);
+ tst->sig_mask = frame->uc.uc_sigmask;
+ /*tst->sig_mask.sig[0] = frame->uc.uc_mcontext.oldmask;
+ tst->sig_mask.sig[1] = frame->uc.uc_mcontext._unused[3];
+ VG_(printf)("Setting signmask to %08x%08x\n",tst->sig_mask[0],tst->sig_mask[1]);
+*/
+ }
+ tst->tmp_sig_mask = tst->sig_mask;
+
+ sigNo = priv->sigNo_private;
+
+ //XXX: restore regs
+# define REST(reg,REG) tst->arch.vex.guest_##REG = mc->arm_##reg;
+ REST(r0,R0);
+ REST(r1,R1);
+ REST(r2,R2);
+ REST(r3,R3);
+ REST(r4,R4);
+ REST(r5,R5);
+ REST(r6,R6);
+ REST(r7,R7);
+ REST(r8,R8);
+ REST(r9,R9);
+ REST(r10,R10);
+ REST(fp,R11);
+ REST(ip,R12);
+ REST(sp,R13);
+ REST(lr,R14);
+ REST(pc,R15);
+# undef REST
+
+ tst->arch.vex_shadow1 = priv->vex_shadow1;
+ tst->arch.vex_shadow2 = priv->vex_shadow2;
+
+ VG_TRACK( die_mem_stack_signal, sp - VG_STACK_REDZONE_SZB,
+ frame_size + VG_STACK_REDZONE_SZB );
+
+ if (VG_(clo_trace_signals))
+ VG_(message)(Vg_DebugMsg,
+ "vg_pop_signal_frame (thread %d): isRT=%d valid magic; PC=%#x",
+ tid, has_siginfo, tst->arch.vex.guest_R15);
+
+ /* tell the tools */
+ VG_TRACK( post_deliver_signal, tid, sigNo );
+}
+
+#endif // defined(VGP_arm_linux)
+
+/*--------------------------------------------------------------------*/
+/*--- end sigframe-arm-linux.c ---*/
+/*--------------------------------------------------------------------*/
#if defined(VGP_x86_linux)
# define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.eip)
# define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.esp)
-# define VG_UCONTEXT_FRAME_PTR(uc) ((uc)->uc_mcontext.ebp)
# define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
/* Convert the value in uc_mcontext.eax into a SysRes. */ \
VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
-# define VG_UCONTEXT_LINK_REG(uc) 0 /* Dude, where's my LR? */
+# define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
+ { (srP)->r_pc = (ULong)((uc)->uc_mcontext.eip); \
+ (srP)->r_sp = (ULong)((uc)->uc_mcontext.esp); \
+ (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.ebp; \
+ }
#elif defined(VGP_amd64_linux)
# define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.rip)
# define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.rsp)
-# define VG_UCONTEXT_FRAME_PTR(uc) ((uc)->uc_mcontext.rbp)
# define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
/* Convert the value in uc_mcontext.rax into a SysRes. */ \
VG_(mk_SysRes_amd64_linux)( (uc)->uc_mcontext.rax )
-# define VG_UCONTEXT_LINK_REG(uc) 0 /* No LR on amd64 either */
+# define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
+ { (srP)->r_pc = (uc)->uc_mcontext.rip; \
+ (srP)->r_sp = (uc)->uc_mcontext.rsp; \
+ (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.rbp; \
+ }
#elif defined(VGP_ppc32_linux)
/* Comments from Paul Mackerras 25 Nov 05:
}
# define VG_UCONTEXT_LINK_REG(uc) ((uc)->uc_mcontext.gp_regs[VKI_PT_LNK])
+#elif defined(VGP_arm_linux)
+# define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.arm_pc)
+# define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.arm_sp)
+# define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
+ /* Convert the value in uc_mcontext.arm_r0 into a SysRes. */ \
+ VG_(mk_SysRes_arm_linux)( (uc)->uc_mcontext.arm_r0 )
+# define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
+ { (srP)->r_pc = (uc)->uc_mcontext.arm_pc; \
+ (srP)->r_sp = (uc)->uc_mcontext.arm_sp; \
+ (srP)->misc.ARM.r14 = (uc)->uc_mcontext.arm_lr; \
+ (srP)->misc.ARM.r12 = (uc)->uc_mcontext.arm_ip; \
+ (srP)->misc.ARM.r11 = (uc)->uc_mcontext.arm_fp; \
+ }
+
#elif defined(VGP_ppc32_aix5)
/* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
" movl $" #name ", %eax\n" \
" int $0x80\n" \
".previous\n"
+
#elif defined(VGP_amd64_linux)
# define _MY_SIGRETURN(name) \
".text\n" \
" movq $" #name ", %rax\n" \
" syscall\n" \
".previous\n"
+
#elif defined(VGP_ppc32_linux)
# define _MY_SIGRETURN(name) \
".text\n" \
" li 0, " #name "\n" \
" sc\n" \
".previous\n"
+
#elif defined(VGP_ppc64_linux)
# define _MY_SIGRETURN(name) \
".align 2\n" \
".my_sigreturn:\n" \
" li 0, " #name "\n" \
" sc\n"
+
+#elif defined(VGP_arm_linux)
+# define _MY_SIGRETURN(name) \
+ ".text\n" \
+ "my_sigreturn:\n\t" \
+ " mov r7, #" #name "\n\t" \
+ " svc 0x00000000\n" \
+ ".previous\n"
+
#elif defined(VGP_ppc32_aix5)
# define _MY_SIGRETURN(name) \
".globl my_sigreturn\n" \
".globl my_sigreturn\n" \
"my_sigreturn:\n" \
".long 0\n"
+
#elif defined(VGP_x86_darwin)
# define _MY_SIGRETURN(name) \
".text\n" \
"my_sigreturn:\n" \
"movl $" VG_STRINGIFY(__NR_DARWIN_FAKE_SIGRETURN) ",%eax\n" \
"int $0x80"
+
#elif defined(VGP_amd64_darwin)
// DDD: todo
# define _MY_SIGRETURN(name) \
".text\n" \
"my_sigreturn:\n" \
"ud2\n"
+
#else
# error Unknown platform
#endif
// tid = VG_(master_tid);
vg_assert(tid != 0);
- VG_(core_panic_at)("Killed by fatal signal",
- VG_UCONTEXT_INSTR_PTR(uc),
- VG_UCONTEXT_STACK_PTR(uc),
- VG_UCONTEXT_FRAME_PTR(uc),
- VG_UCONTEXT_LINK_REG(uc));
+ UnwindStartRegs startRegs;
+ VG_(memset)(&startRegs, 0, sizeof(startRegs));
+
+ VG_UCONTEXT_TO_UnwindStartRegs(&startRegs, uc);
+ VG_(core_panic_at)("Killed by fatal signal", &startRegs);
}
}
#include "pub_core_clientstate.h" // VG_(client__dl_sysinfo_int80)
#include "pub_core_trampoline.h"
+
/*------------------------------------------------------------*/
-/*--- Exported functions. ---*/
+/*--- ---*/
+/*--- BEGIN platform-dependent unwinder worker functions ---*/
+/*--- ---*/
/*------------------------------------------------------------*/
/* Take a snapshot of the client's stack, putting up to 'max_n_ips'
first parameter, else send zero. This helps generate better stack
traces on ppc64-linux and has no effect on other platforms.
*/
+
+/* ------------------------ x86 ------------------------- */
+
+#if defined(VGP_x86_linux) || defined(VGP_x86_darwin)
+
UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
/*OUT*/Addr* ips, UInt max_n_ips,
/*OUT*/Addr* sps, /*OUT*/Addr* fps,
- Addr ip, Addr sp, Addr fp, Addr lr,
- Addr fp_min, Addr fp_max_orig )
+ UnwindStartRegs* startRegs,
+ Addr fp_max_orig )
{
-# if defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux) \
- || defined(VGP_ppc32_aix5) \
- || defined(VGP_ppc64_aix5)
- Bool lr_is_first_RA = False;
-# endif
-# if defined(VGP_ppc64_linux) || defined(VGP_ppc64_aix5) \
- || defined(VGP_ppc32_aix5)
- Word redir_stack_size = 0;
- Word redirs_used = 0;
-# endif
-
Bool debug = False;
Int i;
Addr fp_max;
vg_assert(sizeof(Addr) == sizeof(UWord));
vg_assert(sizeof(Addr) == sizeof(void*));
+ Addr ip = (Addr)startRegs->r_pc;
+ Addr sp = (Addr)startRegs->r_sp;
+ Addr fp = startRegs->misc.X86.r_ebp;
+ Addr fp_min = sp;
+
/* Snaffle IPs from the client's stack into ips[0 .. max_n_ips-1],
stopping when the trail goes cold, which we guess to be
when FP is not a reasonable stack location. */
/* vg_assert(fp_min <= fp_max);*/
// On Darwin, this kicks in for pthread-related stack traces, so they're
// only 1 entry long which is wrong.
-#if !defined(VGO_darwin)
+# if !defined(VGO_darwin)
if (fp_min + 512 >= fp_max) {
/* If the stack limits look bogus, don't poke around ... but
don't bomb out either. */
ips[0] = ip;
return 1;
}
-#endif
-
- /* Otherwise unwind the stack in a platform-specific way. Trying
- to merge the x86, amd64, ppc32 and ppc64 logic into a single
- piece of code is just too confusing and difficult to
- performance-tune. */
-
-# if defined(VGP_x86_linux) || defined(VGP_x86_darwin)
-
- /*--------------------- x86 ---------------------*/
+# endif
/* fp is %ebp. sp is %esp. ip is %eip. */
break;
}
-# elif defined(VGP_amd64_linux) || defined(VGP_amd64_darwin)
+ n_found = i;
+ return n_found;
+}
+
+#endif
+
+/* ----------------------- amd64 ------------------------ */
+
+#if defined(VGP_amd64_linux) || defined(VGP_amd64_darwin)
+
+UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
+ /*OUT*/Addr* ips, UInt max_n_ips,
+ /*OUT*/Addr* sps, /*OUT*/Addr* fps,
+ UnwindStartRegs* startRegs,
+ Addr fp_max_orig )
+{
+ Bool debug = False;
+ Int i;
+ Addr fp_max;
+ UInt n_found = 0;
+
+ vg_assert(sizeof(Addr) == sizeof(UWord));
+ vg_assert(sizeof(Addr) == sizeof(void*));
+
+ Addr ip = startRegs->r_pc;
+ Addr sp = startRegs->r_sp;
+ Addr fp = startRegs->misc.AMD64.r_rbp;
+ Addr fp_min = sp;
+
+ /* Snaffle IPs from the client's stack into ips[0 .. max_n_ips-1],
+ stopping when the trail goes cold, which we guess to be
+ when FP is not a reasonable stack location. */
+
+ // JRS 2002-sep-17: hack, to round up fp_max to the end of the
+ // current page, at least. Dunno if it helps.
+ // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
+ fp_max = VG_PGROUNDUP(fp_max_orig);
+ if (fp_max >= sizeof(Addr))
+ fp_max -= sizeof(Addr);
+
+ if (debug)
+ VG_(printf)("max_n_ips=%d fp_min=0x%lx fp_max_orig=0x%lx, "
+ "fp_max=0x%lx ip=0x%lx fp=0x%lx\n",
+ max_n_ips, fp_min, fp_max_orig, fp_max, ip, fp);
- /*--------------------- amd64 ---------------------*/
+ /* Assertion broken before main() is reached in pthreaded programs; the
+ * offending stack traces only have one item. --njn, 2002-aug-16 */
+ /* vg_assert(fp_min <= fp_max);*/
+ // On Darwin, this kicks in for pthread-related stack traces, so they're
+ // only 1 entry long which is wrong.
+# if !defined(VGO_darwin)
+ if (fp_min + 512 >= fp_max) {
+ /* If the stack limits look bogus, don't poke around ... but
+ don't bomb out either. */
+ if (sps) sps[0] = sp;
+ if (fps) fps[0] = fp;
+ ips[0] = ip;
+ return 1;
+ }
+# endif
/* fp is %rbp. sp is %rsp. ip is %rip. */
break;
}
-# elif defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux) \
- || defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+ n_found = i;
+ return n_found;
+}
+
+#endif
+
+/* -----------------------ppc32/64 ---------------------- */
+
+#if defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux) \
+ || defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+
+UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
+ /*OUT*/Addr* ips, UInt max_n_ips,
+ /*OUT*/Addr* sps, /*OUT*/Addr* fps,
+ Addr ip, Addr sp, Addr fp, Addr lr,
+ Addr fp_min, Addr fp_max_orig )
+{
+ Bool lr_is_first_RA = False;
+# if defined(VG_PLAT_USES_PPCTOC)
+ Word redir_stack_size = 0;
+ Word redirs_used = 0;
+# endif
+
+ Bool debug = False;
+ Int i;
+ Addr fp_max;
+ UInt n_found = 0;
+
+ vg_assert(sizeof(Addr) == sizeof(UWord));
+ vg_assert(sizeof(Addr) == sizeof(void*));
+
+ /* Snaffle IPs from the client's stack into ips[0 .. max_n_ips-1],
+ stopping when the trail goes cold, which we guess to be
+ when FP is not a reasonable stack location. */
+
+ // JRS 2002-sep-17: hack, to round up fp_max to the end of the
+ // current page, at least. Dunno if it helps.
+ // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
+ fp_max = VG_PGROUNDUP(fp_max_orig);
+ if (fp_max >= sizeof(Addr))
+ fp_max -= sizeof(Addr);
- /*--------------------- ppc32/64 ---------------------*/
+ if (debug)
+ VG_(printf)("max_n_ips=%d fp_min=0x%lx fp_max_orig=0x%lx, "
+ "fp_max=0x%lx ip=0x%lx fp=0x%lx\n",
+ max_n_ips, fp_min, fp_max_orig, fp_max, ip, fp);
+
+ /* Assertion broken before main() is reached in pthreaded programs; the
+ * offending stack traces only have one item. --njn, 2002-aug-16 */
+ /* vg_assert(fp_min <= fp_max);*/
+ if (fp_min + 512 >= fp_max) {
+ /* If the stack limits look bogus, don't poke around ... but
+ don't bomb out either. */
+ if (sps) sps[0] = sp;
+ if (fps) fps[0] = fp;
+ ips[0] = ip;
+ return 1;
+ }
/* fp is %r1. ip is %cia. Note, ppc uses r1 as both the stack and
frame pointers. */
/* On ppc64-linux (ppc64-elf, really), and on AIX, the lr save
slot is 2 words back from sp, whereas on ppc32-elf(?) it's
only one word back. */
-# if defined(VGP_ppc64_linux) \
- || defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
+# if defined(VG_PLAT_USES_PPCTOC)
const Int lr_offset = 2;
# else
const Int lr_offset = 1;
}
}
-# else
-# error "Unknown platform"
-# endif
+ n_found = i;
+ return n_found;
+}
+
+#endif
+
+/* ------------------------ arm ------------------------- */
+
+#if defined(VGP_arm_linux)
+
+UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
+ /*OUT*/Addr* ips, UInt max_n_ips,
+ /*OUT*/Addr* sps, /*OUT*/Addr* fps,
+ UnwindStartRegs* startRegs,
+ Addr fp_max_orig )
+{
+ Bool debug = False;
+ Int i;
+ Addr fp_max;
+ UInt n_found = 0;
+
+ vg_assert(sizeof(Addr) == sizeof(UWord));
+ vg_assert(sizeof(Addr) == sizeof(void*));
+
+ Addr r15 = startRegs->r_pc;
+ Addr r13 = startRegs->r_sp;
+ Addr r14 = startRegs->misc.ARM.r14;
+ Addr r12 = startRegs->misc.ARM.r12;
+ Addr r11 = startRegs->misc.ARM.r11;
+ Addr fp_min = r13;
+
+ /* Snaffle IPs from the client's stack into ips[0 .. max_n_ips-1],
+ stopping when the trail goes cold, which we guess to be
+ when FP is not a reasonable stack location. */
+
+ // JRS 2002-sep-17: hack, to round up fp_max to the end of the
+ // current page, at least. Dunno if it helps.
+ // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
+ fp_max = VG_PGROUNDUP(fp_max_orig);
+ if (fp_max >= sizeof(Addr))
+ fp_max -= sizeof(Addr);
+
+ if (debug)
+ VG_(printf)("max_n_ips=%d fp_min=0x%lx fp_max_orig=0x%lx, "
+ "fp_max=0x%lx r15=0x%lx r13=0x%lx\n",
+ max_n_ips, fp_min, fp_max_orig, fp_max, r15, r13);
+
+ /* Assertion broken before main() is reached in pthreaded programs; the
+ * offending stack traces only have one item. --njn, 2002-aug-16 */
+ /* vg_assert(fp_min <= fp_max);*/
+ // On Darwin, this kicks in for pthread-related stack traces, so they're
+ // only 1 entry long which is wrong.
+ if (fp_min + 512 >= fp_max) {
+ /* If the stack limits look bogus, don't poke around ... but
+ don't bomb out either. */
+ if (sps) sps[0] = r13;
+ if (fps) fps[0] = 0;
+ ips[0] = r15;
+ return 1;
+ }
+
+ /* */
+
+ if (sps) sps[0] = r13;
+ if (fps) fps[0] = 0;
+ ips[0] = r15;
+ i = 1;
+
+ /* Loop unwinding the stack. */
+
+ while (True) {
+ if (debug) {
+ VG_(printf)("i: %d, r15: 0x%lx, r13: 0x%lx\n",i, r15, r13);
+ }
+
+ if (i >= max_n_ips)
+ break;
+
+ if (VG_(use_CF_info)( &r15, &r14, &r13, &r12, &r11, fp_min, fp_max )) {
+ if (sps) sps[i] = r13;
+ if (fps) fps[i] = 0;
+ ips[i++] = r15 -1;
+ if (debug)
+ VG_(printf)("USING CFI: r15: 0x%lx, r13: 0x%lx\n", r15, r13);
+ r15 = r15 - 1;
+ continue;
+ }
+ /* No luck. We have to give up. */
+ break;
+ }
n_found = i;
return n_found;
}
+#endif
+
+/*------------------------------------------------------------*/
+/*--- ---*/
+/*--- END platform-dependent unwinder worker functions ---*/
+/*--- ---*/
+/*------------------------------------------------------------*/
+
+/*------------------------------------------------------------*/
+/*--- Exported functions. ---*/
+/*------------------------------------------------------------*/
+
UInt VG_(get_StackTrace) ( ThreadId tid,
/*OUT*/StackTrace ips, UInt max_n_ips,
/*OUT*/StackTrace sps,
/*OUT*/StackTrace fps,
Word first_ip_delta )
{
- /* thread in thread table */
- Addr ip = VG_(get_IP)(tid);
- Addr fp = VG_(get_FP)(tid);
- Addr sp = VG_(get_SP)(tid);
- Addr lr = VG_(get_LR)(tid);
+ /* Get the register values with which to start the unwind. */
+ UnwindStartRegs startRegs;
+ VG_(memset)( &startRegs, 0, sizeof(startRegs) );
+ VG_(get_UnwindStartRegs)( &startRegs, tid );
+
Addr stack_highest_word = VG_(threads)[tid].client_stack_highest_word;
Addr stack_lowest_word = 0;
bothered.
*/
if (VG_(client__dl_sysinfo_int80) != 0 /* we know its address */
- && ip >= VG_(client__dl_sysinfo_int80)
- && ip < VG_(client__dl_sysinfo_int80)+3
- && VG_(am_is_valid_for_client)(sp, sizeof(Addr), VKI_PROT_READ)) {
- ip = *(Addr *)sp;
- sp += sizeof(Addr);
+ && startRegs.r_pc >= VG_(client__dl_sysinfo_int80)
+ && startRegs.r_pc < VG_(client__dl_sysinfo_int80)+3
+ && VG_(am_is_valid_for_client)(startRegs.r_sp, sizeof(Addr),
+ VKI_PROT_READ)) {
+ startRegs.r_pc = (ULong) *(Addr*)(UWord)startRegs.r_sp;
+ startRegs.r_sp += (ULong) sizeof(Addr);
}
# endif
/* See if we can get a better idea of the stack limits */
- VG_(stack_limits)(sp, &stack_lowest_word, &stack_highest_word);
+ VG_(stack_limits)( (Addr)startRegs.r_sp,
+ &stack_lowest_word, &stack_highest_word );
/* Take into account the first_ip_delta. */
- vg_assert( sizeof(Addr) == sizeof(Word) );
- ip += first_ip_delta;
+ startRegs.r_pc += (Long)(Word)first_ip_delta;
if (0)
- VG_(printf)("tid %d: stack_highest=0x%08lx ip=0x%08lx "
- "sp=0x%08lx fp=0x%08lx\n",
- tid, stack_highest_word, ip, sp, fp);
+ VG_(printf)("tid %d: stack_highest=0x%08lx ip=0x%010llx "
+ "sp=0x%010llx\n",
+ tid, stack_highest_word,
+ startRegs.r_pc, startRegs.r_sp);
return VG_(get_StackTrace_wrk)(tid, ips, max_n_ips,
sps, fps,
- ip, sp, fp, lr, sp,
+ &startRegs,
stack_highest_word);
}
return res;
}
+SysRes VG_(mk_SysRes_arm_linux) ( Int val ) {
+ SysRes res;
+ res._isError = val >= -4095 && val <= -1;
+ if (res._isError) {
+ res._val = (UInt)(-val);
+ } else {
+ res._val = (UInt)val;
+ }
+ return res;
+}
+
/* Generic constructors. */
SysRes VG_(mk_SysRes_Error) ( UWord err ) {
SysRes r;
" blr\n"
);
+#elif defined(VGP_arm_linux)
+/* I think the conventions are:
+ args in r0 r1 r2 r3 r4 r5
+ sysno in r7
+ return value in r0, w/ same conventions as x86-linux, viz r0 in
+ -4096 .. -1 is an error value. All other values are success
+ values.
+*/
+extern UWord do_syscall_WRK (
+ UWord a1, UWord a2, UWord a3,
+ UWord a4, UWord a5, UWord a6,
+ UWord syscall_no
+ );
+asm(
+".text\n"
+"do_syscall_WRK:\n"
+" push {r4, r5, r7}\n"
+" ldr r4, [sp, #12]\n"
+" ldr r5, [sp, #16]\n"
+" ldr r7, [sp, #20]\n"
+" svc 0x0\n"
+" pop {r4, r5, r7}\n"
+" bx lr\n"
+".previous\n"
+);
+
#elif defined(VGP_ppc32_aix5)
static void do_syscall_WRK ( UWord* res_r3, UWord* res_r4,
UWord sysno,
do_syscall_WRK( &argblock[0] );
return VG_(mk_SysRes_ppc64_linux)( argblock[0], argblock[1] );
+# elif defined(VGP_arm_linux)
+ UWord val = do_syscall_WRK(a1,a2,a3,a4,a5,a6,sysno);
+ return VG_(mk_SysRes_arm_linux)( val );
+
# elif defined(VGP_ppc32_aix5)
UWord res;
UWord err;
// Note that, depending on the platform, arguments may be found in
// registers or on the stack. (See the comment at the top of
// syswrap-main.c for per-platform details.) For register arguments
- // (which have o_arg field names) the o_arg value is the offset from
+ // (which have o_arg field names) the o_arg value is the offset into
// the vex register state. For stack arguments (which have s_arg
// field names), the s_arg value is the offset from the stack pointer.
Int o_sysno;
# if defined(VGP_x86_linux) || defined(VGP_amd64_linux) \
- || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+ || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux) \
+ || defined(VGP_arm_linux)
Int o_arg1;
Int o_arg2;
Int o_arg3;
*/
-#if defined(VGO_linux) || defined(VGO_darwin)
-/* On Linux, finding the wrapper is easy: just look up in fixed,
- platform-specific tables. These are defined in the relevant
- platform-specific files -- syswrap-arch-os.c */
-
-extern const SyscallTableEntry ML_(syscall_table)[];
-
-extern const UInt ML_(syscall_table_size);
+/* A function to find the syscall table entry for a given sysno. If
+ none is found, return NULL. This used to be done with a single
+ fixed sized table exposed to the caller, but that's too inflexible;
+ hence now use a function which can do arbitrary messing around to
+ find the required entry. */
+#if defined(VGO_linux)
+extern
+SyscallTableEntry* ML_(get_linux_syscall_entry)( UInt sysno );
#elif defined(VGP_ppc32_aix5)
-/* On AIX5 this is more complex than the simple fixed table lookup on
- Linux, since the syscalls don't have fixed numbers. So it's
- simplest to use a function, which does all the required messing
- around. */
+/* Same scheme on AIX5. This is more complex than the simple fixed
+ table lookup typical for Linux, since the syscalls don't have fixed
+ numbers. */
extern
SyscallTableEntry* ML_(get_ppc32_aix5_syscall_entry) ( UInt sysno );
extern
SyscallTableEntry* ML_(get_ppc64_aix5_syscall_entry) ( UInt sysno );
+#elif defined(VGO_darwin)
+/* XXX: Darwin still uses the old scheme of exposing the table
+ array(s) and size(s) directly to syswrap-main.c. This should be
+ fixed. */
+
+extern const SyscallTableEntry ML_(syscall_table)[];
+extern const UInt ML_(syscall_table_size);
+
#else
# error Unknown OS
#endif
--- /dev/null
+
+/*--------------------------------------------------------------------*/
+/*--- Support for doing system calls. syscall-arm-linux.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2008-2009 Evan Geller (gaze@bea.ms)
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_arm_linux)
+
+#include "pub_core_basics_asm.h"
+#include "pub_core_vkiscnums_asm.h"
+#include "libvex_guest_offsets.h"
+
+
+/*----------------------------------------------------------------*/
+/*
+ Perform a syscall for the client. This will run a syscall
+ with the client's specific per-thread signal mask.
+
+ The structure of this function is such that, if the syscall is
+ interrupted by a signal, we can determine exactly what
+ execution state we were in with respect to the execution of
+ the syscall by examining the value of IP in the signal
+ handler. This means that we can always do the appropriate
+ thing to precisely emulate the kernel's signal/syscall
+ interactions.
+
+ The syscall number is taken from the argument, even though it
+ should also be in regs->m_R7. The syscall result is written
+ back to regs->m_R0 on completion.
+
+ Returns 0 if the syscall was successfully called (even if the
+ syscall itself failed), or a nonzero error code in the lowest
+ 8 bits if one of the sigprocmasks failed (there's no way to
+ determine which one failed). And there's no obvious way to
+ recover from that either, but nevertheless we want to know.
+
+ VG_(fixup_guest_state_after_syscall_interrupted) does the
+ thread state fixup in the case where we were interrupted by a
+ signal.
+
+ Prototype:
+
+ UWord ML_(do_syscall_for_client_WRK)(
+ Int syscallno, // r0
+ void* guest_state, // r1
+ const vki_sigset_t *sysmask, // r2
+ const vki_sigset_t *postmask, // r3
+ Int nsigwords) // [sp, #0]
+*/
+/* from vki_arch.h */
+#define VKI_SIG_SETMASK 2
+
+.globl ML_(do_syscall_for_client_WRK)
+ML_(do_syscall_for_client_WRK):
+
+ /* Stash callee-saves and our args on the stack */
+ push {r0, r1, r3, r4, r5, r7, fp, lr}
+
+1:
+
+ mov r7, #__NR_rt_sigprocmask
+ mov r0, #VKI_SIG_SETMASK
+ mov r1, r2 /* sysmask */
+ mov r2, r3 /* postmask */
+ ldr r3, [sp, #32] /* nsigwords */
+ svc 0x00000000
+
+
+ ldr r5, [sp, #4] /* guest_state */
+
+ ldr r7, [sp, #0] /* syscall# */
+ ldr r0, [r5, #OFFSET_arm_R0]
+ ldr r1, [r5, #OFFSET_arm_R1]
+ ldr r2, [r5, #OFFSET_arm_R2]
+ ldr r3, [r5, #OFFSET_arm_R3]
+ ldr r4, [r5, #OFFSET_arm_R4]
+ ldr r5, [r5, #OFFSET_arm_R5]
+
+2: svc 0x00000000
+3:
+ ldr r5, [sp, #4] /* guest_state */
+ str r0, [r5, #OFFSET_arm_R0]
+
+4:
+ mov r7, #__NR_rt_sigprocmask
+ mov r0, #VKI_SIG_SETMASK
+ ldr r1, [sp, #8] /* postmask */
+ mov r2, #0
+ ldr r3, [sp, #32] /* nsigwords */
+ svc 0x00000000
+
+ cmp r0, #0
+ blt 7f
+ add sp, sp, #4 /* r0 contains return value */
+
+5: /* Success */
+ mov r0, #0
+ pop {r1, r3, r4, r5, r7, fp, pc}
+
+7: /* Failure: return 0x8000 | error code */
+ add sp, sp, #4 /* discard saved r0, matching the success path */
+ orr r0, r0, #0x8000
+ pop {r1, r3, r4, r5, r7, fp, pc}
+
+
+.section .rodata
+/* export the ranges so that
+ VG_(fixup_guest_state_after_syscall_interrupted) can do the
+ right thing */
+
+.globl ML_(blksys_setup)
+.globl ML_(blksys_restart)
+.globl ML_(blksys_complete)
+.globl ML_(blksys_committed)
+.globl ML_(blksys_finished)
+ML_(blksys_setup): .long 1b
+ML_(blksys_restart): .long 2b
+ML_(blksys_complete): .long 3b
+ML_(blksys_committed): .long 4b
+ML_(blksys_finished): .long 5b
+
+/* Let the linker know we don't need an executable stack */
+.section .note.GNU-stack,"",%progbits
+
+.previous
+
+#endif // defined(VGP_arm_linux)
+
+/*--------------------------------------------------------------------*/
+/*--- end ---*/
+/*--------------------------------------------------------------------*/
// When implementing these wrappers, you need to work out if the wrapper is
// generic, Linux-only (but arch-independent), or AMD64/Linux only.
-const SyscallTableEntry ML_(syscall_table)[] = {
+static SyscallTableEntry syscall_table[] = {
GENXY(__NR_read, sys_read), // 0
GENX_(__NR_write, sys_write), // 1
GENXY(__NR_open, sys_open), // 2
LINXY(__NR_perf_counter_open, sys_perf_counter_open) // 298
};
-const UInt ML_(syscall_table_size) =
- sizeof(ML_(syscall_table)) / sizeof(ML_(syscall_table)[0]);
+SyscallTableEntry* ML_(get_linux_syscall_entry) ( UInt sysno )
+{
+ const UInt syscall_table_size
+ = sizeof(syscall_table) / sizeof(syscall_table[0]);
+
+ /* Is it in the contiguous initial section of the table? */
+ if (sysno < syscall_table_size) {
+ SyscallTableEntry* sys = &syscall_table[sysno];
+ if (sys->before == NULL)
+ return NULL; /* no entry */
+ else
+ return sys;
+ }
+
+ /* Can't find a wrapper */
+ return NULL;
+}
#endif // defined(VGP_amd64_linux)
--- /dev/null
+
+/*--------------------------------------------------------------------*/
+/*--- Platform-specific syscalls stuff. syswrap-arm-linux.c -----*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Nicholas Nethercote
+ njn@valgrind.org
+ Copyright (C) 2008-2009 Evan Geller
+ gaze@bea.ms
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_arm_linux)
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_options.h"
+#include "pub_core_scheduler.h"
+#include "pub_core_sigframe.h" // For VG_(sigframe_destroy)()
+#include "pub_core_signals.h"
+#include "pub_core_syscall.h"
+#include "pub_core_syswrap.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_stacks.h" // VG_(register_stack)
+
+#include "priv_types_n_macros.h"
+#include "priv_syswrap-generic.h" /* for decls of generic wrappers */
+#include "priv_syswrap-linux.h" /* for decls of linux-ish wrappers */
+#include "priv_syswrap-main.h"
+
+
+/* ---------------------------------------------------------------------
+ clone() handling
+ ------------------------------------------------------------------ */
+
+/* Call f(arg1), but first switch stacks, using 'stack' as the new
+ stack, and use 'retaddr' as f's return-to address. Also, clear all
+ the integer registers before entering f.*/
+__attribute__((noreturn))
+void ML_(call_on_new_stack_0_1) ( Addr stack,
+ Addr retaddr,
+ void (*f)(Word),
+ Word arg1 );
+// r0 = stack
+// r1 = retaddr
+// r2 = f
+// r3 = arg1
+asm(
+".text\n"
+".globl vgModuleLocal_call_on_new_stack_0_1\n"
+"vgModuleLocal_call_on_new_stack_0_1:\n"
+" mov sp,r0\n\t" /* Stack pointer */
+" mov lr,r1\n\t" /* Return address */
+" mov r0,r3\n\t" /* First argument */
+" push {r2}\n\t" /* So we can ret to the new dest */
+" mov r1, #0\n\t" /* Clear our GPRs */
+" mov r2, #0\n\t"
+" mov r3, #0\n\t"
+" mov r4, #0\n\t"
+" mov r5, #0\n\t"
+" mov r6, #0\n\t"
+" mov r7, #0\n\t"
+" mov r8, #0\n\t"
+" mov r9, #0\n\t"
+" mov r10, #0\n\t"
+" mov r11, #0\n\t"
+" mov r12, #0\n\t"
+" pop {pc}\n\t" /* Herrre we go! */
+".previous\n"
+);
+
+
+#define __NR_CLONE VG_STRINGIFY(__NR_clone)
+#define __NR_EXIT VG_STRINGIFY(__NR_exit)
+
+extern
+ULong do_syscall_clone_arm_linux ( Word (*fn)(void *),
+ void* stack,
+ Int flags,
+ void* arg,
+ Int* child_tid,
+ Int* parent_tid,
+ void* tls );
+asm(
+".text\n"
+"do_syscall_clone_arm_linux:\n"
+
+/*Setup child stack */
+" str r0, [r1, #-4]!\n"
+" str r3, [r1, #-4]!\n"
+" push {r4,r7}\n"
+" mov r0, r2\n" /* arg1: flags */
+/* r1 (arg2) is already our child's stack */
+" ldr r2, [sp, #12]\n" // parent tid
+" ldr r3, [sp, #16]\n" // tls
+" ldr r4, [sp, #8]\n" // Child tid
+" mov r7, #"__NR_CLONE"\n"
+" svc 0x00000000\n"
+" cmp r0, #0\n"
+" beq 1f\n"
+
+/* Parent */
+" pop {r4,r7}\n"
+" bx lr\n"
+
+"1:\n" /*child*/
+" mov lr, pc\n"
+" pop {r0,pc}\n"
+/* Retval from child is already in r0 */
+" mov r7, #"__NR_EXIT"\n"
+" svc 0x00000000\n"
+/* Urh.. why did exit return? */
+" .long 0\n"
+" .previous\n"
+);
+
+#undef __NR_CLONE
+#undef __NR_EXIT
+
+// forward declarations
+static void setup_child ( ThreadArchState*, ThreadArchState* );
+static SysRes sys_set_tls ( ThreadId tid, Addr tlsptr );
+
+/*
+ When a client clones, we need to keep track of the new thread. This means:
+ 1. allocate a ThreadId+ThreadState+stack for the thread
+
+ 2. initialize the thread's new VCPU state
+
+ 3. create the thread using the same args as the client requested,
+ but using the scheduler entrypoint for IP, and a separate stack
+ for SP.
+ */
+static SysRes do_clone ( ThreadId ptid,
+ UInt flags, Addr sp,
+ Int *parent_tidptr,
+ Int *child_tidptr,
+ Addr child_tls)
+{
+ const Bool debug = False;
+
+ ThreadId ctid = VG_(alloc_ThreadState)();
+ ThreadState* ptst = VG_(get_ThreadState)(ptid);
+ ThreadState* ctst = VG_(get_ThreadState)(ctid);
+ UInt r0;
+ UWord *stack;
+ NSegment const* seg;
+ SysRes res;
+ vki_sigset_t blockall, savedmask;
+
+ VG_(sigfillset)(&blockall);
+
+ vg_assert(VG_(is_running_thread)(ptid));
+ vg_assert(VG_(is_valid_tid)(ctid));
+
+ stack = (UWord*)ML_(allocstack)(ctid);
+
+ if(stack == NULL) {
+ res = VG_(mk_SysRes_Error)( VKI_ENOMEM );
+ goto out;
+ }
+
+ setup_child( &ctst->arch, &ptst->arch );
+
+ ctst->arch.vex.guest_R0 = 0;
+ if(sp != 0)
+ ctst->arch.vex.guest_R13 = sp;
+
+ ctst->os_state.parent = ptid;
+
+ ctst->sig_mask = ptst->sig_mask;
+ ctst->tmp_sig_mask = ptst->sig_mask;
+
+ seg = VG_(am_find_nsegment)((Addr)sp);
+ if (seg && seg->kind != SkResvn) {
+ ctst->client_stack_highest_word = (Addr)VG_PGROUNDUP(sp);
+ ctst->client_stack_szB = ctst->client_stack_highest_word - seg->start;
+
+ VG_(register_stack)(seg->start, ctst->client_stack_highest_word);
+
+ if (debug)
+ VG_(printf)("tid %d: guessed client stack range %#lx-%#lx\n",
+ ctid, seg->start, VG_PGROUNDUP(sp));
+ } else {
+ VG_(message)(Vg_UserMsg, "!? New thread %d starts with sp(%#lx) unmapped\n", ctid, sp);
+ ctst->client_stack_szB = 0;
+ }
+
+ VG_TRACK ( pre_thread_ll_create, ptid, ctid );
+
+ if (flags & VKI_CLONE_SETTLS) {
+ res = sys_set_tls(ctid, child_tls);
+ if (sr_isError(res))
+ goto out;
+ }
+
+ flags &= ~VKI_CLONE_SETTLS;
+
+ VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, &savedmask);
+
+ r0 = do_syscall_clone_arm_linux(
+ ML_(start_thread_NORETURN), stack, flags, &VG_(threads)[ctid],
+ child_tidptr, parent_tidptr, NULL
+ );
+ //VG_(printf)("AFTER SYSCALL, %x and %x CHILD: %d PARENT: %d\n",child_tidptr, parent_tidptr,*child_tidptr,*parent_tidptr);
+
+ res = VG_(mk_SysRes_arm_linux)( r0 );
+
+ VG_(sigprocmask)(VKI_SIG_SETMASK, &savedmask, NULL);
+
+out:
+ if (sr_isError(res)) {
+ VG_(cleanup_thread)(&ctst->arch);
+ ctst->status = VgTs_Empty;
+ VG_TRACK( pre_thread_ll_exit, ctid );
+ }
+
+ return res;
+}
+
+
+
+/* ---------------------------------------------------------------------
+ More thread stuff
+ ------------------------------------------------------------------ */
+
+// ARM doesn't have any architecture specific thread stuff that
+// needs to be cleaned up
+void VG_(cleanup_thread) ( ThreadArchState* arch )
+{
+}
+
+void setup_child ( /*OUT*/ ThreadArchState *child,
+ /*IN*/ ThreadArchState *parent )
+{
+ child->vex = parent->vex;
+ child->vex_shadow1 = parent->vex_shadow1;
+ child->vex_shadow2 = parent->vex_shadow2;
+}
+
+static SysRes sys_set_tls ( ThreadId tid, Addr tlsptr )
+{
+ VG_(threads)[tid].arch.vex.guest_TPIDRURO = tlsptr;
+ return VG_(mk_SysRes_Success)( 0 );
+}
+
+/* ---------------------------------------------------------------------
+ PRE/POST wrappers for arm/Linux-specific syscalls
+ ------------------------------------------------------------------ */
+
+#define PRE(name) DEFN_PRE_TEMPLATE(arm_linux, name)
+#define POST(name) DEFN_POST_TEMPLATE(arm_linux, name)
+
+/* Add prototypes for the wrappers declared here, so that gcc doesn't
+ harass us for not having prototypes. Really this is a kludge --
+ the right thing to do is to make these wrappers 'static' since they
+ aren't visible outside this file, but that requires even more macro
+ magic. */
+
+DECL_TEMPLATE(arm_linux, sys_socketcall);
+DECL_TEMPLATE(arm_linux, sys_socket);
+DECL_TEMPLATE(arm_linux, sys_setsockopt);
+DECL_TEMPLATE(arm_linux, sys_getsockopt);
+DECL_TEMPLATE(arm_linux, sys_connect);
+DECL_TEMPLATE(arm_linux, sys_accept);
+DECL_TEMPLATE(arm_linux, sys_sendto);
+DECL_TEMPLATE(arm_linux, sys_recvfrom);
+//XXX: Semaphore code ripped from AMD64.
+DECL_TEMPLATE(arm_linux, sys_semget);
+DECL_TEMPLATE(arm_linux, sys_semop);
+DECL_TEMPLATE(arm_linux, sys_semctl);
+DECL_TEMPLATE(arm_linux, sys_semtimedop);
+//XXX: Shared memory code ripped from AMD64
+//
+DECL_TEMPLATE(arm_linux, wrap_sys_shmat);
+DECL_TEMPLATE(arm_linux, sys_shmget);
+DECL_TEMPLATE(arm_linux, sys_shmdt);
+DECL_TEMPLATE(arm_linux, sys_shmctl);
+DECL_TEMPLATE(arm_linux, sys_sendmsg);
+DECL_TEMPLATE(arm_linux, sys_recvmsg);
+//msg* code from AMD64
+DECL_TEMPLATE(arm_linux, sys_msgget);
+DECL_TEMPLATE(arm_linux, sys_msgrcv);
+DECL_TEMPLATE(arm_linux, sys_msgsnd);
+DECL_TEMPLATE(arm_linux, sys_msgctl);
+DECL_TEMPLATE(arm_linux, sys_shutdown);
+DECL_TEMPLATE(arm_linux, sys_bind);
+DECL_TEMPLATE(arm_linux, sys_listen);
+DECL_TEMPLATE(arm_linux, sys_getsockname);
+DECL_TEMPLATE(arm_linux, sys_getpeername);
+DECL_TEMPLATE(arm_linux, sys_socketpair);
+DECL_TEMPLATE(arm_linux, sys_send);
+DECL_TEMPLATE(arm_linux, sys_recv);
+DECL_TEMPLATE(arm_linux, sys_mmap);
+DECL_TEMPLATE(arm_linux, sys_mmap2);
+DECL_TEMPLATE(arm_linux, sys_stat64);
+DECL_TEMPLATE(arm_linux, sys_lstat64);
+DECL_TEMPLATE(arm_linux, sys_fstatat64);
+DECL_TEMPLATE(arm_linux, sys_fstat64);
+DECL_TEMPLATE(arm_linux, sys_ipc);
+DECL_TEMPLATE(arm_linux, sys_clone);
+DECL_TEMPLATE(arm_linux, sys_sigreturn);
+DECL_TEMPLATE(arm_linux, sys_rt_sigreturn);
+DECL_TEMPLATE(arm_linux, sys_sigaction);
+DECL_TEMPLATE(arm_linux, sys_sigsuspend);
+DECL_TEMPLATE(arm_linux, sys_set_tls);
+DECL_TEMPLATE(arm_linux, sys_cacheflush);
+
+PRE(sys_socketcall)
+{
+# define ARG2_0 (((UWord*)ARG2)[0])
+# define ARG2_1 (((UWord*)ARG2)[1])
+# define ARG2_2 (((UWord*)ARG2)[2])
+# define ARG2_3 (((UWord*)ARG2)[3])
+# define ARG2_4 (((UWord*)ARG2)[4])
+# define ARG2_5 (((UWord*)ARG2)[5])
+
+ *flags |= SfMayBlock;
+ PRINT("sys_socketcall ( %ld, %#lx )",ARG1,ARG2);
+ PRE_REG_READ2(long, "socketcall", int, call, unsigned long *, args);
+
+ switch (ARG1 /* request */) {
+
+ case VKI_SYS_SOCKETPAIR:
+ /* int socketpair(int d, int type, int protocol, int sv[2]); */
+ PRE_MEM_READ( "socketcall.socketpair(args)", ARG2, 4*sizeof(Addr) );
+ ML_(generic_PRE_sys_socketpair)( tid, ARG2_0, ARG2_1, ARG2_2, ARG2_3 );
+ break;
+
+ case VKI_SYS_SOCKET:
+ /* int socket(int domain, int type, int protocol); */
+ PRE_MEM_READ( "socketcall.socket(args)", ARG2, 3*sizeof(Addr) );
+ break;
+
+ case VKI_SYS_BIND:
+ /* int bind(int sockfd, struct sockaddr *my_addr,
+ int addrlen); */
+ PRE_MEM_READ( "socketcall.bind(args)", ARG2, 3*sizeof(Addr) );
+ ML_(generic_PRE_sys_bind)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_LISTEN:
+ /* int listen(int s, int backlog); */
+ PRE_MEM_READ( "socketcall.listen(args)", ARG2, 2*sizeof(Addr) );
+ break;
+
+ case VKI_SYS_ACCEPT: {
+ /* int accept(int s, struct sockaddr *addr, int *addrlen); */
+ PRE_MEM_READ( "socketcall.accept(args)", ARG2, 3*sizeof(Addr) );
+ ML_(generic_PRE_sys_accept)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+ }
+
+ case VKI_SYS_SENDTO:
+ /* int sendto(int s, const void *msg, int len,
+ unsigned int flags,
+ const struct sockaddr *to, int tolen); */
+ PRE_MEM_READ( "socketcall.sendto(args)", ARG2, 6*sizeof(Addr) );
+ ML_(generic_PRE_sys_sendto)( tid, ARG2_0, ARG2_1, ARG2_2,
+ ARG2_3, ARG2_4, ARG2_5 );
+ break;
+
+ case VKI_SYS_SEND:
+ /* int send(int s, const void *msg, size_t len, int flags); */
+ PRE_MEM_READ( "socketcall.send(args)", ARG2, 4*sizeof(Addr) );
+ ML_(generic_PRE_sys_send)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_RECVFROM:
+ /* int recvfrom(int s, void *buf, int len, unsigned int flags,
+ struct sockaddr *from, int *fromlen); */
+ PRE_MEM_READ( "socketcall.recvfrom(args)", ARG2, 6*sizeof(Addr) );
+ ML_(generic_PRE_sys_recvfrom)( tid, ARG2_0, ARG2_1, ARG2_2,
+ ARG2_3, ARG2_4, ARG2_5 );
+ break;
+
+ case VKI_SYS_RECV:
+ /* int recv(int s, void *buf, int len, unsigned int flags); */
+ /* man 2 recv says:
+ The recv call is normally used only on a connected socket
+ (see connect(2)) and is identical to recvfrom with a NULL
+ from parameter.
+ */
+ PRE_MEM_READ( "socketcall.recv(args)", ARG2, 4*sizeof(Addr) );
+ ML_(generic_PRE_sys_recv)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_CONNECT:
+ /* int connect(int sockfd,
+ struct sockaddr *serv_addr, int addrlen ); */
+ PRE_MEM_READ( "socketcall.connect(args)", ARG2, 3*sizeof(Addr) );
+ ML_(generic_PRE_sys_connect)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_SETSOCKOPT:
+ /* int setsockopt(int s, int level, int optname,
+ const void *optval, int optlen); */
+ PRE_MEM_READ( "socketcall.setsockopt(args)", ARG2, 5*sizeof(Addr) );
+ ML_(generic_PRE_sys_setsockopt)( tid, ARG2_0, ARG2_1, ARG2_2,
+ ARG2_3, ARG2_4 );
+ break;
+
+ case VKI_SYS_GETSOCKOPT:
+ /* int getsockopt(int s, int level, int optname,
+ void *optval, socklen_t *optlen); */
+ PRE_MEM_READ( "socketcall.getsockopt(args)", ARG2, 5*sizeof(Addr) );
+ ML_(linux_PRE_sys_getsockopt)( tid, ARG2_0, ARG2_1, ARG2_2,
+ ARG2_3, ARG2_4 );
+ break;
+
+ case VKI_SYS_GETSOCKNAME:
+ /* int getsockname(int s, struct sockaddr* name, int* namelen) */
+ PRE_MEM_READ( "socketcall.getsockname(args)", ARG2, 3*sizeof(Addr) );
+ ML_(generic_PRE_sys_getsockname)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_GETPEERNAME:
+ /* int getpeername(int s, struct sockaddr* name, int* namelen) */
+ PRE_MEM_READ( "socketcall.getpeername(args)", ARG2, 3*sizeof(Addr) );
+ ML_(generic_PRE_sys_getpeername)( tid, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_SHUTDOWN:
+ /* int shutdown(int s, int how); */
+ PRE_MEM_READ( "socketcall.shutdown(args)", ARG2, 2*sizeof(Addr) );
+ break;
+
+ case VKI_SYS_SENDMSG: {
+ /* int sendmsg(int s, const struct msghdr *msg, int flags); */
+
+ /* this causes warnings, and I don't get why. glibc bug?
+ * (after all it's glibc providing the arguments array)
+ PRE_MEM_READ( "socketcall.sendmsg(args)", ARG2, 3*sizeof(Addr) );
+ */
+ ML_(generic_PRE_sys_sendmsg)( tid, ARG2_0, ARG2_1 );
+ break;
+ }
+
+ case VKI_SYS_RECVMSG: {
+ /* int recvmsg(int s, struct msghdr *msg, int flags); */
+
+ /* this causes warnings, and I don't get why. glibc bug?
+ * (after all it's glibc providing the arguments array)
+ PRE_MEM_READ("socketcall.recvmsg(args)", ARG2, 3*sizeof(Addr) );
+ */
+ ML_(generic_PRE_sys_recvmsg)( tid, ARG2_0, ARG2_1 );
+ break;
+ }
+
+ default:
+ VG_(message)(Vg_DebugMsg,"Warning: unhandled socketcall 0x%lx",ARG1);
+ SET_STATUS_Failure( VKI_EINVAL );
+ break;
+ }
+# undef ARG2_0
+# undef ARG2_1
+# undef ARG2_2
+# undef ARG2_3
+# undef ARG2_4
+# undef ARG2_5
+}
+
+POST(sys_socketcall)
+{
+# define ARG2_0 (((UWord*)ARG2)[0])
+# define ARG2_1 (((UWord*)ARG2)[1])
+# define ARG2_2 (((UWord*)ARG2)[2])
+# define ARG2_3 (((UWord*)ARG2)[3])
+# define ARG2_4 (((UWord*)ARG2)[4])
+# define ARG2_5 (((UWord*)ARG2)[5])
+
+ SysRes r;
+ vg_assert(SUCCESS);
+ switch (ARG1 /* request */) {
+
+ case VKI_SYS_SOCKETPAIR:
+ r = ML_(generic_POST_sys_socketpair)(
+ tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1, ARG2_2, ARG2_3
+ );
+ SET_STATUS_from_SysRes(r);
+ break;
+
+ case VKI_SYS_SOCKET:
+ r = ML_(generic_POST_sys_socket)( tid, VG_(mk_SysRes_Success)(RES) );
+ SET_STATUS_from_SysRes(r);
+ break;
+
+ case VKI_SYS_BIND:
+ /* int bind(int sockfd, struct sockaddr *my_addr,
+ int addrlen); */
+ break;
+
+ case VKI_SYS_LISTEN:
+ /* int listen(int s, int backlog); */
+ break;
+
+ case VKI_SYS_ACCEPT:
+ /* int accept(int s, struct sockaddr *addr, int *addrlen); */
+ r = ML_(generic_POST_sys_accept)( tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1, ARG2_2 );
+ SET_STATUS_from_SysRes(r);
+ break;
+
+ case VKI_SYS_SENDTO:
+ break;
+
+ case VKI_SYS_SEND:
+ break;
+
+ case VKI_SYS_RECVFROM:
+ ML_(generic_POST_sys_recvfrom)( tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1, ARG2_2,
+ ARG2_3, ARG2_4, ARG2_5 );
+ break;
+
+ case VKI_SYS_RECV:
+ ML_(generic_POST_sys_recv)( tid, RES, ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_CONNECT:
+ break;
+
+ case VKI_SYS_SETSOCKOPT:
+ break;
+
+ case VKI_SYS_GETSOCKOPT:
+ ML_(linux_POST_sys_getsockopt)( tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1,
+ ARG2_2, ARG2_3, ARG2_4 );
+ break;
+
+ case VKI_SYS_GETSOCKNAME:
+ ML_(generic_POST_sys_getsockname)( tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_GETPEERNAME:
+ ML_(generic_POST_sys_getpeername)( tid, VG_(mk_SysRes_Success)(RES),
+ ARG2_0, ARG2_1, ARG2_2 );
+ break;
+
+ case VKI_SYS_SHUTDOWN:
+ break;
+
+ case VKI_SYS_SENDMSG:
+ break;
+
+ case VKI_SYS_RECVMSG:
+ ML_(generic_POST_sys_recvmsg)( tid, ARG2_0, ARG2_1 );
+ break;
+
+ default:
+ VG_(message)(Vg_DebugMsg,"FATAL: unhandled socketcall 0x%lx",ARG1);
+ VG_(core_panic)("... bye!\n");
+ break; /*NOTREACHED*/
+ }
+# undef ARG2_0
+# undef ARG2_1
+# undef ARG2_2
+# undef ARG2_3
+# undef ARG2_4
+# undef ARG2_5
+}
+
+PRE(sys_socket)
+{
+ PRINT("sys_socket ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
+ PRE_REG_READ3(long, "socket", int, domain, int, type, int, protocol);
+}
+POST(sys_socket)
+{
+ SysRes r;
+ vg_assert(SUCCESS);
+ r = ML_(generic_POST_sys_socket)(tid, VG_(mk_SysRes_Success)(RES));
+ SET_STATUS_from_SysRes(r);
+}
+
+/* setsockopt/getsockopt: buffer checking is delegated to the shared
+   Linux helpers (getsockopt needs the linux_ variants because optlen
+   is an in/out pointer). */
+PRE(sys_setsockopt)
+{
+   PRINT("sys_setsockopt ( %ld, %ld, %ld, %#lx, %ld )",ARG1,ARG2,ARG3,ARG4,ARG5);
+   PRE_REG_READ5(long, "setsockopt",
+                 int, s, int, level, int, optname,
+                 const void *, optval, int, optlen);
+   ML_(generic_PRE_sys_setsockopt)(tid, ARG1,ARG2,ARG3,ARG4,ARG5);
+}
+
+PRE(sys_getsockopt)
+{
+   PRINT("sys_getsockopt ( %ld, %ld, %ld, %#lx, %#lx )",ARG1,ARG2,ARG3,ARG4,ARG5);
+   PRE_REG_READ5(long, "getsockopt",
+                 int, s, int, level, int, optname,
+                 void *, optval, int, *optlen);
+   ML_(linux_PRE_sys_getsockopt)(tid, ARG1,ARG2,ARG3,ARG4,ARG5);
+}
+POST(sys_getsockopt)
+{
+   vg_assert(SUCCESS);
+   ML_(linux_POST_sys_getsockopt)(tid, VG_(mk_SysRes_Success)(RES),
+                                  ARG1,ARG2,ARG3,ARG4,ARG5);
+}
+
+/* connect/accept may block waiting for the peer, hence SfMayBlock. */
+PRE(sys_connect)
+{
+   *flags |= SfMayBlock;
+   PRINT("sys_connect ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "connect",
+                 int, sockfd, struct sockaddr *, serv_addr, int, addrlen);
+   ML_(generic_PRE_sys_connect)(tid, ARG1,ARG2,ARG3);
+}
+
+PRE(sys_accept)
+{
+   *flags |= SfMayBlock;
+   PRINT("sys_accept ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "accept",
+                 int, s, struct sockaddr *, addr, int, *addrlen);
+   ML_(generic_PRE_sys_accept)(tid, ARG1,ARG2,ARG3);
+}
+POST(sys_accept)
+{
+   SysRes r;
+   vg_assert(SUCCESS);
+   /* POST may veto the result (e.g. fd limit), so re-set the status. */
+   r = ML_(generic_POST_sys_accept)(tid, VG_(mk_SysRes_Success)(RES),
+                                    ARG1,ARG2,ARG3);
+   SET_STATUS_from_SysRes(r);
+}
+
+/* sendto/recvfrom: the generic helpers check msg/from buffers; recvfrom's
+   POST marks the received data and returned address as defined. */
+PRE(sys_sendto)
+{
+   *flags |= SfMayBlock;
+   PRINT("sys_sendto ( %ld, %#lx, %ld, %lu, %#lx, %ld )",ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
+   PRE_REG_READ6(long, "sendto",
+                 int, s, const void *, msg, int, len,
+                 unsigned int, flags,
+                 const struct sockaddr *, to, int, tolen);
+   ML_(generic_PRE_sys_sendto)(tid, ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
+}
+
+PRE(sys_recvfrom)
+{
+   *flags |= SfMayBlock;
+   PRINT("sys_recvfrom ( %ld, %#lx, %ld, %lu, %#lx, %#lx )",ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
+   PRE_REG_READ6(long, "recvfrom",
+                 int, s, void *, buf, int, len, unsigned int, flags,
+                 struct sockaddr *, from, int *, fromlen);
+   ML_(generic_PRE_sys_recvfrom)(tid, ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
+}
+POST(sys_recvfrom)
+{
+   vg_assert(SUCCESS);
+   ML_(generic_POST_sys_recvfrom)(tid, VG_(mk_SysRes_Success)(RES),
+                                  ARG1,ARG2,ARG3,ARG4,ARG5,ARG6);
+}
+
+/* sendmsg/recvmsg: the msghdr (iovecs, control data) is walked by the
+   generic helpers; only fd and msg pointer are passed through. */
+PRE(sys_sendmsg)
+{
+   *flags |= SfMayBlock;
+   PRINT("sys_sendmsg ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "sendmsg",
+                 int, s, const struct msghdr *, msg, int, flags);
+   ML_(generic_PRE_sys_sendmsg)(tid, ARG1,ARG2);
+}
+
+PRE(sys_recvmsg)
+{
+   *flags |= SfMayBlock;
+   PRINT("sys_recvmsg ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "recvmsg", int, s, struct msghdr *, msg, int, flags);
+   ML_(generic_PRE_sys_recvmsg)(tid, ARG1,ARG2);
+}
+POST(sys_recvmsg)
+{
+   ML_(generic_POST_sys_recvmsg)(tid, ARG1,ARG2);
+}
+
+//XXX: Semaphore code ripped from AMD64.
+PRE(sys_semget)
+{
+   PRINT("sys_semget ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "semget", vki_key_t, key, int, nsems, int, semflg);
+}
+
+PRE(sys_semop)
+{
+   *flags |= SfMayBlock;
+   PRINT("sys_semop ( %ld, %#lx, %lu )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "semop",
+                 int, semid, struct sembuf *, sops, unsigned, nsoops);
+   ML_(generic_PRE_sys_semop)(tid, ARG1,ARG2,ARG3);
+}
+
+PRE(sys_semctl)
+{
+   /* The declared type of arg4 depends on cmd; mask off IPC_64 so the
+      64-bit-layout variants take the same branch as the plain commands. */
+   switch (ARG3 & ~VKI_IPC_64) {
+   case VKI_IPC_INFO:
+   case VKI_SEM_INFO:
+      PRINT("sys_semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
+      PRE_REG_READ4(long, "semctl",
+                    int, semid, int, semnum, int, cmd, struct seminfo *, arg);
+      break;
+   case VKI_IPC_STAT:
+   case VKI_SEM_STAT:
+   case VKI_IPC_SET:
+      PRINT("sys_semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
+      PRE_REG_READ4(long, "semctl",
+                    int, semid, int, semnum, int, cmd, struct semid_ds *, arg);
+      break;
+   case VKI_GETALL:
+   case VKI_SETALL:
+      PRINT("sys_semctl ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
+      PRE_REG_READ4(long, "semctl",
+                    int, semid, int, semnum, int, cmd, unsigned short *, arg);
+      break;
+   default:
+      PRINT("sys_semctl ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
+      PRE_REG_READ3(long, "semctl",
+                    int, semid, int, semnum, int, cmd);
+      break;
+   }
+   ML_(generic_PRE_sys_semctl)(tid, ARG1,ARG2,ARG3,ARG4);
+}
+
+POST(sys_semctl)
+{
+   ML_(generic_POST_sys_semctl)(tid, RES,ARG1,ARG2,ARG3,ARG4);
+}
+
+PRE(sys_semtimedop)
+{
+   *flags |= SfMayBlock;
+   PRINT("sys_semtimedop ( %ld, %#lx, %lu, %#lx )",ARG1,ARG2,ARG3,ARG4);
+   PRE_REG_READ4(long, "semtimedop",
+                 int, semid, struct sembuf *, sops, unsigned, nsoops,
+                 struct timespec *, timeout);
+   ML_(generic_PRE_sys_semtimedop)(tid, ARG1,ARG2,ARG3,ARG4);
+}
+
+//amd64
+/* SysV message queue wrappers.  Note the differing position of msgflg:
+   for msgsnd it is the 4th argument, for msgrcv it is the 5th. */
+PRE(sys_msgget)
+{
+   PRINT("sys_msgget ( %ld, %ld )",ARG1,ARG2);
+   PRE_REG_READ2(long, "msgget", vki_key_t, key, int, msgflg);
+}
+
+PRE(sys_msgsnd)
+{
+   PRINT("sys_msgsnd ( %ld, %#lx, %ld, %ld )",ARG1,ARG2,ARG3,ARG4);
+   PRE_REG_READ4(long, "msgsnd",
+                 int, msqid, struct msgbuf *, msgp, vki_size_t, msgsz, int, msgflg);
+   ML_(linux_PRE_sys_msgsnd)(tid, ARG1,ARG2,ARG3,ARG4);
+   /* msgflg is ARG4 here: may block unless IPC_NOWAIT is set. */
+   if ((ARG4 & VKI_IPC_NOWAIT) == 0)
+      *flags |= SfMayBlock;
+}
+
+PRE(sys_msgrcv)
+{
+   PRINT("sys_msgrcv ( %ld, %#lx, %ld, %ld, %ld )",ARG1,ARG2,ARG3,ARG4,ARG5);
+   PRE_REG_READ5(long, "msgrcv",
+                 int, msqid, struct msgbuf *, msgp, vki_size_t, msgsz,
+                 long, msgtyp, int, msgflg);
+   ML_(linux_PRE_sys_msgrcv)(tid, ARG1,ARG2,ARG3,ARG4,ARG5);
+   /* Bug fix: for msgrcv the flags word is ARG5 (ARG4 is msgtyp).
+      Testing ARG4 made the blocking decision on the message type. */
+   if ((ARG5 & VKI_IPC_NOWAIT) == 0)
+      *flags |= SfMayBlock;
+}
+POST(sys_msgrcv)
+{
+   ML_(linux_POST_sys_msgrcv)(tid, RES,ARG1,ARG2,ARG3,ARG4,ARG5);
+}
+
+
+/* msgctl: cmd-dependent buffer checking lives in the linux_ helpers. */
+PRE(sys_msgctl)
+{
+   PRINT("sys_msgctl ( %ld, %ld, %#lx )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "msgctl",
+                 int, msqid, int, cmd, struct msqid_ds *, buf);
+   ML_(linux_PRE_sys_msgctl)(tid, ARG1,ARG2,ARG3);
+}
+POST(sys_msgctl)
+{
+   ML_(linux_POST_sys_msgctl)(tid, RES,ARG1,ARG2,ARG3);
+}
+
+//shared memory code from AMD64
+PRE(sys_shmget)
+{
+   PRINT("sys_shmget ( %ld, %ld, %ld )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "shmget", vki_key_t, key, vki_size_t, size, int, shmflg);
+}
+
+PRE(wrap_sys_shmat)
+{
+   UWord arg2tmp;
+   PRINT("wrap_sys_shmat ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "shmat",
+                 int, shmid, const void *, shmaddr, int, shmflg);
+   /* The helper picks/validates the attach address; 0 means the address
+      space manager rejected it, so fail with EINVAL.  Otherwise rewrite
+      ARG2 so the kernel attaches at the address Valgrind chose. */
+   arg2tmp = ML_(generic_PRE_sys_shmat)(tid, ARG1,ARG2,ARG3);
+   if (arg2tmp == 0)
+      SET_STATUS_Failure( VKI_EINVAL );
+   else
+      ARG2 = arg2tmp;
+}
+
+POST(wrap_sys_shmat)
+{
+   ML_(generic_POST_sys_shmat)(tid, RES,ARG1,ARG2,ARG3);
+}
+
+PRE(sys_shmdt)
+{
+   PRINT("sys_shmdt ( %#lx )",ARG1);
+   PRE_REG_READ1(long, "shmdt", const void *, shmaddr);
+   if (!ML_(generic_PRE_sys_shmdt)(tid, ARG1))
+      SET_STATUS_Failure( VKI_EINVAL );
+}
+
+POST(sys_shmdt)
+{
+   ML_(generic_POST_sys_shmdt)(tid, RES,ARG1);
+}
+
+PRE(sys_shmctl)
+{
+   PRINT("sys_shmctl ( %ld, %ld, %#lx )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "shmctl",
+                 int, shmid, int, cmd, struct shmid_ds *, buf);
+   ML_(generic_PRE_sys_shmctl)(tid, ARG1,ARG2,ARG3);
+}
+
+POST(sys_shmctl)
+{
+   ML_(generic_POST_sys_shmctl)(tid, RES,ARG1,ARG2,ARG3);
+}
+
+/* shutdown/bind/listen: no POST needed; shutdown may block flushing. */
+PRE(sys_shutdown)
+{
+   *flags |= SfMayBlock;
+   PRINT("sys_shutdown ( %ld, %ld )",ARG1,ARG2);
+   PRE_REG_READ2(int, "shutdown", int, s, int, how);
+}
+
+PRE(sys_bind)
+{
+   PRINT("sys_bind ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "bind",
+                 int, sockfd, struct sockaddr *, my_addr, int, addrlen);
+   ML_(generic_PRE_sys_bind)(tid, ARG1,ARG2,ARG3);
+}
+
+PRE(sys_listen)
+{
+   PRINT("sys_listen ( %ld, %ld )",ARG1,ARG2);
+   PRE_REG_READ2(long, "listen", int, s, int, backlog);
+}
+
+/* getsockname/getpeername/socketpair: PRE checks the out-buffers,
+   POST marks the kernel-written results defined. */
+PRE(sys_getsockname)
+{
+   PRINT("sys_getsockname ( %ld, %#lx, %#lx )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "getsockname",
+                 int, s, struct sockaddr *, name, int *, namelen);
+   ML_(generic_PRE_sys_getsockname)(tid, ARG1,ARG2,ARG3);
+}
+POST(sys_getsockname)
+{
+   vg_assert(SUCCESS);
+   ML_(generic_POST_sys_getsockname)(tid, VG_(mk_SysRes_Success)(RES),
+                                     ARG1,ARG2,ARG3);
+}
+
+PRE(sys_getpeername)
+{
+   PRINT("sys_getpeername ( %ld, %#lx, %#lx )",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "getpeername",
+                 int, s, struct sockaddr *, name, int *, namelen);
+   ML_(generic_PRE_sys_getpeername)(tid, ARG1,ARG2,ARG3);
+}
+POST(sys_getpeername)
+{
+   vg_assert(SUCCESS);
+   ML_(generic_POST_sys_getpeername)(tid, VG_(mk_SysRes_Success)(RES),
+                                     ARG1,ARG2,ARG3);
+}
+
+PRE(sys_socketpair)
+{
+   PRINT("sys_socketpair ( %ld, %ld, %ld, %#lx )",ARG1,ARG2,ARG3,ARG4);
+   PRE_REG_READ4(long, "socketpair",
+                 int, d, int, type, int, protocol, int [2], sv);
+   ML_(generic_PRE_sys_socketpair)(tid, ARG1,ARG2,ARG3,ARG4);
+}
+POST(sys_socketpair)
+{
+   vg_assert(SUCCESS);
+   ML_(generic_POST_sys_socketpair)(tid, VG_(mk_SysRes_Success)(RES),
+                                    ARG1,ARG2,ARG3,ARG4);
+}
+
+/* send/recv (the ARM-specific 4-arg syscalls): the flags word (ARG4) is
+   declared for PRE_REG_READ but not needed by the generic helpers. */
+PRE(sys_send)
+{
+   *flags |= SfMayBlock;
+   PRINT("sys_send ( %ld, %#lx, %ld, %lu )",ARG1,ARG2,ARG3,ARG4);
+   PRE_REG_READ4(long, "send",
+                 int, s, const void *, msg, int, len,
+                 unsigned int, flags);
+
+   ML_(generic_PRE_sys_send)( tid, ARG1, ARG2, ARG3 );
+}
+
+PRE(sys_recv)
+{
+   *flags |= SfMayBlock;
+   PRINT("sys_recv ( %ld, %#lx, %ld, %lu )",ARG1,ARG2,ARG3,ARG4);
+   PRE_REG_READ4(long, "recv",
+                 int, s, void *, buf, int, len, unsigned int, flags);
+   ML_(generic_PRE_sys_recv)( tid, ARG1, ARG2, ARG3 );
+}
+
+POST(sys_recv)
+{
+   ML_(generic_POST_sys_recv)( tid, RES, ARG1, ARG2, ARG3 );
+}
+
+/* Old-style mmap (args in a memory block) is not wired up on ARM EABI;
+   hitting it aborts deliberately so a missing wrapper is noticed. */
+PRE(sys_mmap)
+{
+   I_die_here;
+}
+
+PRE(sys_mmap2)
+{
+   SysRes r;
+
+   // Exactly like old_mmap() except:
+   //  - all 6 args are passed in regs, rather than in a memory-block.
+   //  - the file offset is specified in pagesize units rather than bytes,
+   //    so that it can be used for files bigger than 2^32 bytes.
+   // pagesize or 4K-size units in offset?  For ppc32/64-linux, this is
+   // 4K-sized.  Assert that the page size is 4K here for safety.
+   vg_assert(VKI_PAGE_SIZE == 4096);
+   PRINT("sys_mmap2 ( %#lx, %llu, %ld, %ld, %ld, %ld )",
+         ARG1, (ULong)ARG2, ARG3, ARG4, ARG5, ARG6 );
+   PRE_REG_READ6(long, "mmap2",
+                 unsigned long, start, unsigned long, length,
+                 unsigned long, prot,  unsigned long, flags,
+                 unsigned long, fd,    unsigned long, offset);
+
+   // ARG6 is in 4K units; widen to Off64T before scaling to bytes.
+   r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5,
+                                  4096 * (Off64T)ARG6 );
+   SET_STATUS_from_SysRes(r);
+}
+
+// XXX: lstat64/fstat64/stat64 are generic, but not necessarily
+// applicable to every architecture -- I think only to 32-bit archs.
+// We're going to need something like linux/core_os32.h for such
+// things, eventually, I think.  --njn
+PRE(sys_lstat64)
+{
+   PRINT("sys_lstat64 ( %#lx(%s), %#lx )",ARG1,(char*)ARG1,ARG2);
+   PRE_REG_READ2(long, "lstat64", char *, file_name, struct stat64 *, buf);
+   PRE_MEM_RASCIIZ( "lstat64(file_name)", ARG1 );
+   PRE_MEM_WRITE( "lstat64(buf)", ARG2, sizeof(struct vki_stat64) );
+}
+
+POST(sys_lstat64)
+{
+   vg_assert(SUCCESS);
+   // Only mark the buffer defined on success (RES == 0).
+   if (RES == 0) {
+      POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
+   }
+}
+
+PRE(sys_stat64)
+{
+   PRINT("sys_stat64 ( %#lx(%s), %#lx )",ARG1,(char*)ARG1,ARG2);
+   PRE_REG_READ2(long, "stat64", char *, file_name, struct stat64 *, buf);
+   PRE_MEM_RASCIIZ( "stat64(file_name)", ARG1 );
+   PRE_MEM_WRITE( "stat64(buf)", ARG2, sizeof(struct vki_stat64) );
+}
+
+POST(sys_stat64)
+{
+   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
+}
+
+PRE(sys_fstatat64)
+{
+   PRINT("sys_fstatat64 ( %ld, %#lx(%s), %#lx )",ARG1,ARG2,(char*)ARG2,ARG3);
+   PRE_REG_READ3(long, "fstatat64",
+                 int, dfd, char *, file_name, struct stat64 *, buf);
+   PRE_MEM_RASCIIZ( "fstatat64(file_name)", ARG2 );
+   PRE_MEM_WRITE( "fstatat64(buf)", ARG3, sizeof(struct vki_stat64) );
+}
+
+POST(sys_fstatat64)
+{
+   POST_MEM_WRITE( ARG3, sizeof(struct vki_stat64) );
+}
+
+PRE(sys_fstat64)
+{
+   PRINT("sys_fstat64 ( %ld, %#lx )",ARG1,ARG2);
+   PRE_REG_READ2(long, "fstat64", unsigned long, fd, struct stat64 *, buf);
+   PRE_MEM_WRITE( "fstat64(buf)", ARG2, sizeof(struct vki_stat64) );
+}
+
+POST(sys_fstat64)
+{
+   POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
+}
+
+
+/* Multiplexed SysV ipc(2) is not implemented for ARM; EABI uses the
+   individual syscalls (semop, shmat, ...) wired into the table below.
+   Abort loudly if a client reaches this via the legacy entry point. */
+PRE(sys_ipc)
+{
+   I_die_here;
+}
+
+POST(sys_ipc)
+{
+   I_die_here;
+}
+
+PRE(sys_clone)
+{
+   UInt cloneflags;
+
+   PRINT("sys_clone ( %lx, %#lx, %#lx, %#lx, %#lx )",ARG1,ARG2,ARG3,ARG4,ARG5);
+   PRE_REG_READ5(int, "clone",
+                 unsigned long, flags,
+                 void *, child_stack,
+                 int *, parent_tidptr,
+                 void *, child_tls,
+                 int *, child_tidptr);
+
+   /* Pre-validate the tid pointers so we can fail with EFAULT up front
+      instead of faulting inside our own clone machinery. */
+   if (ARG1 & VKI_CLONE_PARENT_SETTID) {
+      PRE_MEM_WRITE("clone(parent_tidptr)", ARG3, sizeof(Int));
+      if (!VG_(am_is_valid_for_client)(ARG3, sizeof(Int),
+                                             VKI_PROT_WRITE)) {
+         SET_STATUS_Failure( VKI_EFAULT );
+         return;
+      }
+   }
+   if (ARG1 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID)) {
+      PRE_MEM_WRITE("clone(child_tidptr)", ARG5, sizeof(Int));
+      if (!VG_(am_is_valid_for_client)(ARG5, sizeof(Int),
+                                             VKI_PROT_WRITE)) {
+         SET_STATUS_Failure( VKI_EFAULT );
+         return;
+      }
+   }
+   if (ARG1 & VKI_CLONE_SETTLS) {
+      /* NOTE(review): vki_modify_ldt_t is inherited from the x86 wrapper;
+         confirm this is the right TLS descriptor size for ARM. */
+      PRE_MEM_READ("clone(tls_user_desc)", ARG4, sizeof(vki_modify_ldt_t));
+      if (!VG_(am_is_valid_for_client)(ARG4, sizeof(vki_modify_ldt_t),
+                                             VKI_PROT_READ)) {
+         SET_STATUS_Failure( VKI_EFAULT );
+         return;
+      }
+   }
+
+   cloneflags = ARG1;
+
+   if (!ML_(client_signal_OK)(ARG1 & VKI_CSIGNAL)) {
+      SET_STATUS_Failure( VKI_EINVAL );
+      return;
+   }
+
+   /* Only look at the flags we really care about */
+   switch (cloneflags & (VKI_CLONE_VM | VKI_CLONE_FS
+                         | VKI_CLONE_FILES | VKI_CLONE_VFORK)) {
+   case VKI_CLONE_VM | VKI_CLONE_FS | VKI_CLONE_FILES:
+      /* thread creation */
+      SET_STATUS_from_SysRes(
+         do_clone(tid,
+                  ARG1,         /* flags */
+                  (Addr)ARG2,   /* child ESP */
+                  (Int *)ARG3,  /* parent_tidptr */
+                  (Int *)ARG5,  /* child_tidptr */
+                  (Addr)ARG4)); /* set_tls */
+      break;
+
+   case VKI_CLONE_VFORK | VKI_CLONE_VM: /* vfork */
+      /* FALLTHROUGH - assume vfork == fork */
+      cloneflags &= ~(VKI_CLONE_VFORK | VKI_CLONE_VM);
+
+   case 0: /* plain fork */
+      SET_STATUS_from_SysRes(
+         ML_(do_fork_clone)(tid,
+                       cloneflags,      /* flags */
+                       (Int *)ARG3,     /* parent_tidptr */
+                       (Int *)ARG5));   /* child_tidptr */
+      break;
+
+   default:
+      /* should we just ENOSYS? */
+      VG_(message)(Vg_UserMsg, "");
+      VG_(message)(Vg_UserMsg, "Unsupported clone() flags: 0x%lx", ARG1);
+      VG_(message)(Vg_UserMsg, "");
+      VG_(message)(Vg_UserMsg, "The only supported clone() uses are:");
+      VG_(message)(Vg_UserMsg, " - via a threads library (LinuxThreads or NPTL)");
+      VG_(message)(Vg_UserMsg, " - via the implementation of fork or vfork");
+      VG_(message)(Vg_UserMsg, " - for the Quadrics Elan3 user-space driver");
+      VG_(unimplemented)
+         ("Valgrind does not support general clone().");
+   }
+
+   if (SUCCESS) {
+      if (ARG1 & VKI_CLONE_PARENT_SETTID)
+         POST_MEM_WRITE(ARG3, sizeof(Int));
+      if (ARG1 & (VKI_CLONE_CHILD_SETTID | VKI_CLONE_CHILD_CLEARTID))
+         POST_MEM_WRITE(ARG5, sizeof(Int));
+
+      /* Thread creation was successful; let the child have the chance
+         to run */
+      *flags |= SfYieldAfter;
+   }
+}
+
+PRE(sys_sigreturn)
+{
+   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
+      an explanation of what follows. */
+
+   PRINT("sys_sigreturn ( )");
+
+   vg_assert(VG_(is_valid_tid)(tid));
+   vg_assert(tid >= 1 && tid < VG_N_THREADS);
+   vg_assert(VG_(is_running_thread)(tid));
+
+   /* Restore register state from frame and remove it */
+   VG_(sigframe_destroy)(tid, False);   /* False: non-RT frame */
+
+   /* Tell the driver not to update the guest state with the "result",
+      and set a bogus result to keep it happy. */
+   *flags |= SfNoWriteResult;
+   SET_STATUS_Success(0);
+
+   /* Check to see if any signals arose as a result of this. */
+   *flags |= SfPollAfter;
+}
+
+PRE(sys_rt_sigreturn)
+{
+   /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
+      an explanation of what follows. */
+
+   PRINT("rt_sigreturn ( )");
+
+   vg_assert(VG_(is_valid_tid)(tid));
+   vg_assert(tid >= 1 && tid < VG_N_THREADS);
+   vg_assert(VG_(is_running_thread)(tid));
+
+   /* Restore register state from frame and remove it */
+   VG_(sigframe_destroy)(tid, True);    /* True: RT frame */
+
+   /* Tell the driver not to update the guest state with the "result",
+      and set a bogus result to keep it happy. */
+   *flags |= SfNoWriteResult;
+   SET_STATUS_Success(0);
+
+   /* Check to see if any signals arose as a result of this. */
+   *flags |= SfPollAfter;
+}
+
+/* Old-style sigaction/sigsuspend are not yet ported to ARM; they abort
+   deliberately so the missing implementation is noticed, despite being
+   wired into the syscall table below. */
+PRE(sys_sigaction)
+{
+   I_die_here;
+}
+
+POST(sys_sigaction)
+{  I_die_here;
+}
+
+PRE(sys_sigsuspend)
+{  I_die_here;
+}
+
+/* Very much ARM specific */
+
+PRE(sys_set_tls)
+{
+   PRE_REG_READ1(long, "set_tls", unsigned long, addr);
+
+   SET_STATUS_from_SysRes( sys_set_tls( tid, ARG1 ) );
+}
+
+PRE(sys_cacheflush)
+{
+   /* No memory checking needed: the kernel only flushes icache/dcache
+      for the given range; nothing is read or written by us. */
+   PRINT("cacheflush (%lx, %#lx, %#lx)",ARG1,ARG2,ARG3);
+   PRE_REG_READ3(long, "cacheflush", void*, addrlow,void*, addrhigh,int, flags);
+}
+
+
+#undef PRE
+#undef POST
+
+/* ---------------------------------------------------------------------
+ The arm/Linux syscall table
+ ------------------------------------------------------------------ */
+
+/* OABI syscall numbers are the EABI ones offset by 0x900000; this build
+   targets EABI, so the base is zero (the OABI value is kept for reference). */
+#if 0
+#define __NR_OABI_SYSCALL_BASE 0x900000
+#else
+#define __NR_OABI_SYSCALL_BASE 0x0
+#endif
+
+#define PLAX_(sysno, name)    WRAPPER_ENTRY_X_(arm_linux, sysno, name)
+#define PLAXY(sysno, name)    WRAPPER_ENTRY_XY(arm_linux, sysno, name)
+
+// This table maps from __NR_xxx syscall numbers (from
+// linux/include/asm-arm/unistd.h) to the appropriate PRE/POST sys_foo()
+// wrappers on arm (as per sys_call_table in linux/arch/arm/kernel/entry.S).
+//
+// For those syscalls not handled by Valgrind, the annotation indicate its
+// arch/OS combination, eg. */* (generic), */Linux (Linux only), ?/?
+// (unknown).
+
+static SyscallTableEntry syscall_main_table[] = {
+// NOTE(review): many trailing "// NNN" comments below are inherited from
+// the x86 table and do not all match ARM EABI numbers (cf. "// 326 on arm"
+// against futimesat) -- verify against arch/arm/include/asm/unistd.h.
+//zz   //   (restart_syscall)                             // 0
+   GENX_(__NR_exit, sys_exit), // 1
+   GENX_(__NR_fork, sys_fork), // 2
+   GENXY(__NR_read, sys_read), // 3
+   GENX_(__NR_write, sys_write), // 4
+
+   GENXY(__NR_open, sys_open), // 5
+   GENXY(__NR_close, sys_close), // 6
+//   GENXY(__NR_waitpid, sys_waitpid), // 7
+   GENXY(__NR_creat, sys_creat), // 8
+   GENX_(__NR_link, sys_link), // 9
+
+   GENX_(__NR_unlink, sys_unlink), // 10
+   GENX_(__NR_execve, sys_execve), // 11
+   GENX_(__NR_chdir, sys_chdir), // 12
+   GENXY(__NR_time, sys_time), // 13
+   GENX_(__NR_mknod, sys_mknod), // 14
+
+   GENX_(__NR_chmod, sys_chmod), // 15
+//zz   LINX_(__NR_lchown, sys_lchown16), // 16
+//   GENX_(__NR_break, sys_ni_syscall), // 17
+//zz   //   (__NR_oldstat, sys_stat), // 18 (obsolete)
+   LINX_(__NR_lseek, sys_lseek), // 19
+
+   GENX_(__NR_getpid, sys_getpid), // 20
+   LINX_(__NR_mount, sys_mount), // 21
+   LINX_(__NR_umount, sys_oldumount), // 22
+   LINX_(__NR_setuid, sys_setuid16), // 23 ## P
+   LINX_(__NR_getuid, sys_getuid16), // 24 ## P
+//zz
+//zz   //   (__NR_stime, sys_stime), // 25 * (SVr4,SVID,X/OPEN)
+//   PLAXY(__NR_ptrace, sys_ptrace), // 26
+   GENX_(__NR_alarm, sys_alarm), // 27
+//zz   //   (__NR_oldfstat, sys_fstat), // 28 * L -- obsolete
+   GENX_(__NR_pause, sys_pause), // 29
+
+   LINX_(__NR_utime, sys_utime), // 30
+//   GENX_(__NR_stty, sys_ni_syscall), // 31
+//   GENX_(__NR_gtty, sys_ni_syscall), // 32
+   GENX_(__NR_access, sys_access), // 33
+   GENX_(__NR_nice, sys_nice), // 34
+
+//   GENX_(__NR_ftime, sys_ni_syscall), // 35
+   GENX_(__NR_sync, sys_sync), // 36
+   GENX_(__NR_kill, sys_kill), // 37
+   GENX_(__NR_rename, sys_rename), // 38
+   GENX_(__NR_mkdir, sys_mkdir), // 39
+
+   GENX_(__NR_rmdir, sys_rmdir), // 40
+   GENXY(__NR_dup, sys_dup), // 41
+   LINXY(__NR_pipe, sys_pipe), // 42
+   GENXY(__NR_times, sys_times), // 43
+//   GENX_(__NR_prof, sys_ni_syscall), // 44
+//zz
+   GENX_(__NR_brk, sys_brk), // 45
+   LINX_(__NR_setgid, sys_setgid16), // 46
+   LINX_(__NR_getgid, sys_getgid16), // 47
+//zz   //   (__NR_signal, sys_signal), // 48 */* (ANSI C)
+   LINX_(__NR_geteuid, sys_geteuid16), // 49
+
+   LINX_(__NR_getegid, sys_getegid16), // 50
+   GENX_(__NR_acct, sys_acct), // 51
+   LINX_(__NR_umount2, sys_umount), // 52
+//   GENX_(__NR_lock, sys_ni_syscall), // 53
+   LINXY(__NR_ioctl, sys_ioctl), // 54
+
+   LINXY(__NR_fcntl, sys_fcntl), // 55
+//   GENX_(__NR_mpx, sys_ni_syscall), // 56
+   GENX_(__NR_setpgid, sys_setpgid), // 57
+//   GENX_(__NR_ulimit, sys_ni_syscall), // 58
+//zz   //   (__NR_oldolduname, sys_olduname), // 59 Linux -- obsolete
+//zz
+   GENX_(__NR_umask, sys_umask), // 60
+   GENX_(__NR_chroot, sys_chroot), // 61
+//zz   //   (__NR_ustat, sys_ustat) // 62 SVr4 -- deprecated
+   GENXY(__NR_dup2, sys_dup2), // 63
+   GENX_(__NR_getppid, sys_getppid), // 64
+
+   GENX_(__NR_getpgrp, sys_getpgrp), // 65
+   GENX_(__NR_setsid, sys_setsid), // 66
+   PLAXY(__NR_sigaction, sys_sigaction), // 67
+//zz   //   (__NR_sgetmask, sys_sgetmask), // 68 */* (ANSI C)
+//zz   //   (__NR_ssetmask, sys_ssetmask), // 69 */* (ANSI C)
+//zz
+   LINX_(__NR_setreuid, sys_setreuid16), // 70
+   LINX_(__NR_setregid, sys_setregid16), // 71
+   PLAX_(__NR_sigsuspend, sys_sigsuspend), // 72
+   LINXY(__NR_sigpending, sys_sigpending), // 73
+//zz   //   (__NR_sethostname, sys_sethostname), // 74 */*
+//zz
+   GENX_(__NR_setrlimit, sys_setrlimit), // 75
+   GENXY(__NR_getrlimit, sys_old_getrlimit), // 76
+   GENXY(__NR_getrusage, sys_getrusage), // 77
+   GENXY(__NR_gettimeofday, sys_gettimeofday), // 78
+   GENX_(__NR_settimeofday, sys_settimeofday), // 79
+
+   LINXY(__NR_getgroups, sys_getgroups16), // 80
+   LINX_(__NR_setgroups, sys_setgroups16), // 81
+//   PLAX_(__NR_select, old_select), // 82
+   GENX_(__NR_symlink, sys_symlink), // 83
+//zz   //   (__NR_oldlstat, sys_lstat), // 84 -- obsolete
+//zz
+   GENX_(__NR_readlink, sys_readlink), // 85
+//zz   //   (__NR_uselib, sys_uselib), // 86 */Linux
+//zz   //   (__NR_swapon, sys_swapon), // 87 */Linux
+//zz   //   (__NR_reboot, sys_reboot), // 88 */Linux
+//zz   //   (__NR_readdir, old_readdir), // 89 -- superseded
+//zz
+//   PLAX_(__NR_mmap, old_mmap), // 90
+   GENXY(__NR_munmap, sys_munmap), // 91
+   GENX_(__NR_truncate, sys_truncate), // 92
+   GENX_(__NR_ftruncate, sys_ftruncate), // 93
+   GENX_(__NR_fchmod, sys_fchmod), // 94
+
+   LINX_(__NR_fchown, sys_fchown16), // 95
+   GENX_(__NR_getpriority, sys_getpriority), // 96
+   GENX_(__NR_setpriority, sys_setpriority), // 97
+//   GENX_(__NR_profil, sys_ni_syscall), // 98
+   GENXY(__NR_statfs, sys_statfs), // 99
+
+   GENXY(__NR_fstatfs, sys_fstatfs), // 100
+//   LINX_(__NR_ioperm, sys_ioperm), // 101
+   PLAXY(__NR_socketcall, sys_socketcall), // 102
+   LINXY(__NR_syslog, sys_syslog), // 103
+   GENXY(__NR_setitimer, sys_setitimer), // 104
+
+   GENXY(__NR_getitimer, sys_getitimer), // 105
+   GENXY(__NR_stat, sys_newstat), // 106
+   GENXY(__NR_lstat, sys_newlstat), // 107
+   GENXY(__NR_fstat, sys_newfstat), // 108
+//zz   //   (__NR_olduname, sys_uname), // 109 -- obsolete
+//zz
+//   GENX_(__NR_iopl, sys_iopl), // 110
+   LINX_(__NR_vhangup, sys_vhangup), // 111
+//   GENX_(__NR_idle, sys_ni_syscall), // 112
+//   PLAXY(__NR_vm86old, sys_vm86old), // 113 __NR_syscall... weird
+   GENXY(__NR_wait4, sys_wait4), // 114
+//zz
+//zz   //   (__NR_swapoff, sys_swapoff), // 115 */Linux
+   LINXY(__NR_sysinfo, sys_sysinfo), // 116
+   PLAXY(__NR_ipc, sys_ipc), // 117
+   GENX_(__NR_fsync, sys_fsync), // 118
+   PLAX_(__NR_sigreturn, sys_sigreturn), // 119 ?/Linux
+
+   PLAX_(__NR_clone, sys_clone), // 120
+//zz   //   (__NR_setdomainname, sys_setdomainname), // 121 */*(?)
+   GENXY(__NR_uname, sys_newuname), // 122
+//   PLAX_(__NR_modify_ldt, sys_modify_ldt), // 123
+//zz   LINXY(__NR_adjtimex, sys_adjtimex), // 124
+//zz
+   GENXY(__NR_mprotect, sys_mprotect), // 125
+   // LINXY(__NR_sigprocmask, sys_sigprocmask), // 126
+//zz   // Nb: create_module() was removed 2.4-->2.6
+//   GENX_(__NR_create_module, sys_ni_syscall), // 127
+   LINX_(__NR_init_module, sys_init_module), // 128
+   LINX_(__NR_delete_module, sys_delete_module), // 129
+//zz
+//zz   // Nb: get_kernel_syms() was removed 2.4-->2.6
+//   GENX_(__NR_get_kernel_syms, sys_ni_syscall), // 130
+   LINX_(__NR_quotactl, sys_quotactl), // 131
+   GENX_(__NR_getpgid, sys_getpgid), // 132
+   GENX_(__NR_fchdir, sys_fchdir), // 133
+//zz   //   (__NR_bdflush, sys_bdflush), // 134 */Linux
+//zz
+//zz   //   (__NR_sysfs, sys_sysfs), // 135 SVr4
+   LINX_(__NR_personality, sys_personality), // 136
+//   GENX_(__NR_afs_syscall, sys_ni_syscall), // 137
+   LINX_(__NR_setfsuid, sys_setfsuid16), // 138
+   LINX_(__NR_setfsgid, sys_setfsgid16), // 139
+
+   LINXY(__NR__llseek, sys_llseek), // 140
+   GENXY(__NR_getdents, sys_getdents), // 141
+   GENX_(__NR__newselect, sys_select), // 142
+   GENX_(__NR_flock, sys_flock), // 143
+   GENX_(__NR_msync, sys_msync), // 144
+
+   GENXY(__NR_readv, sys_readv), // 145
+   GENX_(__NR_writev, sys_writev), // 146
+   GENX_(__NR_getsid, sys_getsid), // 147
+   GENX_(__NR_fdatasync, sys_fdatasync), // 148
+   LINXY(__NR__sysctl, sys_sysctl), // 149
+
+   GENX_(__NR_mlock, sys_mlock), // 150
+   GENX_(__NR_munlock, sys_munlock), // 151
+   GENX_(__NR_mlockall, sys_mlockall), // 152
+   LINX_(__NR_munlockall, sys_munlockall), // 153
+   LINXY(__NR_sched_setparam, sys_sched_setparam), // 154
+
+   LINXY(__NR_sched_getparam, sys_sched_getparam), // 155
+   LINX_(__NR_sched_setscheduler, sys_sched_setscheduler), // 156
+   LINX_(__NR_sched_getscheduler, sys_sched_getscheduler), // 157
+   LINX_(__NR_sched_yield, sys_sched_yield), // 158
+   LINX_(__NR_sched_get_priority_max, sys_sched_get_priority_max),// 159
+
+   LINX_(__NR_sched_get_priority_min, sys_sched_get_priority_min),// 160
+//zz   //LINX?(__NR_sched_rr_get_interval, sys_sched_rr_get_interval), // 161 */*
+   GENXY(__NR_nanosleep, sys_nanosleep), // 162
+   GENX_(__NR_mremap, sys_mremap), // 163
+   LINX_(__NR_setresuid, sys_setresuid16), // 164
+
+   LINXY(__NR_getresuid, sys_getresuid16), // 165
+//   PLAXY(__NR_vm86, sys_vm86), // 166 x86/Linux-only
+//   GENX_(__NR_query_module, sys_ni_syscall), // 167
+   GENXY(__NR_poll, sys_poll), // 168
+//zz   //   (__NR_nfsservctl, sys_nfsservctl), // 169 */Linux
+//zz
+   LINX_(__NR_setresgid, sys_setresgid16), // 170
+   LINXY(__NR_getresgid, sys_getresgid16), // 171
+   LINXY(__NR_prctl, sys_prctl), // 172
+   PLAX_(__NR_rt_sigreturn, sys_rt_sigreturn), // 173
+   LINXY(__NR_rt_sigaction, sys_rt_sigaction), // 174
+
+   LINXY(__NR_rt_sigprocmask, sys_rt_sigprocmask), // 175
+   LINXY(__NR_rt_sigpending, sys_rt_sigpending), // 176
+   LINXY(__NR_rt_sigtimedwait, sys_rt_sigtimedwait),// 177
+   LINXY(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo),// 178
+   LINX_(__NR_rt_sigsuspend, sys_rt_sigsuspend), // 179
+
+   // GENXY(__NR_pread64, sys_pread64_on32bitplat), // 180
+   //GENX_(__NR_pwrite64, sys_pwrite64_on32bitplat), // 181
+   LINX_(__NR_chown, sys_chown16), // 182
+   GENXY(__NR_getcwd, sys_getcwd), // 183
+   LINXY(__NR_capget, sys_capget), // 184
+
+   LINX_(__NR_capset, sys_capset), // 185
+   GENXY(__NR_sigaltstack, sys_sigaltstack), // 186
+   LINXY(__NR_sendfile, sys_sendfile), // 187
+//   GENXY(__NR_getpmsg, sys_getpmsg), // 188
+//   GENX_(__NR_putpmsg, sys_putpmsg), // 189
+
+   // Nb: we treat vfork as fork
+   GENX_(__NR_vfork, sys_fork), // 190
+   GENXY(__NR_ugetrlimit, sys_getrlimit), // 191
+   PLAX_(__NR_mmap2, sys_mmap2), // 192
+   GENX_(__NR_truncate64, sys_truncate64), // 193
+   GENX_(__NR_ftruncate64, sys_ftruncate64), // 194
+
+   PLAXY(__NR_stat64, sys_stat64), // 195
+   PLAXY(__NR_lstat64, sys_lstat64), // 196
+   PLAXY(__NR_fstat64, sys_fstat64), // 197
+   GENX_(__NR_lchown32, sys_lchown), // 198
+   GENX_(__NR_getuid32, sys_getuid), // 199
+
+   GENX_(__NR_getgid32, sys_getgid), // 200
+   GENX_(__NR_geteuid32, sys_geteuid), // 201
+   GENX_(__NR_getegid32, sys_getegid), // 202
+   GENX_(__NR_setreuid32, sys_setreuid), // 203
+   GENX_(__NR_setregid32, sys_setregid), // 204
+
+   GENXY(__NR_getgroups32, sys_getgroups), // 205
+   GENX_(__NR_setgroups32, sys_setgroups), // 206
+   GENX_(__NR_fchown32, sys_fchown), // 207
+   LINX_(__NR_setresuid32, sys_setresuid), // 208
+   LINXY(__NR_getresuid32, sys_getresuid), // 209
+
+   LINX_(__NR_setresgid32, sys_setresgid), // 210
+   LINXY(__NR_getresgid32, sys_getresgid), // 211
+   GENX_(__NR_chown32, sys_chown), // 212
+   GENX_(__NR_setuid32, sys_setuid), // 213
+   GENX_(__NR_setgid32, sys_setgid), // 214
+
+   LINX_(__NR_setfsuid32, sys_setfsuid), // 215
+   LINX_(__NR_setfsgid32, sys_setfsgid), // 216
+//zz   //   (__NR_pivot_root, sys_pivot_root), // 217 */Linux
+   GENXY(__NR_mincore, sys_mincore), // 218
+   GENX_(__NR_madvise, sys_madvise), // 219
+
+   GENXY(__NR_getdents64, sys_getdents64), // 220
+   LINXY(__NR_fcntl64, sys_fcntl64), // 221
+//   GENX_(222, sys_ni_syscall), // 222
+//   PLAXY(223, sys_syscall223), // 223 // sys_bproc?
+   LINX_(__NR_gettid, sys_gettid), // 224
+
+//zz   //   (__NR_readahead, sys_readahead), // 225 */(Linux?)
+   LINX_(__NR_setxattr, sys_setxattr), // 226
+   LINX_(__NR_lsetxattr, sys_lsetxattr), // 227
+   LINX_(__NR_fsetxattr, sys_fsetxattr), // 228
+   LINXY(__NR_getxattr, sys_getxattr), // 229
+
+   LINXY(__NR_lgetxattr, sys_lgetxattr), // 230
+   LINXY(__NR_fgetxattr, sys_fgetxattr), // 231
+   LINXY(__NR_listxattr, sys_listxattr), // 232
+   LINXY(__NR_llistxattr, sys_llistxattr), // 233
+   LINXY(__NR_flistxattr, sys_flistxattr), // 234
+
+   LINX_(__NR_removexattr, sys_removexattr), // 235
+   LINX_(__NR_lremovexattr, sys_lremovexattr), // 236
+   LINX_(__NR_fremovexattr, sys_fremovexattr), // 237
+   LINXY(__NR_tkill, sys_tkill), // 238 */Linux
+   LINXY(__NR_sendfile64, sys_sendfile64), // 239
+
+   LINXY(__NR_futex, sys_futex), // 240
+   LINX_(__NR_sched_setaffinity, sys_sched_setaffinity), // 241
+   LINXY(__NR_sched_getaffinity, sys_sched_getaffinity), // 242
+//   PLAX_(__NR_set_thread_area, sys_set_thread_area), // 243
+//   PLAX_(__NR_get_thread_area, sys_get_thread_area), // 244
+
+   LINXY(__NR_io_setup, sys_io_setup), // 245
+   LINX_(__NR_io_destroy, sys_io_destroy), // 246
+   LINXY(__NR_io_getevents, sys_io_getevents), // 247
+   LINX_(__NR_io_submit, sys_io_submit), // 248
+   LINXY(__NR_io_cancel, sys_io_cancel), // 249
+
+//   LINX_(__NR_fadvise64, sys_fadvise64), // 250 */(Linux?)
+   GENX_(251, sys_ni_syscall), // 251
+   LINX_(__NR_exit_group, sys_exit_group), // 252
+//   GENXY(__NR_lookup_dcookie, sys_lookup_dcookie), // 253
+   LINXY(__NR_epoll_create, sys_epoll_create), // 254
+
+   LINX_(__NR_epoll_ctl, sys_epoll_ctl), // 255
+   LINXY(__NR_epoll_wait, sys_epoll_wait), // 256
+//zz   //   (__NR_remap_file_pages, sys_remap_file_pages), // 257 */Linux
+   LINX_(__NR_set_tid_address, sys_set_tid_address), // 258
+   LINXY(__NR_timer_create, sys_timer_create), // 259
+
+   LINXY(__NR_timer_settime, sys_timer_settime), // (timer_create+1)
+   LINXY(__NR_timer_gettime, sys_timer_gettime), // (timer_create+2)
+   LINX_(__NR_timer_getoverrun, sys_timer_getoverrun),//(timer_create+3)
+   LINX_(__NR_timer_delete, sys_timer_delete), // (timer_create+4)
+   LINX_(__NR_clock_settime, sys_clock_settime), // (timer_create+5)
+
+   LINXY(__NR_clock_gettime, sys_clock_gettime), // (timer_create+6)
+   LINXY(__NR_clock_getres, sys_clock_getres), // (timer_create+7)
+   LINXY(__NR_clock_nanosleep, sys_clock_nanosleep),// (timer_create+8) */*
+   GENXY(__NR_statfs64, sys_statfs64), // 268
+   GENXY(__NR_fstatfs64, sys_fstatfs64), // 269
+
+   LINX_(__NR_tgkill, sys_tgkill), // 270 */Linux
+   GENX_(__NR_utimes, sys_utimes), // 271
+//   LINX_(__NR_fadvise64_64, sys_fadvise64_64), // 272 */(Linux?)
+   GENX_(__NR_vserver, sys_ni_syscall), // 273
+   LINX_(__NR_mbind, sys_mbind), // 274 ?/?
+
+   LINXY(__NR_get_mempolicy, sys_get_mempolicy), // 275 ?/?
+   LINX_(__NR_set_mempolicy, sys_set_mempolicy), // 276 ?/?
+   LINXY(__NR_mq_open, sys_mq_open), // 277
+   LINX_(__NR_mq_unlink, sys_mq_unlink), // (mq_open+1)
+   LINX_(__NR_mq_timedsend, sys_mq_timedsend), // (mq_open+2)
+
+   LINXY(__NR_mq_timedreceive, sys_mq_timedreceive),// (mq_open+3)
+   LINX_(__NR_mq_notify, sys_mq_notify), // (mq_open+4)
+   LINXY(__NR_mq_getsetattr, sys_mq_getsetattr), // (mq_open+5)
+   LINXY(__NR_waitid, sys_waitid), // 280
+
+   // ARM EABI has individual socket/ipc syscalls (unlike x86's
+   // socketcall/ipc multiplexors), hence the PLAX_ wrappers here.
+   PLAXY(__NR_socket, sys_socket), // 281
+   PLAX_(__NR_bind, sys_bind), // 282
+   PLAX_(__NR_connect, sys_connect), // 283
+   PLAX_(__NR_listen, sys_listen), // 284
+   PLAXY(__NR_accept, sys_accept), // 285
+   PLAXY(__NR_getsockname, sys_getsockname), // 286
+   PLAXY(__NR_getpeername, sys_getpeername), // 287
+   PLAXY(__NR_socketpair, sys_socketpair), // 288
+   PLAX_(__NR_send, sys_send),
+   PLAX_(__NR_sendto, sys_sendto), // 290
+   PLAXY(__NR_recv, sys_recv),
+   PLAXY(__NR_recvfrom, sys_recvfrom), // 292
+   PLAX_(__NR_shutdown, sys_shutdown), // 293
+   PLAX_(__NR_setsockopt, sys_setsockopt), // 294
+   PLAXY(__NR_getsockopt, sys_getsockopt), // 295
+   PLAX_(__NR_sendmsg, sys_sendmsg), // 296
+   PLAXY(__NR_recvmsg, sys_recvmsg), // 297
+   PLAX_(__NR_semop, sys_semop), // 298
+   PLAX_(__NR_semget, sys_semget), // 299
+   PLAXY(__NR_semctl, sys_semctl), // 300
+   PLAX_(__NR_msgget, sys_msgget),
+   PLAX_(__NR_msgsnd, sys_msgsnd),
+   PLAXY(__NR_msgrcv, sys_msgrcv),
+   PLAXY(__NR_msgctl, sys_msgctl), // 304
+   PLAX_(__NR_semtimedop, sys_semtimedop), // 312
+
+   LINX_(__NR_add_key, sys_add_key), // 286
+   LINX_(__NR_request_key, sys_request_key), // 287
+   LINXY(__NR_keyctl, sys_keyctl), // not 288...
+//   LINX_(__NR_ioprio_set, sys_ioprio_set), // 289
+
+//   LINX_(__NR_ioprio_get, sys_ioprio_get), // 290
+   LINX_(__NR_inotify_init, sys_inotify_init), // 291
+   LINX_(__NR_inotify_add_watch, sys_inotify_add_watch), // 292
+   LINX_(__NR_inotify_rm_watch, sys_inotify_rm_watch), // 293
+//   LINX_(__NR_migrate_pages, sys_migrate_pages), // 294
+
+   LINXY(__NR_openat, sys_openat), // 295
+   LINX_(__NR_mkdirat, sys_mkdirat), // 296
+   LINX_(__NR_mknodat, sys_mknodat), // 297
+   LINX_(__NR_fchownat, sys_fchownat), // 298
+   LINX_(__NR_futimesat, sys_futimesat), // 326 on arm
+
+   PLAXY(__NR_fstatat64, sys_fstatat64), // 300
+   LINX_(__NR_unlinkat, sys_unlinkat), // 301
+   LINX_(__NR_renameat, sys_renameat), // 302
+   LINX_(__NR_linkat, sys_linkat), // 303
+   LINX_(__NR_symlinkat, sys_symlinkat), // 304
+
+   LINX_(__NR_readlinkat, sys_readlinkat), //
+   LINX_(__NR_fchmodat, sys_fchmodat), //
+   LINX_(__NR_faccessat, sys_faccessat), //
+   PLAXY(__NR_shmat, wrap_sys_shmat), //305
+   PLAXY(__NR_shmdt, sys_shmdt), //306
+   PLAX_(__NR_shmget, sys_shmget), //307
+   PLAXY(__NR_shmctl, sys_shmctl), // 308
+//   LINX_(__NR_pselect6, sys_pselect6), //
+//   LINXY(__NR_ppoll, sys_ppoll), // 309
+
+//   LINX_(__NR_unshare, sys_unshare), // 310
+   LINX_(__NR_set_robust_list, sys_set_robust_list), // 311
+   LINXY(__NR_get_robust_list, sys_get_robust_list), // 312
+//   LINX_(__NR_splice, sys_ni_syscall), // 313
+//   LINX_(__NR_sync_file_range, sys_sync_file_range), // 314
+
+//   LINX_(__NR_tee, sys_ni_syscall), // 315
+//   LINX_(__NR_vmsplice, sys_ni_syscall), // 316
+//   LINX_(__NR_move_pages, sys_ni_syscall), // 317
+//   LINX_(__NR_getcpu, sys_ni_syscall), // 318
+//   LINXY(__NR_epoll_pwait, sys_epoll_pwait), // 319
+
+   LINX_(__NR_utimensat, sys_utimensat), // 320
+   LINXY(__NR_signalfd, sys_signalfd), // 321
+   LINXY(__NR_timerfd_create, sys_timerfd_create), // 322
+   LINX_(__NR_eventfd, sys_eventfd), // 323
+//   LINX_(__NR_fallocate, sys_ni_syscall), // 324
+   LINXY(__NR_timerfd_settime, sys_timerfd_settime), // 325
+   LINXY(__NR_timerfd_gettime, sys_timerfd_gettime) // 326
+};
+
+
+/* These are not in the main table because their indexes are not small
+   integers, but rather values close to one million.  So their
+   inclusion would force the main table to be huge (about 8 MB). */
+
+static SyscallTableEntry ste___ARM_set_tls
+   = { WRAPPER_PRE_NAME(arm_linux,sys_set_tls), NULL };
+
+static SyscallTableEntry ste___ARM_cacheflush
+   = { WRAPPER_PRE_NAME(arm_linux,sys_cacheflush), NULL };
+
+/* Map a syscall number to its wrapper entry: first the dense main
+   table, then the two out-of-line ARM-private syscalls.  Returns NULL
+   when no wrapper exists. */
+SyscallTableEntry* ML_(get_linux_syscall_entry) ( UInt sysno )
+{
+   const UInt syscall_main_table_size
+      = sizeof(syscall_main_table) / sizeof(syscall_main_table[0]);
+
+   /* Is it in the contiguous initial section of the table? */
+   if (sysno < syscall_main_table_size) {
+      SyscallTableEntry* sys = &syscall_main_table[sysno];
+      /* A zero 'before' field marks a hole in the table (no wrapper). */
+      if (sys->before == NULL)
+         return NULL; /* no entry */
+      else
+         return sys;
+   }
+
+   /* Check if it's one of the out-of-line entries. */
+   switch (sysno) {
+      case __NR_ARM_set_tls:    return &ste___ARM_set_tls;
+      case __NR_ARM_cacheflush: return &ste___ARM_cacheflush;
+      default: break;
+   }
+
+   /* Can't find a wrapper */
+   return NULL;
+}
+
+#endif // defined(VGP_arm_linux)
+
+/*--------------------------------------------------------------------*/
+/*--- end syswrap-arm-linux.c ---*/
+/*--------------------------------------------------------------------*/
: "=m" (tst->status)
: "r" (vgts_empty), "n" (__NR_exit), "m" (tst->os_state.exitcode));
}
+#elif defined(VGP_arm_linux)
+ asm volatile (
+ "str %1, %0\n" /* set tst->status = VgTs_Empty */
+ "mov r7, %2\n" /* set %r7 = __NR_exit */
+ "ldr r0, %3\n" /* set %r0 = tst->os_state.exitcode */
+ "svc 0x00000000\n" /* exit(tst->os_state.exitcode) */
+ : "=m" (tst->status)
+ : "r" (VgTs_Empty), "n" (__NR_exit), "m" (tst->os_state.exitcode));
#else
# error Unknown platform
#endif
/* Since this is the fork() form of clone, we don't need all that
VG_(clone) stuff */
-#if defined(VGP_x86_linux) || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux)
+#if defined(VGP_x86_linux) \
+ || defined(VGP_ppc32_linux) || defined(VGP_ppc64_linux) \
+ || defined(VGP_arm_linux)
res = VG_(do_syscall5)( __NR_clone, flags,
(UWord)NULL, (UWord)parent_tidptr,
(UWord)NULL, (UWord)child_tidptr );
canonical->arg7 = 0;
canonical->arg8 = 0;
+#elif defined(VGP_arm_linux)
+ VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
+ canonical->sysno = gst->guest_R7;
+ canonical->arg1 = gst->guest_R0;
+ canonical->arg2 = gst->guest_R1;
+ canonical->arg3 = gst->guest_R2;
+ canonical->arg4 = gst->guest_R3;
+ canonical->arg5 = gst->guest_R4;
+ canonical->arg6 = gst->guest_R5;
+ canonical->arg7 = 0;
+ canonical->arg8 = 0;
+
#elif defined(VGP_ppc32_aix5)
VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
canonical->sysno = gst->guest_GPR2;
gst->guest_GPR7 = canonical->arg5;
gst->guest_GPR8 = canonical->arg6;
+#elif defined(VGP_arm_linux)
+ VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
+ gst->guest_R7 = canonical->sysno;
+ gst->guest_R0 = canonical->arg1;
+ gst->guest_R1 = canonical->arg2;
+ gst->guest_R2 = canonical->arg3;
+ gst->guest_R3 = canonical->arg4;
+ gst->guest_R4 = canonical->arg5;
+ gst->guest_R5 = canonical->arg6;
+
#elif defined(VGP_ppc32_aix5)
VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
gst->guest_GPR2 = canonical->sysno;
canonical->sres = VG_(mk_SysRes_ppc64_linux)( gst->guest_GPR3, cr0so );
canonical->what = SsComplete;
+# elif defined(VGP_arm_linux)
+ VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
+ canonical->sres = VG_(mk_SysRes_arm_linux)( gst->guest_R0 );
+ canonical->what = SsComplete;
+
# elif defined(VGP_ppc32_aix5)
VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
canonical->sres = VG_(mk_SysRes_ppc32_aix5)( gst->guest_GPR3,
if (sr_isError(canonical->sres)) {
/* This isn't exactly right, in that really a Failure with res
not in the range 1 .. 4095 is unrepresentable in the
- Linux-x86 scheme. Oh well. */
+ Linux-amd64 scheme. Oh well. */
gst->guest_RAX = - (Long)sr_Err(canonical->sres);
} else {
gst->guest_RAX = sr_Res(canonical->sres);
VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
OFFSET_ppc64_CR0_0, sizeof(UChar) );
+# elif defined(VGP_arm_linux)
+ VexGuestARMState* gst = (VexGuestARMState*)gst_vanilla;
+ vg_assert(canonical->what == SsComplete);
+ if (sr_isError(canonical->sres)) {
+ /* This isn't exactly right, in that really a Failure with res
+ not in the range 1 .. 4095 is unrepresentable in the
+ Linux-arm scheme. Oh well. */
+ gst->guest_R0 = - (Int)sr_Err(canonical->sres);
+ } else {
+ gst->guest_R0 = sr_Res(canonical->sres);
+ }
+ VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
+ OFFSET_arm_R0, sizeof(UWord) );
+
# elif defined(VGP_ppc32_aix5)
VexGuestPPC32State* gst = (VexGuestPPC32State*)gst_vanilla;
vg_assert(canonical->what == SsComplete);
layout->uu_arg7 = -1; /* impossible value */
layout->uu_arg8 = -1; /* impossible value */
+#elif defined(VGP_arm_linux)
+ layout->o_sysno = OFFSET_arm_R7;
+ layout->o_arg1 = OFFSET_arm_R0;
+ layout->o_arg2 = OFFSET_arm_R1;
+ layout->o_arg3 = OFFSET_arm_R2;
+ layout->o_arg4 = OFFSET_arm_R3;
+ layout->o_arg5 = OFFSET_arm_R4;
+ layout->o_arg6 = OFFSET_arm_R5;
+ layout->uu_arg7 = -1; /* impossible value */
+ layout->uu_arg8 = -1; /* impossible value */
+
#elif defined(VGP_ppc32_aix5)
layout->o_sysno = OFFSET_ppc32_GPR2;
layout->o_arg1 = OFFSET_ppc32_GPR3;
const SyscallTableEntry* sys = NULL;
# if defined(VGO_linux)
- if (syscallno < ML_(syscall_table_size) &&
- ML_(syscall_table)[syscallno].before != NULL)
- sys = &ML_(syscall_table)[syscallno];
+ sys = ML_(get_linux_syscall_entry)( syscallno );
# elif defined(VGP_ppc32_aix5)
sys = ML_(get_ppc32_aix5_syscall_entry) ( syscallno );
vg_assert(p[0] == 0x44 && p[1] == 0x0 && p[2] == 0x0 && p[3] == 0x2);
}
+#elif defined(VGP_arm_linux)
+ arch->vex.guest_R15 -= 4; // sizeof(arm instr)
+ {
+ UChar *p = (UChar*)arch->vex.guest_R15;
+
+ if ((p[3] & 0xF) != 0xF)
+ VG_(message)(Vg_DebugMsg,
+ "?! restarting over syscall that is not syscall at %#llx %02x %02x %02x %02x\n",
+ arch->vex.guest_R15 + 0ULL, p[0], p[1], p[2], p[3]);
+
+ vg_assert((p[3] & 0xF) == 0xF);
+ }
+
#elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
/* Hmm. This is problematic, because on AIX the kernel resumes
after a syscall at LR, not at the insn following SC. Hence
// arch/OS combination, eg. */* (generic), */Linux (Linux only), ?/?
// (unknown).
-const SyscallTableEntry ML_(syscall_table)[] = {
+static SyscallTableEntry syscall_table[] = {
//zz // (restart_syscall) // 0
GENX_(__NR_exit, sys_exit), // 1
GENX_(__NR_fork, sys_fork), // 2
LINXY(__NR_perf_counter_open, sys_perf_counter_open) // 336
};
-const UInt ML_(syscall_table_size) =
- sizeof(ML_(syscall_table)) / sizeof(ML_(syscall_table)[0]);
+SyscallTableEntry* ML_(get_linux_syscall_entry) ( UInt sysno )
+{
+ const UInt syscall_table_size
+ = sizeof(syscall_table) / sizeof(syscall_table[0]);
+
+ /* Is it in the contiguous initial section of the table? */
+ if (sysno < syscall_table_size) {
+ SyscallTableEntry* sys = &syscall_table[sysno];
+ if (sys->before == NULL)
+ return NULL; /* no entry */
+ else
+ return sys;
+ }
+
+ /* Can't find a wrapper */
+ return NULL;
+}
#endif // defined(VGP_x86_linux)
# undef UD2_1024
# undef UD2_PAGE
+/*---------------- arm-linux ----------------*/
+
+#elif defined(VGP_arm_linux)
+
+# define UD2_4 .word 0xFFFFFFFF
+# define UD2_16 UD2_4 ; UD2_4 ; UD2_4 ; UD2_4
+# define UD2_64 UD2_16 ; UD2_16 ; UD2_16 ; UD2_16
+# define UD2_256 UD2_64 ; UD2_64 ; UD2_64 ; UD2_64
+# define UD2_1024 UD2_256 ; UD2_256 ; UD2_256 ; UD2_256
+# define UD2_PAGE UD2_1024 ; UD2_1024 ; UD2_1024 ; UD2_1024
+
+ /* a leading page of unexecutable code */
+ UD2_PAGE
+
+.global VG_(trampoline_stuff_start)
+VG_(trampoline_stuff_start):
+
+.global VG_(arm_linux_REDIR_FOR_strlen)
+VG_(arm_linux_REDIR_FOR_strlen):
+ mov r2, r0
+ ldrb r0, [r0, #0] @ zero_extendqisi2
+ @ lr needed for prologue
+ cmp r0, #0
+ bxeq lr
+ mov r0, #0
+.L5:
+ add r0, r0, #1
+ ldrb r3, [r0, r2] @ zero_extendqisi2
+ cmp r3, #0
+ bne .L5
+ bx lr
+ UD2_4
+
+//.global VG_(arm_linux_REDIR_FOR_index)
+//VG_(arm_linux_REDIR_FOR_index):
+// ldrb r3, [r0, #0] @ zero_extendqisi2
+// and r1, r1, #255
+// cmp r3, r1
+// @ lr needed for prologue
+// bne .L9
+// bx lr
+//.L12:
+// ldrb r3, [r0, #1]! @ zero_extendqisi2
+// cmp r3, r1
+// beq .L11
+//.L9:
+// cmp r3, #0
+// bne .L12
+// mov r0, #0
+// bx lr
+//.L11:
+// bx lr
+// UD2_4
+
+.global VG_(arm_linux_REDIR_FOR_memcpy)
+VG_(arm_linux_REDIR_FOR_memcpy):
+ stmfd sp!, {r4, r5, lr}
+ subs lr, r2, #0
+ mov r5, r0
+ beq .L2
+ cmp r0, r1
+ bls .L4
+ add r3, r0, lr
+ add r1, lr, r1
+ cmp lr, #3
+ sub r4, r3, #1
+ sub r0, r1, #1
+ ble .L28
+ sub ip, r3, #5
+ sub r1, r1, #5
+.L8:
+ ldrb r3, [r1, #4] @ zero_extendqisi2
+ sub lr, lr, #4
+ strb r3, [ip, #4]
+ ldrb r2, [r1, #3] @ zero_extendqisi2
+ cmp lr, #3
+ strb r2, [ip, #3]
+ ldrb r3, [r1, #2] @ zero_extendqisi2
+ mov r4, ip
+ strb r3, [ip, #2]
+ ldrb r2, [r1, #1] @ zero_extendqisi2
+ mov r0, r1
+ strb r2, [ip, #1]
+ sub r1, r1, #4
+ sub ip, ip, #4
+ bgt .L8
+ cmp lr, #0
+ beq .L2
+.L28:
+ sub r2, lr, #1
+.L21:
+ sub r2, r2, #1
+ ldrb r3, [r0], #-1 @ zero_extendqisi2
+ cmn r2, #1
+ strb r3, [r4], #-1
+ bne .L21
+.L2:
+ mov r0, r5
+ ldmfd sp!, {r4, r5, pc}
+.L4:
+ bcs .L2
+ cmp lr, #3
+ mov ip, r0
+ ble .L29
+.L19:
+ ldrb r3, [r1, #0] @ zero_extendqisi2
+ sub lr, lr, #4
+ strb r3, [ip, #0]
+ ldrb r2, [r1, #1] @ zero_extendqisi2
+ cmp lr, #3
+ strb r2, [ip, #1]
+ ldrb r3, [r1, #2] @ zero_extendqisi2
+ strb r3, [ip, #2]
+ ldrb r2, [r1, #3] @ zero_extendqisi2
+ add r1, r1, #4
+ strb r2, [ip, #3]
+ add ip, ip, #4
+ bgt .L19
+ cmp lr, #0
+ beq .L2
+.L29:
+ sub r2, lr, #1
+.L20:
+ sub r2, r2, #1
+ ldrb r3, [r1], #1 @ zero_extendqisi2
+ cmn r2, #1
+ strb r3, [ip], #1
+ bne .L20
+ mov r0, r5
+ ldmfd sp!, {r4, r5, pc}
+ UD2_4
+
+.global VG_(trampoline_stuff_end)
+VG_(trampoline_stuff_end):
+
+ /* and a trailing page of unexecutable code */
+ UD2_PAGE
+
+# undef UD2_4
+# undef UD2_16
+# undef UD2_64
+# undef UD2_256
+# undef UD2_1024
+# undef UD2_PAGE
+
/*---------------- ppc32-aix5 ----------------*/
#else
#if defined(VGP_ppc32_aix5)
#if defined(VGO_linux)
/* Let the linker know we don't need an executable stack */
-.section .note.GNU-stack,"",@progbits
+# if defined(VGP_arm_linux)
+ .section .note.GNU-stack,"",%progbits
+# else
+ .section .note.GNU-stack,"",@progbits
+# endif
#endif
/*--------------------------------------------------------------------*/
VG_(clo_profile_flags) > 0
? (void*) &VG_(run_innerloop__dispatch_profiled)
: (void*) &VG_(run_innerloop__dispatch_unprofiled);
-# elif defined(VGA_ppc32) || defined(VGA_ppc64)
+# elif defined(VGA_ppc32) || defined(VGA_ppc64) \
+ || defined(VGA_arm)
vta.dispatch = NULL;
# else
# error "Unknown arch"
#include "pub_core_aspacemgr.h"
#include "pub_core_mallocfree.h" // VG_(out_of_memory_NORETURN)
+// JRS FIXME get rid of this somehow
+#if defined(VGP_arm_linux)
+# include "pub_core_vkiscnums.h" // __ARM_NR_cacheflush
+# include "pub_core_syscall.h" // VG_(do_syscallN)
+#endif
+
+
/* #define DEBUG_TRANSTAB */
vg_assert(cls == 32 || cls == 64 || cls == 128);
startaddr &= ~(cls - 1);
- for (addr = startaddr; addr < endaddr; addr += cls)
- asm volatile("dcbst 0,%0" : : "r" (addr));
- asm volatile("sync");
- for (addr = startaddr; addr < endaddr; addr += cls)
- asm volatile("icbi 0,%0" : : "r" (addr));
- asm volatile("sync; isync");
+ for (addr = startaddr; addr < endaddr; addr += cls) {
+ __asm__ __volatile__("dcbst 0,%0" : : "r" (addr));
+ }
+ __asm__ __volatile__("sync");
+ for (addr = startaddr; addr < endaddr; addr += cls) {
+ __asm__ __volatile__("icbi 0,%0" : : "r" (addr));
+ }
+ __asm__ __volatile__("sync; isync");
# elif defined(VGA_x86)
/* no need to do anything, hardware provides coherence */
# elif defined(VGA_amd64)
/* no need to do anything, hardware provides coherence */
+# elif defined(VGP_arm_linux)
+ /* ARM cache flushes are privileged, so we must defer to the kernel. */
+ Addr startaddr = (Addr) ptr;
+ Addr endaddr = startaddr + nbytes;
+ VG_(do_syscall2)(__NR_ARM_cacheflush, startaddr, endaddr);
+
# else
# error "Unknown ARCH"
# endif
# include "libvex_guest_ppc32.h"
#elif defined(VGA_ppc64)
# include "libvex_guest_ppc64.h"
+#elif defined(VGA_arm)
+# include "libvex_guest_arm.h"
#else
# error Unknown arch
#endif
// For jmp_buf
#include <setjmp.h>
+
+/* ---------------------------------------------------------------------
+ A struct to hold starting values for stack unwinding.
+ ------------------------------------------------------------------ */
+
+/* This really shouldn't be here. But putting it elsewhere leads to a
+ veritable swamp of new module cycles. */
+
+/* To support CFA-based stack unwinding, and stack unwinding in
+ general, we need to be able to get hold of the values of specific
+ registers, in order to start the unwinding process. This is
+ unavoidably arch and platform dependent. Here is a struct which
+ holds the relevant values. All platforms must have a program
+ counter and a stack pointer register, but the other fields (frame
+ pointer? link register? misc other regs?) are ad-hoc. Note, the
+ common fields are 64-bit, so as to make this host-independent. */
+
+typedef
+ struct {
+ ULong r_pc; /* x86:EIP, amd64:RIP, ppc:CIA, arm:R15 */
+ ULong r_sp; /* x86:ESP, amd64:RSP, ppc:R1, arm:R13 */
+ union {
+ struct {
+ UInt r_ebp;
+ } X86;
+ struct {
+ ULong r_rbp;
+ } AMD64;
+ struct {
+ UInt r_lr;
+ } PPC32;
+ struct {
+ ULong r_lr;
+ } PPC64;
+ struct {
+ UInt r14;
+ UInt r12;
+ UInt r11;
+ } ARM;
+ } misc;
+ }
+ UnwindStartRegs;
+
+
#endif // __PUB_CORE_BASICS_H
/*--------------------------------------------------------------------*/
__attribute__ ((__noreturn__))
extern void VG_(core_panic) ( Char* str );
__attribute__ ((__noreturn__))
-extern void VG_(core_panic_at) ( Char* str,
- Addr ip, Addr sp, Addr fp, Addr lr );
+extern void VG_(core_panic_at) ( Char* str, UnwindStartRegs* );
/* Called when some unhandleable client behaviour is detected.
Prints a msg and aborts. */
# define VG_ELF_MACHINE EM_PPC64
# define VG_ELF_CLASS ELFCLASS64
# define VG_PLAT_USES_PPCTOC 1
+#elif defined(VGP_arm_linux)
+# define VG_ELF_DATA2XXX ELFDATA2LSB
+# define VG_ELF_MACHINE EM_ARM
+# define VG_ELF_CLASS ELFCLASS32
+# undef VG_PLAT_USES_PPCTOC
#elif defined(VGO_aix5)
# undef VG_ELF_DATA2XXX
# undef VG_ELF_MACHINE
# define VG_INSTR_PTR guest_CIA
# define VG_STACK_PTR guest_GPR1
# define VG_FRAME_PTR guest_GPR1 // No frame ptr for PPC
+#elif defined(VGA_arm)
+# define VG_INSTR_PTR guest_R15
+# define VG_STACK_PTR guest_R13
+# define VG_FRAME_PTR guest_R11
#else
# error Unknown arch
#endif
#define VG_O_INSTR_PTR (offsetof(VexGuestArchState, VG_INSTR_PTR))
+//-------------------------------------------------------------
+// Get hold of the values needed for a stack unwind, for the specified
+// (client) thread.
+void VG_(get_UnwindStartRegs) ( /*OUT*/UnwindStartRegs* regs,
+ ThreadId tid );
+
+
//-------------------------------------------------------------
/* Details about the capabilities of the underlying (host) CPU. These
details are acquired by (1) enquiring with the CPU at startup, or
// greater than 8.
#if defined(VGP_x86_linux) || \
defined(VGP_ppc32_linux) || \
- defined(VGP_ppc32_aix5)
+ defined(VGP_arm_linux)
# define VG_MIN_MALLOC_SZB 8
// Nb: We always use 16 bytes for Darwin, even on 32-bits, so it can be used
// for any AltiVec- or SSE-related type. This matches the Darwin libc.
#elif defined(VGP_amd64_linux) || \
defined(VGP_ppc64_linux) || \
defined(VGP_ppc64_aix5) || \
+ defined(VGP_ppc32_aix5) || \
defined(VGP_x86_darwin) || \
defined(VGP_amd64_darwin)
# define VG_MIN_MALLOC_SZB 16
UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
/*OUT*/Addr* ips, UInt n_ips,
/*OUT*/Addr* sps, /*OUT*/Addr* fps,
- Addr ip, Addr sp, Addr fp, Addr lr,
- Addr fp_min, Addr fp_max_orig );
+ UnwindStartRegs* startRegs,
+ Addr fp_max_orig );
#endif // __PUB_CORE_STACKTRACE_H
extern SysRes VG_(mk_SysRes_amd64_linux) ( Long val );
extern SysRes VG_(mk_SysRes_ppc32_linux) ( UInt val, UInt cr0so );
extern SysRes VG_(mk_SysRes_ppc64_linux) ( ULong val, ULong cr0so );
+extern SysRes VG_(mk_SysRes_arm_linux) ( Int val );
extern SysRes VG_(mk_SysRes_ppc32_aix5) ( UInt val, UInt err );
extern SysRes VG_(mk_SysRes_ppc64_aix5) ( ULong val, ULong err );
extern SysRes VG_(mk_SysRes_x86_darwin) ( UChar scclass, Bool isErr,
typedef VexGuestPPC32State VexGuestArchState;
#elif defined(VGA_ppc64)
typedef VexGuestPPC64State VexGuestArchState;
+#elif defined(VGA_arm)
+ typedef VexGuestARMState VexGuestArchState;
#else
# error Unknown architecture
#endif
extern void VG_(ppctoc_magic_redirect_return_stub);
#endif
+#if defined(VGP_arm_linux)
+extern UInt VG_(arm_linux_REDIR_FOR_strlen)( void* );
+//extern void* VG_(arm_linux_REDIR_FOR_index) ( void*, Int );
+extern void* VG_(arm_linux_REDIR_FOR_memcpy)( void*, void*, Int );
+#endif
+
#if defined(VGP_ppc32_aix5)
/* A label (sans dot) marking the client start point for ppc32_aix5.
This function is entered with r3 holding a pointer to the
On ppc32/ppc64, the bottom two bits of instruction addresses are
zero, which means that function causes only 1/4 of the entries to
ever be used. So instead the function is '(address >>u
- 2)[VG_TT_FAST_BITS-1 : 0]' on those targets. */
+ 2)[VG_TT_FAST_BITS-1 : 0]' on those targets.
+
+ On ARM we do like ppc32/ppc64, although that will have to be
+ revisited when we come to implement Thumb. */
#define VG_TT_FAST_BITS 15
#define VG_TT_FAST_SIZE (1 << VG_TT_FAST_BITS)
like a good place to put it. */
#if defined(VGA_x86) || defined(VGA_amd64)
# define VG_TT_FAST_HASH(_addr) ((((UWord)(_addr)) ) & VG_TT_FAST_MASK)
-#elif defined(VGA_ppc32) || defined(VGA_ppc64)
+#elif defined(VGA_ppc32) || defined(VGA_ppc64) || defined(VGA_arm)
# define VG_TT_FAST_HASH(_addr) ((((UWord)(_addr)) >> 2) & VG_TT_FAST_MASK)
#else
# error "VG_TT_FAST_HASH: unknown platform"
TBD
+arm-linux
+~~~~~~~~~
+
+Reg Callee Arg
+Name Saves? Reg? Comment Vex-uses?
+--------------------------------------------------------------
+r0 int#1 int[31:0] retreg? avail
+r1 int#2 int[63:32] retreg? avail
+r2 int#3 avail
+r3 int#4 avail
+r4 y avail
+r5 y avail
+r6 y avail
+r7 y avail
+r8 y GSP
+r9 y (but only on Linux; not in general) avail
+r10 y avail
+r11 y avail
+r12 possibly used by linker? unavail
+r13(sp) unavail
+r14(lr) unavail
+r15(pc) unavail
+
+VFP: d8-d15 are callee-saved
+r12 (IP) is probably available for use as a caller-saved
+register; but instead we use it as an intermediate for
+holding the address for F32/F64 spills, since the VFP load/store
+insns have reg+offset forms for offsets only up to 1020, which
+often isn't enough.
+
+
ppc32-aix5
~~~~~~~~~~
#undef VG_BIGENDIAN
#undef VG_LITTLEENDIAN
-#if defined(VGA_x86) || defined(VGA_amd64)
+#if defined(VGA_x86) || defined(VGA_amd64) || defined (VGA_arm)
# define VG_LITTLEENDIAN 1
#elif defined(VGA_ppc32) || defined(VGA_ppc64)
# define VG_BIGENDIAN 1
/* Regparmness */
#if defined(VGA_x86)
# define VG_REGPARM(n) __attribute__((regparm(n)))
-#elif defined(VGA_amd64) || defined(VGA_ppc32) || defined(VGA_ppc64)
+#elif defined(VGA_amd64) || defined(VGA_ppc32) \
+ || defined(VGA_ppc64) || defined(VGA_arm)
# define VG_REGPARM(n) /* */
#else
# error Unknown arch
# define VG_CLREQ_SZB 14 // length of a client request, may
// be larger than VG_MAX_INSTR_SZB
# define VG_STACK_REDZONE_SZB 0 // number of addressable bytes below %RSP
+
#elif defined(VGP_amd64_linux)
# define VG_MIN_INSTR_SZB 1
# define VG_MAX_INSTR_SZB 16
# define VG_CLREQ_SZB 19
# define VG_STACK_REDZONE_SZB 128
+
#elif defined(VGP_ppc32_linux)
# define VG_MIN_INSTR_SZB 4
# define VG_MAX_INSTR_SZB 4
# define VG_CLREQ_SZB 20
# define VG_STACK_REDZONE_SZB 0
+
#elif defined(VGP_ppc64_linux)
# define VG_MIN_INSTR_SZB 4
# define VG_MAX_INSTR_SZB 4
# define VG_CLREQ_SZB 20
# define VG_STACK_REDZONE_SZB 288 // number of addressable bytes below R1
- // from 64-bit PowerPC ELF ABI Supplement 1.7
+ // from 64-bit PowerPC ELF ABI
+ // Supplement 1.7
+
+#elif defined(VGP_arm_linux)
+# define VG_MIN_INSTR_SZB 4
+# define VG_MAX_INSTR_SZB 4
+# define VG_CLREQ_SZB 28
+# define VG_STACK_REDZONE_SZB 0
+
#elif defined(VGP_ppc32_aix5)
# define VG_MIN_INSTR_SZB 4
# define VG_MAX_INSTR_SZB 4
8-alignment of the area to be messed with. So let's just say
224 instead. Gdb has a similar kludge. */
# define VG_STACK_REDZONE_SZB 224
+
#elif defined(VGP_ppc64_aix5)
# define VG_MIN_INSTR_SZB 4
# define VG_MAX_INSTR_SZB 4
# define VG_CLREQ_SZB 20
# define VG_STACK_REDZONE_SZB 288 // is this right?
+
#elif defined(VGP_x86_darwin)
# define VG_MIN_INSTR_SZB 1 // min length of native instruction
# define VG_MAX_INSTR_SZB 16 // max length of native instruction
# define VG_CLREQ_SZB 14 // length of a client request, may
// be larger than VG_MAX_INSTR_SZB
# define VG_STACK_REDZONE_SZB 0 // number of addressable bytes below %RSP
+
#elif defined(VGP_amd64_darwin)
# define VG_MIN_INSTR_SZB 1
# define VG_MAX_INSTR_SZB 16
# define VG_CLREQ_SZB 19
# define VG_STACK_REDZONE_SZB 128
+
#else
# error Unknown platform
#endif
#elif defined(VGP_ppc64_linux)
# include "vki/vki-scnums-ppc64-linux.h"
+#elif defined(VGP_arm_linux)
+# include "vki/vki-scnums-arm-linux.h"
+
#elif defined(VGP_ppc32_aix5) || defined(VGP_ppc64_aix5)
// Nothing: vki-scnums-aix5.h only contains stuff suitable for inclusion
// in C files, not asm files. So unlike all the other
identifying architectures, which are different to the ones we use
within the rest of Valgrind. Note, __powerpc__ is active for both
32 and 64-bit PPC, whereas __powerpc64__ is only active for the
- latter (on Linux, that is). */
+ latter (on Linux, that is).
+
+ Misc note: how to find out what's predefined in gcc by default:
+ gcc -Wp,-dM somefile.c
+*/
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
#undef PLAT_ppc32_aix5
#undef PLAT_ppc64_aix5
-
#if defined(_AIX) && defined(__64BIT__)
# define PLAT_ppc64_aix5 1
#elif defined(_AIX) && !defined(__64BIT__)
# define PLAT_x86_darwin 1
#elif defined(__APPLE__) && defined(__x86_64__)
# define PLAT_amd64_darwin 1
-#elif defined(__i386__)
+#elif defined(__linux__) && defined(__i386__)
# define PLAT_x86_linux 1
-#elif defined(__x86_64__)
+#elif defined(__linux__) && defined(__x86_64__)
# define PLAT_amd64_linux 1
-#elif defined(__powerpc__) && !defined(__powerpc64__)
+#elif defined(__linux__) && defined(__powerpc__) && !defined(__powerpc64__)
# define PLAT_ppc32_linux 1
-#elif defined(__powerpc__) && defined(__powerpc64__)
+#elif defined(__linux__) && defined(__powerpc__) && defined(__powerpc64__)
# define PLAT_ppc64_linux 1
+#elif defined(__linux__) && defined(__arm__)
+# define PLAT_arm_linux 1
#else
/* If we're not compiling for our target platform, don't generate
any inline asms. */
#endif /* PLAT_ppc64_linux */
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "mov r12, r12, ror #3 ; mov r12, r12, ror #13 \n\t" \
+ "mov r12, r12, ror #29 ; mov r12, r12, ror #19 \n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST( \
+ _zzq_rlval, _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ { volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile("mov r3, %1\n\t" /*default*/ \
+ "mov r4, %2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* R3 = client_request ( R4 ) */ \
+ "orr r10, r10, r10\n\t" \
+ "mov %0, r3" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "cc","memory", "r3", "r4"); \
+ _zzq_rlval = _zzq_result; \
+ }
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* R3 = guest_NRADDR */ \
+ "orr r11, r11, r11\n\t" \
+ "mov %0, r3" \
+ : "=r" (__addr) \
+ : \
+ : "cc", "memory", "r3" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir *%R4 */ \
+ "orr r12, r12, r12\n\t"
+
+#endif /* PLAT_arm_linux */
+
/* ------------------------ ppc32-aix5 ------------------------- */
#if defined(PLAT_ppc32_aix5)
#endif /* PLAT_ppc64_linux */
+/* ------------------------- arm-linux ------------------------- */
+
+#if defined(PLAT_arm_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14"
+
+/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
+ long) == 4. */
+
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "push {r0} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #4 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "push {r0, r1} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #8 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "push {r0, r1, r2} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #12 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "push {r0, r1, r2, r3} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #16 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #20 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+/* Call an orig-fn taking 10 args.  args 1..4 go in r0..r3; arg 10 is
+   pushed first, then args 5..9 (6 words / 24 bytes total on the
+   stack, popped after the call).  r4 holds the target address.
+   Stack args are pushed in two groups so arg10 ends up above
+   args 5..9, matching the AAPCS stack-argument order. */
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #40] \n\t" \
+ "push {r0} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #24 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+/* Call an orig-fn taking 11 args.  args 1..4 go in r0..r3; args 10..11
+   are pushed first, then args 5..9 (7 words / 28 bytes on the stack,
+   popped after the call).  r4 holds the target address.
+   NOTE(review): the 28-byte push leaves sp only 4-byte aligned across
+   the call; AAPCS requires 8-byte sp alignment -- confirm. */
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "push {r0, r1} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #28 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+/* Call an orig-fn taking 12 args.  args 1..4 go in r0..r3; args 10..12
+   are pushed first, then args 5..9 (8 words / 32 bytes on the stack,
+   popped after the call).  r4 holds the target address for the
+   branch-and-link-via-r4 sequence. */
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "ldr r0, [%1, #40] \n\t" \
+ "ldr r1, [%1, #44] \n\t" \
+ "ldr r2, [%1, #48] \n\t" \
+ "push {r0, r1, r2} \n\t" \
+ "ldr r0, [%1, #20] \n\t" \
+ "ldr r1, [%1, #24] \n\t" \
+ "ldr r2, [%1, #28] \n\t" \
+ "ldr r3, [%1, #32] \n\t" \
+ "ldr r4, [%1, #36] \n\t" \
+ "push {r0, r1, r2, r3, r4} \n\t" \
+ "ldr r0, [%1, #4] \n\t" \
+ "ldr r1, [%1, #8] \n\t" \
+ "ldr r2, [%1, #12] \n\t" \
+ "ldr r3, [%1, #16] \n\t" \
+ "ldr r4, [%1] \n\t" /* target->r4 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ "add sp, sp, #32 \n\t" \
+ "mov %0, r0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_arm_linux */
+
/* ------------------------ ppc32-aix5 ------------------------- */
#if defined(PLAT_ppc32_aix5)
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64_linux
+#undef PLAT_arm_linux
#undef PLAT_ppc32_aix5
#undef PLAT_ppc64_aix5
--- /dev/null
+
+/*--------------------------------------------------------------------*/
+/*--- arm/Linux-specific kernel interface. vki-arm-linux.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VKI_ARM_LINUX_H
+#define __VKI_ARM_LINUX_H
+
+// arm is little-endian.
+#define VKI_LITTLE_ENDIAN 1
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/types.h
+// NOTE(review): this header was seeded from the x86 port, hence the
+// asm-i386 provenance comments throughout; each section should be
+// re-checked against asm-arm before non-x86-identical values are used.
+//----------------------------------------------------------------------
+
+typedef unsigned char __vki_u8;
+
+typedef __signed__ short __vki_s16;
+typedef unsigned short __vki_u16;
+
+typedef __signed__ int __vki_s32;
+typedef unsigned int __vki_u32;
+
+typedef __signed__ long long __vki_s64;
+typedef unsigned long long __vki_u64;
+
+typedef unsigned short vki_u16;
+
+typedef unsigned int vki_u32;
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/page.h
+//----------------------------------------------------------------------
+
+/* PAGE_SHIFT determines the page size */
+#define VKI_PAGE_SHIFT 12
+#define VKI_PAGE_SIZE (1UL << VKI_PAGE_SHIFT)
+#define VKI_MAX_PAGE_SHIFT VKI_PAGE_SHIFT
+#define VKI_MAX_PAGE_SIZE VKI_PAGE_SIZE
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/signal.h
+//----------------------------------------------------------------------
+
+#define VKI_MINSIGSTKSZ 2048
+
+#define VKI_SIG_BLOCK 0 /* for blocking signals */
+#define VKI_SIG_UNBLOCK 1 /* for unblocking signals */
+#define VKI_SIG_SETMASK 2 /* for setting the signal mask */
+
+/* Type of a signal handler. */
+typedef void __vki_signalfn_t(int);
+typedef __vki_signalfn_t __user *__vki_sighandler_t;
+
+typedef void __vki_restorefn_t(void);
+typedef __vki_restorefn_t __user *__vki_sigrestore_t;
+
+#define VKI_SIG_DFL ((__vki_sighandler_t)0) /* default signal handling */
+#define VKI_SIG_IGN ((__vki_sighandler_t)1) /* ignore signal */
+
+#define _VKI_NSIG 64
+#define _VKI_NSIG_BPW 32
+#define _VKI_NSIG_WORDS (_VKI_NSIG / _VKI_NSIG_BPW)
+
+typedef unsigned long vki_old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+ unsigned long sig[_VKI_NSIG_WORDS];
+} vki_sigset_t;
+
+#define VKI_SIGHUP 1
+#define VKI_SIGINT 2
+#define VKI_SIGQUIT 3
+#define VKI_SIGILL 4
+#define VKI_SIGTRAP 5
+#define VKI_SIGABRT 6
+//#define VKI_SIGIOT 6
+#define VKI_SIGBUS 7
+#define VKI_SIGFPE 8
+#define VKI_SIGKILL 9
+#define VKI_SIGUSR1 10
+#define VKI_SIGSEGV 11
+#define VKI_SIGUSR2 12
+#define VKI_SIGPIPE 13
+#define VKI_SIGALRM 14
+#define VKI_SIGTERM 15
+#define VKI_SIGSTKFLT 16
+#define VKI_SIGCHLD 17
+#define VKI_SIGCONT 18
+#define VKI_SIGSTOP 19
+#define VKI_SIGTSTP 20
+#define VKI_SIGTTIN 21
+#define VKI_SIGTTOU 22
+#define VKI_SIGURG 23
+#define VKI_SIGXCPU 24
+#define VKI_SIGXFSZ 25
+#define VKI_SIGVTALRM 26
+#define VKI_SIGPROF 27
+#define VKI_SIGWINCH 28
+#define VKI_SIGIO 29
+#define VKI_SIGPWR 30
+#define VKI_SIGSYS 31
+#define VKI_SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define VKI_SIGRTMIN 32
+// [[This was (_NSIG-1) in 2.4.X... not sure if it matters.]]
+#define VKI_SIGRTMAX _VKI_NSIG
+
+#define VKI_SA_NOCLDSTOP 0x00000001u
+#define VKI_SA_NOCLDWAIT 0x00000002u
+#define VKI_SA_SIGINFO 0x00000004u
+#define VKI_SA_ONSTACK 0x08000000u
+#define VKI_SA_RESTART 0x10000000u
+#define VKI_SA_NODEFER 0x40000000u
+#define VKI_SA_RESETHAND 0x80000000u
+
+#define VKI_SA_NOMASK VKI_SA_NODEFER
+#define VKI_SA_ONESHOT VKI_SA_RESETHAND
+//#define VKI_SA_INTERRUPT 0x20000000 /* dummy -- ignored */
+
+#define VKI_SA_RESTORER 0x04000000
+
+#define VKI_SS_ONSTACK 1
+#define VKI_SS_DISABLE 2
+
+struct vki_old_sigaction {
+ // [[Nb: a 'k' prefix is added to "sa_handler" because
+ // bits/sigaction.h (which gets dragged in somehow via signal.h)
+ // #defines it as something else. Since that is done for glibc's
+ // purposes, which we don't care about here, we use our own name.]]
+ __vki_sighandler_t ksa_handler;
+ vki_old_sigset_t sa_mask;
+ unsigned long sa_flags;
+ __vki_sigrestore_t sa_restorer;
+};
+
+struct vki_sigaction_base {
+ // [[See comment about extra 'k' above]]
+ __vki_sighandler_t ksa_handler;
+ unsigned long sa_flags;
+ __vki_sigrestore_t sa_restorer;
+ vki_sigset_t sa_mask; /* mask last for extensibility */
+};
+
+/* On Linux we use the same type for passing sigactions to
+ and from the kernel. Hence: */
+typedef struct vki_sigaction_base vki_sigaction_toK_t;
+typedef struct vki_sigaction_base vki_sigaction_fromK_t;
+
+
+typedef struct vki_sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ vki_size_t ss_size;
+} vki_stack_t;
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/sigcontext.h
+// NOTE(review): the _vki_fpreg/_vki_fpxreg/_vki_xmmreg/_vki_fpstate
+// structs below are x86 FPU/SSE leftovers from the copy and have no
+// ARM counterpart; only vki_sigcontext itself is the ARM layout.
+//----------------------------------------------------------------------
+
+struct _vki_fpreg {
+ unsigned short significand[4];
+ unsigned short exponent;
+};
+
+struct _vki_fpxreg {
+ unsigned short significand[4];
+ unsigned short exponent;
+ unsigned short padding[3];
+};
+
+struct _vki_xmmreg {
+ unsigned long element[4];
+};
+
+struct _vki_fpstate {
+ /* Regular FPU environment */
+ unsigned long cw;
+ unsigned long sw;
+ unsigned long tag;
+ unsigned long ipoff;
+ unsigned long cssel;
+ unsigned long dataoff;
+ unsigned long datasel;
+ struct _vki_fpreg _st[8];
+ unsigned short status;
+ unsigned short magic; /* 0xffff = regular FPU data only */
+
+ /* FXSR FPU environment */
+ unsigned long _fxsr_env[6]; /* FXSR FPU env is ignored */
+ unsigned long mxcsr;
+ unsigned long reserved;
+ struct _vki_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
+ struct _vki_xmmreg _xmm[8];
+ unsigned long padding[56];
+};
+
+/* Machine context saved on signal delivery.  This is the ARM layout
+   (trap_no/error_code/oldmask followed by r0..r10, fp, ip, sp, lr, pc,
+   cpsr, fault_address) -- presumably mirroring arch/arm's
+   struct sigcontext; verify field-for-field against the kernel. */
+struct vki_sigcontext {
+ unsigned long trap_no;
+ unsigned long error_code;
+ unsigned long oldmask;
+ unsigned long arm_r0;
+ unsigned long arm_r1;
+ unsigned long arm_r2;
+ unsigned long arm_r3;
+ unsigned long arm_r4;
+ unsigned long arm_r5;
+ unsigned long arm_r6;
+ unsigned long arm_r7;
+ unsigned long arm_r8;
+ unsigned long arm_r9;
+ unsigned long arm_r10;
+ unsigned long arm_fp;
+ unsigned long arm_ip;
+ unsigned long arm_sp;
+ unsigned long arm_lr;
+ unsigned long arm_pc;
+ unsigned long arm_cpsr;
+ unsigned long fault_address;
+};
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/mman.h
+//----------------------------------------------------------------------
+
+#define VKI_PROT_NONE 0x0 /* No page permissions */
+#define VKI_PROT_READ 0x1 /* page can be read */
+#define VKI_PROT_WRITE 0x2 /* page can be written */
+#define VKI_PROT_EXEC 0x4 /* page can be executed */
+#define VKI_PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
+#define VKI_PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
+
+#define VKI_MAP_SHARED 0x01 /* Share changes */
+#define VKI_MAP_PRIVATE 0x02 /* Changes are private */
+//#define VKI_MAP_TYPE 0x0f /* Mask for type of mapping */
+#define VKI_MAP_FIXED 0x10 /* Interpret addr exactly */
+#define VKI_MAP_ANONYMOUS 0x20 /* don't use a file */
+#define VKI_MAP_NORESERVE 0x4000 /* don't check for reservations */
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/fcntl.h
+// (values checked against asm-arm/fcntl.h; they match x86 except
+// where noted below)
+//----------------------------------------------------------------------
+
+#define VKI_O_RDONLY 00
+#define VKI_O_WRONLY 01
+#define VKI_O_RDWR 02
+#define VKI_O_CREAT 0100 /* not fcntl */
+#define VKI_O_EXCL 0200 /* not fcntl */
+#define VKI_O_TRUNC 01000 /* not fcntl */
+#define VKI_O_APPEND 02000
+#define VKI_O_NONBLOCK 04000
+#define VKI_O_LARGEFILE 0400000 /* ARM value; x86 uses 0100000 */
+
+#define VKI_AT_FDCWD -100
+
+#define VKI_F_DUPFD 0 /* dup */
+#define VKI_F_GETFD 1 /* get close_on_exec */
+#define VKI_F_SETFD 2 /* set/clear close_on_exec */
+#define VKI_F_GETFL 3 /* get file->f_flags */
+#define VKI_F_SETFL 4 /* set file->f_flags */
+#define VKI_F_GETLK 5
+#define VKI_F_SETLK 6
+#define VKI_F_SETLKW 7
+
+#define VKI_F_SETOWN 8 /* for sockets. */
+#define VKI_F_GETOWN 9 /* for sockets. */
+#define VKI_F_SETSIG 10 /* for sockets. */
+#define VKI_F_GETSIG 11 /* for sockets. */
+
+#define VKI_F_GETLK64 12 /* using 'struct flock64' */
+#define VKI_F_SETLK64 13
+#define VKI_F_SETLKW64 14
+
+/* for F_[GET|SET]FL */
+#define VKI_FD_CLOEXEC 1 /* actually anything with low bit set goes */
+
+#define VKI_F_LINUX_SPECIFIC_BASE 1024
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/resource.h
+//----------------------------------------------------------------------
+
+#define VKI_RLIMIT_DATA 2 /* max data size */
+#define VKI_RLIMIT_STACK 3 /* max stack size */
+#define VKI_RLIMIT_CORE 4 /* max core file size */
+#define VKI_RLIMIT_NOFILE 7 /* max number of open files */
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/socket.h
+//----------------------------------------------------------------------
+
+#define VKI_SOL_SOCKET 1
+
+#define VKI_SO_TYPE 3
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/sockios.h
+//----------------------------------------------------------------------
+
+#define VKI_SIOCSPGRP 0x8902
+#define VKI_SIOCGPGRP 0x8904
+#define VKI_SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
+#define VKI_SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/stat.h
+//----------------------------------------------------------------------
+
+struct vki_stat {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+ unsigned long st_rdev;
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct vki_stat64 {
+ unsigned long long st_dev;
+ unsigned char __pad0[4];
+
+#define STAT64_HAS_BROKEN_ST_INO 1
+ unsigned long __st_ino;
+
+ unsigned int st_mode;
+ unsigned int st_nlink;
+
+ unsigned long st_uid;
+ unsigned long st_gid;
+
+ unsigned long long st_rdev;
+ unsigned char __pad3[4];
+
+ long long st_size;
+ unsigned long st_blksize;
+
+ unsigned long st_blocks; /* Number 512-byte blocks allocated. */
+ unsigned long __pad4; /* future possible st_blocks high bits */
+
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+
+ unsigned long st_mtime;
+ unsigned int st_mtime_nsec;
+
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+
+ unsigned long long st_ino;
+};
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/statfs.h
+//----------------------------------------------------------------------
+
+// [[Nb: asm-i386/statfs.h just #include asm-generic/statfs.h directly]]
+struct vki_statfs {
+ __vki_u32 f_type;
+ __vki_u32 f_bsize;
+ __vki_u32 f_blocks;
+ __vki_u32 f_bfree;
+ __vki_u32 f_bavail;
+ __vki_u32 f_files;
+ __vki_u32 f_ffree;
+ __vki_kernel_fsid_t f_fsid;
+ __vki_u32 f_namelen;
+ __vki_u32 f_frsize;
+ __vki_u32 f_spare[5];
+};
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/termios.h
+//----------------------------------------------------------------------
+
+struct vki_winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define VKI_NCC 8
+struct vki_termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[VKI_NCC]; /* control characters */
+};
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/termbits.h
+//----------------------------------------------------------------------
+
+typedef unsigned char vki_cc_t;
+typedef unsigned int vki_tcflag_t;
+
+#define VKI_NCCS 19
+struct vki_termios {
+ vki_tcflag_t c_iflag; /* input mode flags */
+ vki_tcflag_t c_oflag; /* output mode flags */
+ vki_tcflag_t c_cflag; /* control mode flags */
+ vki_tcflag_t c_lflag; /* local mode flags */
+ vki_cc_t c_line; /* line discipline */
+ vki_cc_t c_cc[VKI_NCCS]; /* control characters */
+};
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/ioctl.h
+//----------------------------------------------------------------------
+
+#define _VKI_IOC_NRBITS 8
+#define _VKI_IOC_TYPEBITS 8
+#define _VKI_IOC_SIZEBITS 14
+#define _VKI_IOC_DIRBITS 2
+
+#define _VKI_IOC_NRMASK ((1 << _VKI_IOC_NRBITS)-1)
+#define _VKI_IOC_TYPEMASK ((1 << _VKI_IOC_TYPEBITS)-1)
+#define _VKI_IOC_SIZEMASK ((1 << _VKI_IOC_SIZEBITS)-1)
+#define _VKI_IOC_DIRMASK ((1 << _VKI_IOC_DIRBITS)-1)
+
+#define _VKI_IOC_NRSHIFT 0
+#define _VKI_IOC_TYPESHIFT (_VKI_IOC_NRSHIFT+_VKI_IOC_NRBITS)
+#define _VKI_IOC_SIZESHIFT (_VKI_IOC_TYPESHIFT+_VKI_IOC_TYPEBITS)
+#define _VKI_IOC_DIRSHIFT (_VKI_IOC_SIZESHIFT+_VKI_IOC_SIZEBITS)
+
+#define _VKI_IOC_NONE 0U
+#define _VKI_IOC_WRITE 1U
+#define _VKI_IOC_READ 2U
+
+#define _VKI_IOC(dir,type,nr,size) \
+ (((dir) << _VKI_IOC_DIRSHIFT) | \
+ ((type) << _VKI_IOC_TYPESHIFT) | \
+ ((nr) << _VKI_IOC_NRSHIFT) | \
+ ((size) << _VKI_IOC_SIZESHIFT))
+
+/* provoke compile error for invalid uses of size argument */
+extern unsigned int __vki_invalid_size_argument_for_IOC;
+#define _VKI_IOC_TYPECHECK(t) \
+ ((sizeof(t) == sizeof(t[1]) && \
+ sizeof(t) < (1 << _VKI_IOC_SIZEBITS)) ? \
+ sizeof(t) : __vki_invalid_size_argument_for_IOC)
+
+/* used to create numbers */
+#define _VKI_IO(type,nr) _VKI_IOC(_VKI_IOC_NONE,(type),(nr),0)
+#define _VKI_IOR(type,nr,size) _VKI_IOC(_VKI_IOC_READ,(type),(nr),(_VKI_IOC_TYPECHECK(size)))
+#define _VKI_IOW(type,nr,size) _VKI_IOC(_VKI_IOC_WRITE,(type),(nr),(_VKI_IOC_TYPECHECK(size)))
+#define _VKI_IOWR(type,nr,size) _VKI_IOC(_VKI_IOC_READ|_VKI_IOC_WRITE,(type),(nr),(_VKI_IOC_TYPECHECK(size)))
+
+/* used to decode ioctl numbers.. */
+#define _VKI_IOC_DIR(nr) (((nr) >> _VKI_IOC_DIRSHIFT) & _VKI_IOC_DIRMASK)
+#define _VKI_IOC_TYPE(nr) (((nr) >> _VKI_IOC_TYPESHIFT) & _VKI_IOC_TYPEMASK)
+#define _VKI_IOC_NR(nr) (((nr) >> _VKI_IOC_NRSHIFT) & _VKI_IOC_NRMASK)
+#define _VKI_IOC_SIZE(nr) (((nr) >> _VKI_IOC_SIZESHIFT) & _VKI_IOC_SIZEMASK)
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/ioctls.h
+//----------------------------------------------------------------------
+
+#define VKI_TCGETS 0x5401
+#define VKI_TCSETS 0x5402 /* Clashes with SNDCTL_TMR_START sound ioctl */
+#define VKI_TCSETSW 0x5403
+#define VKI_TCSETSF 0x5404
+#define VKI_TCGETA 0x5405
+#define VKI_TCSETA 0x5406
+#define VKI_TCSETAW 0x5407
+#define VKI_TCSETAF 0x5408
+#define VKI_TCSBRK 0x5409
+#define VKI_TCXONC 0x540A
+#define VKI_TCFLSH 0x540B
+#define VKI_TIOCSCTTY 0x540E
+#define VKI_TIOCGPGRP 0x540F
+#define VKI_TIOCSPGRP 0x5410
+#define VKI_TIOCOUTQ 0x5411
+#define VKI_TIOCGWINSZ 0x5413
+#define VKI_TIOCSWINSZ 0x5414
+#define VKI_TIOCMGET 0x5415
+#define VKI_TIOCMBIS 0x5416
+#define VKI_TIOCMBIC 0x5417
+#define VKI_TIOCMSET 0x5418
+#define VKI_FIONREAD 0x541B
+#define VKI_TIOCLINUX 0x541C
+#define VKI_FIONBIO 0x5421
+#define VKI_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define VKI_TIOCGPTN _VKI_IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define VKI_TIOCSPTLCK _VKI_IOW('T',0x31, int) /* Lock/unlock Pty */
+
+#define VKI_FIOASYNC 0x5452
+#define VKI_TIOCSERGETLSR 0x5459 /* Get line status register */
+
+#define VKI_TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+//----------------------------------------------------------------------
+// From asm-generic/poll.h
+//----------------------------------------------------------------------
+
+/* These are specified by iBCS2 */
+#define VKI_POLLIN 0x0001
+
+struct vki_pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/user.h
+// NOTE(review): vki_user_i387_struct and vki_user_fxsr_struct are
+// x86-only leftovers from the copy; vki_user_regs_struct below is the
+// ARM definition.
+//----------------------------------------------------------------------
+
+struct vki_user_i387_struct {
+ long cwd;
+ long swd;
+ long twd;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
+};
+
+struct vki_user_fxsr_struct {
+ unsigned short cwd;
+ unsigned short swd;
+ unsigned short twd;
+ unsigned short fop;
+ long fip;
+ long fcs;
+ long foo;
+ long fos;
+ long mxcsr;
+ long reserved;
+ long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
+ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
+ long padding[56];
+};
+
+/* ARM ptrace register file: 18 words covering r0..r15 (fp/ip/sp/lr/pc
+   by their APCS names), cpsr, and the original r0 (syscall-restart
+   bookkeeping) at index 17.  The ARM_* macros below name the slots. */
+struct vki_user_regs_struct {
+ long uregs[18];
+};
+#define ARM_cpsr uregs[16]
+#define ARM_pc uregs[15]
+#define ARM_lr uregs[14]
+#define ARM_sp uregs[13]
+#define ARM_ip uregs[12]
+#define ARM_fp uregs[11]
+#define ARM_r10 uregs[10]
+#define ARM_r9 uregs[9]
+#define ARM_r8 uregs[8]
+#define ARM_r7 uregs[7]
+#define ARM_r6 uregs[6]
+#define ARM_r5 uregs[5]
+#define ARM_r4 uregs[4]
+#define ARM_r3 uregs[3]
+#define ARM_r2 uregs[2]
+#define ARM_r1 uregs[1]
+#define ARM_r0 uregs[0]
+#define ARM_ORIG_r0 uregs[17]
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/elf.h
+//----------------------------------------------------------------------
+
+typedef unsigned long vki_elf_greg_t;
+
+#define VKI_ELF_NGREG (sizeof (struct vki_user_regs_struct) / sizeof(vki_elf_greg_t))
+typedef vki_elf_greg_t vki_elf_gregset_t[VKI_ELF_NGREG];
+
+typedef struct vki_user_i387_struct vki_elf_fpregset_t;
+typedef struct vki_user_fxsr_struct vki_elf_fpxregset_t;
+
+#define VKI_AT_SYSINFO 32
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/ucontext.h
+//----------------------------------------------------------------------
+
+struct vki_ucontext {
+ unsigned long uc_flags;
+ struct vki_ucontext *uc_link;
+ vki_stack_t uc_stack;
+ struct vki_sigcontext uc_mcontext;
+ vki_sigset_t uc_sigmask; /* mask last for extensibility */
+ int __unused[32 - (sizeof (vki_sigset_t) / sizeof (int))];
+ unsigned long uc_regspace[128] __attribute__((__aligned__(8)));
+
+};
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/segment.h
+// NOTE(review): GDT/TLS segment descriptors are x86-specific; these
+// defines (and the ldt.h material below) look like dead residue on
+// ARM -- confirm nothing arm-side references them before removing.
+//----------------------------------------------------------------------
+
+#define VKI_GDT_ENTRY_TLS_ENTRIES 3
+#define VKI_GDT_ENTRY_TLS_MIN 6
+#define VKI_GDT_ENTRY_TLS_MAX (VKI_GDT_ENTRY_TLS_MIN + VKI_GDT_ENTRY_TLS_ENTRIES - 1)
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/ldt.h
+//----------------------------------------------------------------------
+
+/* [[Nb: This is the structure passed to the modify_ldt syscall. Just so as
+ to confuse and annoy everyone, this is _not_ the same as an
+ VgLdtEntry and has to be translated into such. The logic for doing
+ so, in vg_ldt.c, is copied from the kernel sources.]] */
+struct vki_user_desc {
+ unsigned int entry_number;
+ unsigned long base_addr;
+ unsigned int limit;
+ unsigned int seg_32bit:1;
+ unsigned int contents:2;
+ unsigned int read_exec_only:1;
+ unsigned int limit_in_pages:1;
+ unsigned int seg_not_present:1;
+ unsigned int useable:1;
+ // [[Nb: this field is not in the kernel sources, but it has always
+ // been in the Valgrind sources so I will keep it there in case it's
+ // important... this is an x86-defined data structure so who
+ // knows; maybe it's important to set this field to zero at some
+ // point. --njn]]
+ unsigned int reserved:25;
+};
+
+// [[Nb: for our convenience within Valgrind, use a more specific name]]
+typedef struct vki_user_desc vki_modify_ldt_t;
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/ipcbuf.h
+//----------------------------------------------------------------------
+
+struct vki_ipc64_perm
+{
+ __vki_kernel_key_t key;
+ __vki_kernel_uid32_t uid;
+ __vki_kernel_gid32_t gid;
+ __vki_kernel_uid32_t cuid;
+ __vki_kernel_gid32_t cgid;
+ __vki_kernel_mode_t mode;
+ unsigned short __pad1;
+ unsigned short seq;
+ unsigned short __pad2;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/sembuf.h
+//----------------------------------------------------------------------
+
+struct vki_semid64_ds {
+ struct vki_ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __vki_kernel_time_t sem_otime; /* last semop time */
+ unsigned long __unused1;
+ __vki_kernel_time_t sem_ctime; /* last change time */
+ unsigned long __unused2;
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/msgbuf.h
+//----------------------------------------------------------------------
+
+struct vki_msqid64_ds {
+ struct vki_ipc64_perm msg_perm;
+ __vki_kernel_time_t msg_stime; /* last msgsnd time */
+ unsigned long __unused1;
+ __vki_kernel_time_t msg_rtime; /* last msgrcv time */
+ unsigned long __unused2;
+ __vki_kernel_time_t msg_ctime; /* last change time */
+ unsigned long __unused3;
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __vki_kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __vki_kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/ipc.h
+//----------------------------------------------------------------------
+
+struct vki_ipc_kludge {
+ struct vki_msgbuf __user *msgp;
+ long msgtyp;
+};
+
+#define VKI_SEMOP 1
+#define VKI_SEMGET 2
+#define VKI_SEMCTL 3
+#define VKI_SEMTIMEDOP 4
+#define VKI_MSGSND 11
+#define VKI_MSGRCV 12
+#define VKI_MSGGET 13
+#define VKI_MSGCTL 14
+#define VKI_SHMAT 21
+#define VKI_SHMDT 22
+#define VKI_SHMGET 23
+#define VKI_SHMCTL 24
+
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/shmbuf.h
+//----------------------------------------------------------------------
+
+struct vki_shmid64_ds {
+ struct vki_ipc64_perm shm_perm; /* operation perms */
+ vki_size_t shm_segsz; /* size of segment (bytes) */
+ __vki_kernel_time_t shm_atime; /* last attach time */
+ unsigned long __unused1;
+ __vki_kernel_time_t shm_dtime; /* last detach time */
+ unsigned long __unused2;
+ __vki_kernel_time_t shm_ctime; /* last change time */
+ unsigned long __unused3;
+ __vki_kernel_pid_t shm_cpid; /* pid of creator */
+ __vki_kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct vki_shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+//----------------------------------------------------------------------
+// DRM ioctls
+//----------------------------------------------------------------------
+
+// jrs 20050207: where did all this stuff come from? Is it really
+// i386 specific, or should it go into the linux-generic category?
+//struct vki_drm_buf_pub {
+// Int idx; /**< Index into the master buffer list */
+// Int total; /**< Buffer size */
+// Int used; /**< Amount of buffer in use (for DMA) */
+// void __user *address; /**< Address of buffer */
+//};
+//
+//struct vki_drm_buf_map {
+// Int count; /**< Length of the buffer list */
+// void __user *virtual; /**< Mmap'd area in user-virtual */
+// struct vki_drm_buf_pub __user *list; /**< Buffer information */
+//};
+//
+///* We need to pay attention to this, because it mmaps memory */
+//#define VKI_DRM_IOCTL_MAP_BUFS _VKI_IOWR('d', 0x19, struct vki_drm_buf_map)
+
+//----------------------------------------------------------------------
+// From linux-2.6.9/include/asm-i386/ptrace.h
+// NOTE(review): requests 12-15 match ARM's GETREGS/SETREGS/
+// GETFPREGS/SETFPREGS, but 18/19 are GETFPXREGS/SETFPXREGS only on
+// x86; on ARM those numbers are PTRACE_GETWMMXREGS/SETWMMXREGS.
+//----------------------------------------------------------------------
+
+#define VKI_PTRACE_GETREGS 12
+#define VKI_PTRACE_SETREGS 13
+#define VKI_PTRACE_GETFPREGS 14
+#define VKI_PTRACE_SETFPREGS 15
+#define VKI_PTRACE_GETFPXREGS 18
+#define VKI_PTRACE_SETFPXREGS 19
+
+//----------------------------------------------------------------------
+// From linux-2.6.15.4/include/asm-i386/vm86.h
+// NOTE(review): vm86 is x86-only (virtual-8086 mode); this whole
+// section is dead on ARM and kept only because the file was copied
+// from the x86 port.
+//----------------------------------------------------------------------
+
+#define VKI_VM86_PLUS_INSTALL_CHECK 0
+#define VKI_VM86_ENTER 1
+#define VKI_VM86_ENTER_NO_BYPASS 2
+#define VKI_VM86_REQUEST_IRQ 3
+#define VKI_VM86_FREE_IRQ 4
+#define VKI_VM86_GET_IRQ_BITS 5
+#define VKI_VM86_GET_AND_RESET_IRQ 6
+
+struct vki_vm86_regs {
+/*
+ * normal regs, with special meaning for the segment descriptors..
+ */
+ long ebx;
+ long ecx;
+ long edx;
+ long esi;
+ long edi;
+ long ebp;
+ long eax;
+ long __null_ds;
+ long __null_es;
+ long __null_fs;
+ long __null_gs;
+ long orig_eax;
+ long eip;
+ unsigned short cs, __csh;
+ long eflags;
+ long esp;
+ unsigned short ss, __ssh;
+/*
+ * these are specific to v86 mode:
+ */
+ unsigned short es, __esh;
+ unsigned short ds, __dsh;
+ unsigned short fs, __fsh;
+ unsigned short gs, __gsh;
+};
+
+struct vki_revectored_struct {
+ unsigned long __map[8]; /* 256 bits */
+};
+
+struct vki_vm86_struct {
+ struct vki_vm86_regs regs;
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct vki_revectored_struct int_revectored;
+ struct vki_revectored_struct int21_revectored;
+};
+
+struct vki_vm86plus_info_struct {
+ unsigned long force_return_for_pic:1;
+ unsigned long vm86dbg_active:1; /* for debugger */
+ unsigned long vm86dbg_TFpendig:1; /* for debugger */
+ unsigned long unused:28;
+ unsigned long is_vm86pus:1; /* for vm86 internal use */
+ unsigned char vm86dbg_intxxtab[32]; /* for debugger */
+};
+
+struct vki_vm86plus_struct {
+ struct vki_vm86_regs regs;
+ unsigned long flags;
+ unsigned long screen_bitmap;
+ unsigned long cpu_type;
+ struct vki_revectored_struct int_revectored;
+ struct vki_revectored_struct int21_revectored;
+ struct vki_vm86plus_info_struct vm86plus;
+};
+
+//----------------------------------------------------------------------
+// And that's it!
+//----------------------------------------------------------------------
+
+#endif // __VKI_ARM_LINUX_H
+
+/*--------------------------------------------------------------------*/
+/*--- end vki-arm-linux.h ---*/
+/*--------------------------------------------------------------------*/
# include "vki-posixtypes-ppc32-linux.h"
#elif defined(VGA_ppc64)
# include "vki-posixtypes-ppc64-linux.h"
+#elif defined(VGA_arm)
+# include "vki-posixtypes-arm-linux.h"
#else
# error Unknown platform
#endif
# include "vki-ppc32-linux.h"
#elif defined(VGA_ppc64)
# include "vki-ppc64-linux.h"
+#elif defined(VGA_arm)
+# include "vki-arm-linux.h"
#else
# error Unknown platform
#endif
--- /dev/null
+
+/*--------------------------------------------------------------------*/
+/*--- arm/Linux-specific kernel interface: posix types. ---*/
+/*--- vki-posixtypes-arm-linux.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2000-2009 Julian Seward
+ jseward@acm.org
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VKI_POSIXTYPES_ARM_LINUX_H
+#define __VKI_POSIXTYPES_ARM_LINUX_H
+
+//----------------------------------------------------------------------
+// From linux-2.6.8.1/include/asm-i386/posix_types.h
+// (NOTE: x86 definitions reused here -- verify against asm-arm/posix_types.h)
+//----------------------------------------------------------------------
+
+typedef unsigned short __vki_kernel_mode_t;
+typedef long __vki_kernel_off_t;
+typedef int __vki_kernel_pid_t;
+typedef unsigned short __vki_kernel_ipc_pid_t;
+typedef unsigned short __vki_kernel_uid_t;
+typedef unsigned short __vki_kernel_gid_t;
+typedef unsigned int __vki_kernel_size_t;
+typedef long __vki_kernel_time_t;
+typedef long __vki_kernel_suseconds_t;
+typedef long __vki_kernel_clock_t;
+typedef int __vki_kernel_timer_t;
+typedef int __vki_kernel_clockid_t;
+typedef char * __vki_kernel_caddr_t;
+typedef unsigned int __vki_kernel_uid32_t;
+typedef unsigned int __vki_kernel_gid32_t;
+
+typedef unsigned short __vki_kernel_old_uid_t;
+typedef unsigned short __vki_kernel_old_gid_t;
+
+typedef long long __vki_kernel_loff_t;
+
+typedef struct {
+ int val[2];
+} __vki_kernel_fsid_t;
+
+#endif // __VKI_POSIXTYPES_ARM_LINUX_H
+
+/*--------------------------------------------------------------------*/
+/*--- end vki-posixtypes-arm-linux.h ---*/
+/*--------------------------------------------------------------------*/
--- /dev/null
+
+/*--------------------------------------------------------------------*/
+/*--- System call numbers for arm-linux. ---*/
+/*--- vki-scnums-arm-linux.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+ This file is part of Valgrind, a dynamic binary instrumentation
+ framework.
+
+ Copyright (C) 2008-2009 Evan Geller
+ gaze@bea.ms
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307, USA.
+
+ The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __VKI_SCNUMS_ARM_LINUX_H
+#define __VKI_SCNUMS_ARM_LINUX_H
+
+// From linux-2.6.26.2/include/asm-arm/unistd.h
+
+#define __NR_restart_syscall 0
+#define __NR_exit 1
+#define __NR_fork 2
+#define __NR_read 3
+#define __NR_write 4
+#define __NR_open 5
+#define __NR_close 6
+ /* 7 was sys_waitpid */
+#define __NR_creat 8
+#define __NR_link 9
+#define __NR_unlink 10
+#define __NR_execve 11
+#define __NR_chdir 12
+#define __NR_time 13
+#define __NR_mknod 14
+#define __NR_chmod 15
+#define __NR_lchown 16
+ /* 17 was sys_break */
+ /* 18 was sys_stat */
+#define __NR_lseek 19
+#define __NR_getpid 20
+#define __NR_mount 21
+#define __NR_umount 22
+#define __NR_setuid 23
+#define __NR_getuid 24
+#define __NR_stime 25
+#define __NR_ptrace 26
+#define __NR_alarm 27
+ /* 28 was sys_fstat */
+#define __NR_pause 29
+#define __NR_utime 30
+ /* 31 was sys_stty */
+ /* 32 was sys_gtty */
+#define __NR_access 33
+#define __NR_nice 34
+ /* 35 was sys_ftime */
+#define __NR_sync 36
+#define __NR_kill 37
+#define __NR_rename 38
+#define __NR_mkdir 39
+#define __NR_rmdir 40
+#define __NR_dup 41
+#define __NR_pipe 42
+#define __NR_times 43
+ /* 44 was sys_prof */
+#define __NR_brk 45
+#define __NR_setgid 46
+#define __NR_getgid 47
+ /* 48 was sys_signal */
+#define __NR_geteuid 49
+#define __NR_getegid 50
+#define __NR_acct 51
+#define __NR_umount2 52
+ /* 53 was sys_lock */
+#define __NR_ioctl 54
+#define __NR_fcntl 55
+ /* 56 was sys_mpx */
+#define __NR_setpgid 57
+ /* 58 was sys_ulimit */
+ /* 59 was sys_olduname */
+#define __NR_umask 60
+#define __NR_chroot 61
+#define __NR_ustat 62
+#define __NR_dup2 63
+#define __NR_getppid 64
+#define __NR_getpgrp 65
+#define __NR_setsid 66
+#define __NR_sigaction 67
+ /* 68 was sys_sgetmask */
+ /* 69 was sys_ssetmask */
+#define __NR_setreuid 70
+#define __NR_setregid 71
+#define __NR_sigsuspend 72
+#define __NR_sigpending 73
+#define __NR_sethostname 74
+#define __NR_setrlimit 75
+#define __NR_getrlimit 76 /* Back compat 2GB limited rlimit */
+#define __NR_getrusage 77
+#define __NR_gettimeofday 78
+#define __NR_settimeofday 79
+#define __NR_getgroups 80
+#define __NR_setgroups 81
+#define __NR_select 82
+#define __NR_symlink 83
+ /* 84 was sys_lstat */
+#define __NR_readlink 85
+#define __NR_uselib 86
+#define __NR_swapon 87
+#define __NR_reboot 88
+#define __NR_readdir 89
+#define __NR_mmap 90
+#define __NR_munmap 91
+#define __NR_truncate 92
+#define __NR_ftruncate 93
+#define __NR_fchmod 94
+#define __NR_fchown 95
+#define __NR_getpriority 96
+#define __NR_setpriority 97
+ /* 98 was sys_profil */
+#define __NR_statfs 99
+#define __NR_fstatfs 100
+ /* 101 was sys_ioperm */
+#define __NR_socketcall 102
+#define __NR_syslog 103
+#define __NR_setitimer 104
+#define __NR_getitimer 105
+#define __NR_stat 106
+#define __NR_lstat 107
+#define __NR_fstat 108
+ /* 109 was sys_uname */
+ /* 110 was sys_iopl */
+#define __NR_vhangup 111
+ /* 112 was sys_idle */
+#define __NR_syscall 113 /* syscall to call a syscall! */
+#define __NR_wait4 114
+#define __NR_swapoff 115
+#define __NR_sysinfo 116
+#define __NR_ipc 117
+#define __NR_fsync 118
+#define __NR_sigreturn 119
+#define __NR_clone 120
+#define __NR_setdomainname 121
+#define __NR_uname 122
+ /* 123 was sys_modify_ldt */
+#define __NR_adjtimex 124
+#define __NR_mprotect 125
+#define __NR_sigprocmask 126
+ /* 127 was sys_create_module */
+#define __NR_init_module 128
+#define __NR_delete_module 129
+ /* 130 was sys_get_kernel_syms */
+#define __NR_quotactl 131
+#define __NR_getpgid 132
+#define __NR_fchdir 133
+#define __NR_bdflush 134
+#define __NR_sysfs 135
+#define __NR_personality 136
+ /* 137 was sys_afs_syscall */
+#define __NR_setfsuid 138
+#define __NR_setfsgid 139
+#define __NR__llseek 140
+#define __NR_getdents 141
+#define __NR__newselect 142
+#define __NR_flock 143
+#define __NR_msync 144
+#define __NR_readv 145
+#define __NR_writev 146
+#define __NR_getsid 147
+#define __NR_fdatasync 148
+#define __NR__sysctl 149
+#define __NR_mlock 150
+#define __NR_munlock 151
+#define __NR_mlockall 152
+#define __NR_munlockall 153
+#define __NR_sched_setparam 154
+#define __NR_sched_getparam 155
+#define __NR_sched_setscheduler 156
+#define __NR_sched_getscheduler 157
+#define __NR_sched_yield 158
+#define __NR_sched_get_priority_max 159
+#define __NR_sched_get_priority_min 160
+#define __NR_sched_rr_get_interval 161
+#define __NR_nanosleep 162
+#define __NR_mremap 163
+#define __NR_setresuid 164
+#define __NR_getresuid 165
+ /* 166 was sys_vm86 */
+ /* 167 was sys_query_module */
+#define __NR_poll 168
+#define __NR_nfsservctl 169
+#define __NR_setresgid 170
+#define __NR_getresgid 171
+#define __NR_prctl 172
+#define __NR_rt_sigreturn 173
+#define __NR_rt_sigaction 174
+#define __NR_rt_sigprocmask 175
+#define __NR_rt_sigpending 176
+#define __NR_rt_sigtimedwait 177
+#define __NR_rt_sigqueueinfo 178
+#define __NR_rt_sigsuspend 179
+#define __NR_pread64 180
+#define __NR_pwrite64 181
+#define __NR_chown 182
+#define __NR_getcwd 183
+#define __NR_capget 184
+#define __NR_capset 185
+#define __NR_sigaltstack 186
+#define __NR_sendfile 187
+ /* 188 reserved */
+ /* 189 reserved */
+#define __NR_vfork 190
+#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
+#define __NR_mmap2 192
+#define __NR_truncate64 193
+#define __NR_ftruncate64 194
+#define __NR_stat64 195
+#define __NR_lstat64 196
+#define __NR_fstat64 197
+#define __NR_lchown32 198
+#define __NR_getuid32 199
+#define __NR_getgid32 200
+#define __NR_geteuid32 201
+#define __NR_getegid32 202
+#define __NR_setreuid32 203
+#define __NR_setregid32 204
+#define __NR_getgroups32 205
+#define __NR_setgroups32 206
+#define __NR_fchown32 207
+#define __NR_setresuid32 208
+#define __NR_getresuid32 209
+#define __NR_setresgid32 210
+#define __NR_getresgid32 211
+#define __NR_chown32 212
+#define __NR_setuid32 213
+#define __NR_setgid32 214
+#define __NR_setfsuid32 215
+#define __NR_setfsgid32 216
+#define __NR_getdents64 217
+#define __NR_pivot_root 218
+#define __NR_mincore 219
+#define __NR_madvise 220
+#define __NR_fcntl64 221
+ /* 222 for tux */
+ /* 223 is unused */
+#define __NR_gettid 224
+#define __NR_readahead 225
+#define __NR_setxattr 226
+#define __NR_lsetxattr 227
+#define __NR_fsetxattr 228
+#define __NR_getxattr 229
+#define __NR_lgetxattr 230
+#define __NR_fgetxattr 231
+#define __NR_listxattr 232
+#define __NR_llistxattr 233
+#define __NR_flistxattr 234
+#define __NR_removexattr 235
+#define __NR_lremovexattr 236
+#define __NR_fremovexattr 237
+#define __NR_tkill 238
+#define __NR_sendfile64 239
+#define __NR_futex 240
+#define __NR_sched_setaffinity 241
+#define __NR_sched_getaffinity 242
+#define __NR_io_setup 243
+#define __NR_io_destroy 244
+#define __NR_io_getevents 245
+#define __NR_io_submit 246
+#define __NR_io_cancel 247
+#define __NR_exit_group 248
+#define __NR_lookup_dcookie 249
+#define __NR_epoll_create 250
+#define __NR_epoll_ctl 251
+#define __NR_epoll_wait 252
+#define __NR_remap_file_pages 253
+ /* 254 for set_thread_area */
+ /* 255 for get_thread_area */
+#define __NR_set_tid_address 256
+#define __NR_timer_create 257
+#define __NR_timer_settime 258
+#define __NR_timer_gettime 259
+#define __NR_timer_getoverrun 260
+#define __NR_timer_delete 261
+#define __NR_clock_settime 262
+#define __NR_clock_gettime 263
+#define __NR_clock_getres 264
+#define __NR_clock_nanosleep 265
+#define __NR_statfs64 266
+#define __NR_fstatfs64 267
+#define __NR_tgkill 268
+#define __NR_utimes 269
+#define __NR_arm_fadvise64_64 270
+#define __NR_fadvise64 270 // alias of __NR_arm_fadvise64_64: the "arm_" name reflects ARM's reordered (fd, advice, offset, len) argument layout
+#define __NR_pciconfig_iobase 271
+#define __NR_pciconfig_read 272
+#define __NR_pciconfig_write 273
+#define __NR_mq_open 274
+#define __NR_mq_unlink 275
+#define __NR_mq_timedsend 276
+#define __NR_mq_timedreceive 277
+#define __NR_mq_notify 278
+#define __NR_mq_getsetattr 279
+#define __NR_waitid 280
+#define __NR_socket 281
+#define __NR_bind 282
+#define __NR_connect 283
+#define __NR_listen 284
+#define __NR_accept 285
+#define __NR_getsockname 286
+#define __NR_getpeername 287
+#define __NR_socketpair 288
+#define __NR_send 289
+#define __NR_sendto 290
+#define __NR_recv 291
+#define __NR_recvfrom 292
+#define __NR_shutdown 293
+#define __NR_setsockopt 294
+#define __NR_getsockopt 295
+#define __NR_sendmsg 296
+#define __NR_recvmsg 297
+#define __NR_semop 298
+#define __NR_semget 299
+#define __NR_semctl 300
+#define __NR_msgsnd 301
+#define __NR_msgrcv 302
+#define __NR_msgget 303
+#define __NR_msgctl 304
+#define __NR_shmat 305
+#define __NR_shmdt 306
+#define __NR_shmget 307
+#define __NR_shmctl 308
+#define __NR_add_key 309
+#define __NR_request_key 310
+#define __NR_keyctl 311
+#define __NR_semtimedop 312
+#define __NR_vserver 313
+#define __NR_ioprio_set 314
+#define __NR_ioprio_get 315
+#define __NR_inotify_init 316
+#define __NR_inotify_add_watch 317
+#define __NR_inotify_rm_watch 318
+#define __NR_mbind 319
+#define __NR_get_mempolicy 320
+#define __NR_set_mempolicy 321
+#define __NR_openat 322
+#define __NR_mkdirat 323
+#define __NR_mknodat 324
+#define __NR_fchownat 325
+#define __NR_futimesat 326
+#define __NR_fstatat64 327
+#define __NR_unlinkat 328
+#define __NR_renameat 329
+#define __NR_linkat 330
+#define __NR_symlinkat 331
+#define __NR_readlinkat 332
+#define __NR_fchmodat 333
+#define __NR_faccessat 334
+ /* 335 for pselect6 */
+ /* 336 for ppoll */
+#define __NR_unshare 337
+#define __NR_set_robust_list 338
+#define __NR_get_robust_list 339
+#define __NR_splice 340
+#define __NR_arm_sync_file_range 341
+#define __NR_sync_file_range2 __NR_arm_sync_file_range
+#define __NR_tee 342
+#define __NR_vmsplice 343
+#define __NR_move_pages 344
+#define __NR_getcpu 345
+ /* 346 for epoll_pwait */
+#define __NR_kexec_load 347
+#define __NR_utimensat 348
+#define __NR_signalfd 349
+#define __NR_timerfd_create 350
+#define __NR_eventfd 351
+#define __NR_fallocate 352
+#define __NR_timerfd_settime 353
+#define __NR_timerfd_gettime 354
+
+
+#define __NR_ARM_BASE (0x0f0000)
+#define __NR_ARM_breakpoint (__NR_ARM_BASE+1)
+#define __NR_ARM_cacheflush (__NR_ARM_BASE+2)
+#define __NR_ARM_usr26 (__NR_ARM_BASE+3)
+#define __NR_ARM_usr32 (__NR_ARM_BASE+4)
+#define __NR_ARM_set_tls (__NR_ARM_BASE+5)
+
+
+#endif /* __VKI_SCNUMS_ARM_LINUX_H */
+
+/*--------------------------------------------------------------------*/
+/*--- end vki-scnums-arm-linux.h ---*/
+/*--------------------------------------------------------------------*/
# define MC_SIZEOF_GUEST_STATE sizeof(VexGuestPPC64State)
#endif
+#if defined(VGA_arm)
+# include "libvex_guest_arm.h"
+# define MC_SIZEOF_GUEST_STATE sizeof(VexGuestARMState)
+#endif
+
static inline Bool host_is_big_endian ( void ) {
UInt x = 0x11223344;
return 0x1122 == *(UShort*)(&x);
offset,szB);
tl_assert(0);
# undef GOF
+# undef SZB
+
+ /* --------------------- arm --------------------- */
+
+# elif defined(VGA_arm)
+
+# define GOF(_fieldname) \
+ (offsetof(VexGuestARMState,guest_##_fieldname))
+# define SZB(_fieldname) \
+ (sizeof(((VexGuestARMState*)0)->guest_##_fieldname))
+
+ Int o = offset;
+ Int sz = szB;
+ tl_assert(sz > 0);
+ tl_assert(host_is_little_endian());
+
+ if (o == GOF(R0) && sz == 4) return o;
+ if (o == GOF(R1) && sz == 4) return o;
+ if (o == GOF(R2) && sz == 4) return o;
+ if (o == GOF(R3) && sz == 4) return o;
+ if (o == GOF(R4) && sz == 4) return o;
+ if (o == GOF(R5) && sz == 4) return o;
+ if (o == GOF(R6) && sz == 4) return o;
+ if (o == GOF(R7) && sz == 4) return o;
+ if (o == GOF(R8) && sz == 4) return o;
+ if (o == GOF(R9) && sz == 4) return o;
+ if (o == GOF(R10) && sz == 4) return o;
+ if (o == GOF(R11) && sz == 4) return o;
+ if (o == GOF(R12) && sz == 4) return o;
+ if (o == GOF(R13) && sz == 4) return o;
+ if (o == GOF(R14) && sz == 4) return o;
+
+ /* EAZG: These may be completely wrong. */
+ if (o == GOF(R15) && sz == 4) return -1; /* slot unused */
+ if (o == GOF(CC_OP) && sz == 4) return -1; /* slot unused */
+
+ if (o == GOF(CC_DEP1) && sz == 4) return o;
+ if (o == GOF(CC_DEP2) && sz == 4) return o;
+
+ if (o == GOF(CC_NDEP) && sz == 4) return -1; /* slot unused */
+
+ //if (o == GOF(SYSCALLNO) && sz == 4) return -1; /* slot unused */
+ //if (o == GOF(CC) && sz == 4) return -1; /* slot unused */
+ //if (o == GOF(EMWARN) && sz == 4) return -1; /* slot unused */
+ //if (o == GOF(TISTART) && sz == 4) return -1; /* slot unused */
+ //if (o == GOF(NRADDR) && sz == 4) return -1; /* slot unused */
+
+ if (o == GOF(FPSCR) && sz == 4) return -1;
+ if (o == GOF(TPIDRURO) && sz == 4) return -1;
+
+ if (o >= GOF(D0) && o+sz <= GOF(D0) +SZB(D0)) return -1;
+ if (o >= GOF(D1) && o+sz <= GOF(D1) +SZB(D1)) return -1;
+ if (o >= GOF(D2) && o+sz <= GOF(D2) +SZB(D2)) return -1;
+ if (o >= GOF(D3) && o+sz <= GOF(D3) +SZB(D3)) return -1;
+ if (o >= GOF(D4) && o+sz <= GOF(D4) +SZB(D4)) return -1;
+ if (o >= GOF(D5) && o+sz <= GOF(D5) +SZB(D5)) return -1;
+ if (o >= GOF(D6) && o+sz <= GOF(D6) +SZB(D6)) return -1;
+ if (o >= GOF(D7) && o+sz <= GOF(D7) +SZB(D7)) return -1;
+ if (o >= GOF(D8) && o+sz <= GOF(D8) +SZB(D8)) return -1;
+ if (o >= GOF(D9) && o+sz <= GOF(D9) +SZB(D9)) return -1;
+ if (o >= GOF(D10) && o+sz <= GOF(D10)+SZB(D10)) return -1;
+ if (o >= GOF(D11) && o+sz <= GOF(D11)+SZB(D11)) return -1;
+ if (o >= GOF(D12) && o+sz <= GOF(D12)+SZB(D12)) return -1;
+ if (o >= GOF(D13) && o+sz <= GOF(D13)+SZB(D13)) return -1;
+ if (o >= GOF(D14) && o+sz <= GOF(D14)+SZB(D14)) return -1;
+ if (o >= GOF(D15) && o+sz <= GOF(D15)+SZB(D15)) return -1;
+
+ VG_(printf)("MC_(get_otrack_shadow_offset)(arm)(off=%d,sz=%d)\n",
+ offset,szB);
+ tl_assert(0);
+# undef GOF
# undef SZB
# else
VG_(printf)("\n");
tl_assert(0);
+ /* --------------------- arm --------------------- */
+# elif defined(VGA_arm)
+
+ VG_(printf)("get_reg_array_equiv_int_type(arm): unhandled: ");
+ ppIRRegArray(arr);
+ VG_(printf)("\n");
+ tl_assert(0);
+
# else
# error "FIXME: not implemented for this architecture"
# endif
/* I32 x I64 x I64 -> I32 */
if (t1 == Ity_I32 && t2 == Ity_I64 && t3 == Ity_I64
&& finalVty == Ity_I32) {
- if (0) VG_(printf)("mkLazy3: I32 x I64 x I64 -> I64\n");
+ if (0) VG_(printf)("mkLazy3: I32 x I64 x I64 -> I32\n");
at = mkPCastTo(mce, Ity_I64, va1);
at = mkUifU(mce, Ity_I64, at, va2);
at = mkUifU(mce, Ity_I64, at, va3);
return at;
}
+ /* I32 x I32 x I32 -> I32 */
+ /* 32-bit FP idiom, as (eg) happens on ARM */
+ if (t1 == Ity_I32 && t2 == Ity_I32 && t3 == Ity_I32
+ && finalVty == Ity_I32) {
+ if (0) VG_(printf)("mkLazy3: I32 x I32 x I32 -> I32\n");
+ at = va1;
+ at = mkUifU(mce, Ity_I32, at, va2);
+ at = mkUifU(mce, Ity_I32, at, va3);
+ at = mkPCastTo(mce, Ity_I32, at);
+ return at;
+ }
+
if (1) {
VG_(printf)("mkLazy3: ");
ppIRType(t1);
case Iop_PRem1C3210F64:
/* I32(rm) x F64 x F64 -> I32 */
return mkLazy3(mce, Ity_I32, vatom1, vatom2, vatom3);
+ case Iop_AddF32:
+ case Iop_SubF32:
+ case Iop_MulF32:
+ case Iop_DivF32:
+ /* I32(rm) x F32 x F32 -> I32 */
+ return mkLazy3(mce, Ity_I32, vatom1, vatom2, vatom3);
default:
ppIROp(op);
VG_(tool_panic)("memcheck:expr2vbits_Triop");
/* I32(rm) x I64/F64 -> I64/F64 */
return mkLazy2(mce, Ity_I64, vatom1, vatom2);
+ case Iop_F64toI32U:
case Iop_F64toI32S:
case Iop_F64toF32:
/* First arg is I32 (rounding mode), second is F64 (data). */
case Iop_F32toF64:
case Iop_I32StoF64:
+ case Iop_I32UtoF64:
case Iop_NegF64:
case Iop_AbsF64:
case Iop_Est5FRSqrt:
case Iop_Clz32:
case Iop_Ctz32:
case Iop_TruncF64asF32:
+ case Iop_NegF32:
+ case Iop_AbsF32:
return mkPCastTo(mce, Ity_I32, vatom);
case Iop_1Uto64:
case Iop_ReinterpF64asI64:
case Iop_ReinterpI64asF64:
case Iop_ReinterpI32asF32:
+ case Iop_ReinterpF32asI32:
case Iop_NotV128:
case Iop_Not64:
case Iop_Not32:
);
#elif defined(VGA_ppc32)
/* Nasty hack. Does correctly atomically do *p += n, but only if p
- is 8-aligned -- guaranteed by caller. */
+ is 4-aligned -- guaranteed by caller. */
unsigned long success;
do {
__asm__ __volatile__(
: /*trash*/ "memory", "cc", "r15"
);
} while (success != 1);
+#elif defined(VGA_arm)
+ *p += n;
#else
# error "Unsupported arch"
#endif
: /*trash*/ "memory", "cc", "r15"
);
} while (success != 1);
+#elif defined(VGA_arm)
+ *p += n;
#else
# error "Unsupported arch"
#endif
: /*trash*/ "memory", "cc", "r15"
);
} while (success != 1);
+#elif defined(VGA_arm)
+ *p += n;
#else
# error "Unsupported arch"
#endif
__attribute__((noinline)) void atomic_add_64bit ( long long int* p, int n )
{
-#if defined(VGA_x86) || defined(VGA_ppc32)
+#if defined(VGA_x86) || defined(VGA_ppc32) || defined(VGA_arm)
/* do nothing; is not supported */
#elif defined(VGA_amd64)
// this is a bit subtle. It relies on the fact that, on a 64-bit platform,
--- /dev/null
+
+### jrs: re-check this against known-good equivalents, eg x86 version
+
+EXTRA_DIST = $(noinst_SCRIPTS) \
+ instructions.stderr.exp instructions.stdout.exp
+# if any tests appear here, remember to include @FLAG_M32@ in the
+# compilation flags
+#
+
+check_PROGRAMS = instructions
+
+AM_CFLAGS = $(WERROR) -Winline -Wall -Wshadow \
+ @FLAG_M32@ -g -I$(top_srcdir)/include \
+ $(FLAG_MMMX) $(FLAG_MSSE)
+AM_CXXFLAGS = $(AM_CFLAGS)
+AM_CCASFLAGS = @FLAG_M32@
if ( 0 == strcmp( arch, "ppc32" ) ) return True;
}
+#elif defined(VGP_arm_linux)
+ if ( 0 == strcmp( arch, "arm" ) ) return True;
+
#else
# error Unknown platform
#endif // VGP_*