/auxprogs/getoff-mips32-linux
/auxprogs/getoff-mips64-linux
/auxprogs/getoff-nanomips-linux
+/auxprogs/getoff-riscv64-linux
/auxprogs/getoff-amd64-solaris
/auxprogs/getoff-x86-solaris
/auxprogs/getoff-*-freebsd
/memcheck/tests/arm64-linux/Makefile.in
/memcheck/tests/arm64-linux/scalar
+# /memcheck/tests/riscv64-linux/
+/memcheck/tests/riscv64-linux/*.stderr.diff
+/memcheck/tests/riscv64-linux/*.stderr.out
+/memcheck/tests/riscv64-linux/*.stdout.diff
+/memcheck/tests/riscv64-linux/*.stdout.out
+/memcheck/tests/riscv64-linux/.deps
+/memcheck/tests/riscv64-linux/Makefile
+/memcheck/tests/riscv64-linux/Makefile.in
+/memcheck/tests/riscv64-linux/context_float
+/memcheck/tests/riscv64-linux/context_integer
+/memcheck/tests/riscv64-linux/scalar
+
# /memcheck/tests/common/
/memcheck/tests/common/Makefile
/memcheck/tests/common/Makefile.in
/none/tests/s390x/vec2
/none/tests/s390x/vec2_float
+# /none/tests/riscv64/
+/none/tests/riscv64/*.stderr.diff
+/none/tests/riscv64/*.stderr.out
+/none/tests/riscv64/*.stdout.diff
+/none/tests/riscv64/*.stdout.out
+/none/tests/riscv64/.deps
+/none/tests/riscv64/Makefile
+/none/tests/riscv64/Makefile.in
+/none/tests/riscv64/allexec
+/none/tests/riscv64/atomic
+/none/tests/riscv64/compressed
+/none/tests/riscv64/csr
+/none/tests/riscv64/float32
+/none/tests/riscv64/float64
+/none/tests/riscv64/integer
+/none/tests/riscv64/muldiv
+
# /none/tests/scripts/
/none/tests/scripts/*.dSYM
/none/tests/scripts/*.so
$(AM_CFLAGS_PSO_BASE)
AM_CCASFLAGS_MIPS64_LINUX = @FLAG_M64@ -g
+AM_FLAG_M3264_RISCV64_LINUX = @FLAG_M64@
+AM_CFLAGS_RISCV64_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE)
+AM_CFLAGS_PSO_RISCV64_LINUX = @FLAG_M64@ $(AM_CFLAGS_BASE) $(AM_CFLAGS_PSO_BASE)
+AM_CCASFLAGS_RISCV64_LINUX = @FLAG_M64@ -g
+
AM_FLAG_M3264_X86_SOLARIS = @FLAG_M32@
AM_CFLAGS_X86_SOLARIS = @FLAG_M32@ @PREFERRED_STACK_BOUNDARY_2@ \
$(AM_CFLAGS_BASE) -fomit-frame-pointer @SOLARIS_UNDEF_LARGESOURCE@
PRELOAD_LDFLAGS_MIPS32_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@
PRELOAD_LDFLAGS_NANOMIPS_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M32@
PRELOAD_LDFLAGS_MIPS64_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@
+PRELOAD_LDFLAGS_RISCV64_LINUX = $(PRELOAD_LDFLAGS_COMMON_LINUX) @FLAG_M64@
PRELOAD_LDFLAGS_X86_SOLARIS = $(PRELOAD_LDFLAGS_COMMON_SOLARIS) @FLAG_M32@
PRELOAD_LDFLAGS_AMD64_SOLARIS = $(PRELOAD_LDFLAGS_COMMON_SOLARIS) @FLAG_M64@
README.android_emulator \
README.mips \
README.aarch64 \
+ README.riscv64 \
README.solaris \
README.freebsd \
NEWS.old \
-static -nodefaultlibs -nostartfiles -u __start @FLAG_NO_BUILD_ID@ \
@FLAG_M64@
+TOOL_LDFLAGS_RISCV64_LINUX = \
+ $(TOOL_LDFLAGS_COMMON_LINUX) @FLAG_M64@
+
TOOL_LDFLAGS_X86_SOLARIS = \
$(TOOL_LDFLAGS_COMMON_SOLARIS) @FLAG_M32@
LIBREPLACEMALLOC_MIPS64_LINUX = \
$(top_builddir)/coregrind/libreplacemalloc_toolpreload-mips64-linux.a
+LIBREPLACEMALLOC_RISCV64_LINUX = \
+ $(top_builddir)/coregrind/libreplacemalloc_toolpreload-riscv64-linux.a
+
LIBREPLACEMALLOC_X86_SOLARIS = \
$(top_builddir)/coregrind/libreplacemalloc_toolpreload-x86-solaris.a
$(LIBREPLACEMALLOC_MIPS64_LINUX) \
-Wl,--no-whole-archive
+LIBREPLACEMALLOC_LDFLAGS_RISCV64_LINUX = \
+ -Wl,--whole-archive \
+ $(LIBREPLACEMALLOC_RISCV64_LINUX) \
+ -Wl,--no-whole-archive
+
LIBREPLACEMALLOC_LDFLAGS_X86_SOLARIS = \
-Wl,--whole-archive \
$(LIBREPLACEMALLOC_X86_SOLARIS) \
pub/libvex_guest_s390x.h \
pub/libvex_guest_mips32.h \
pub/libvex_guest_mips64.h \
+ pub/libvex_guest_riscv64.h \
pub/libvex_s390x_common.h \
pub/libvex_ir.h \
pub/libvex_trc_values.h \
priv/guest_mips_defs.h \
priv/mips_defs.h \
priv/guest_nanomips_defs.h \
+ priv/guest_riscv64_defs.h \
priv/host_generic_regs.h \
priv/host_generic_simd64.h \
priv/host_generic_simd128.h \
priv/s390_defs.h \
priv/host_mips_defs.h \
priv/host_nanomips_defs.h \
- priv/common_nanomips_defs.h
+ priv/common_nanomips_defs.h \
+ priv/host_riscv64_defs.h
BUILT_SOURCES = pub/libvex_guest_offsets.h
CLEANFILES = pub/libvex_guest_offsets.h
pub/libvex_guest_arm64.h \
pub/libvex_guest_s390x.h \
pub/libvex_guest_mips32.h \
- pub/libvex_guest_mips64.h
+ pub/libvex_guest_mips64.h \
+ pub/libvex_guest_riscv64.h
rm -f auxprogs/genoffsets.s
$(mkdir_p) auxprogs pub
$(CC) $(CFLAGS_FOR_GENOFFSETS) \
priv/guest_mips_toIR.c \
priv/guest_nanomips_helpers.c \
priv/guest_nanomips_toIR.c \
+ priv/guest_riscv64_helpers.c \
+ priv/guest_riscv64_toIR.c \
priv/host_generic_regs.c \
priv/host_generic_simd64.c \
priv/host_generic_simd128.c \
priv/host_mips_defs.c \
priv/host_nanomips_defs.c \
priv/host_mips_isel.c \
- priv/host_nanomips_isel.c
+ priv/host_nanomips_isel.c \
+ priv/host_riscv64_defs.c \
+ priv/host_riscv64_isel.c
LIBVEXMULTIARCH_SOURCES = priv/multiarch_main_main.c
*D1c = (cache_t) { 65536, 2, 64 };
*LLc = (cache_t) { 262144, 8, 64 };
+#elif defined(VGA_riscv64)
+
+ // Default cache configuration is SiFive FU740-C000 (HiFive Unmatched)
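+   // Each cache_t triple is { total size in bytes, associativity, line size }.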
+ *I1c = (cache_t) { 32768, 4, 64 };
+ *D1c = (cache_t) { 32768, 8, 64 };
+ *LLc = (cache_t) { 2097152, 16, 64 };
+
#else
#error "Unknown arch"
# define N_IADDR_LO_ZERO_BITS 2
#elif defined(VGA_x86) || defined(VGA_amd64)
# define N_IADDR_LO_ZERO_BITS 0
-#elif defined(VGA_s390x) || defined(VGA_arm)
+#elif defined(VGA_s390x) || defined(VGA_arm) || defined(VGA_riscv64)
# define N_IADDR_LO_ZERO_BITS 1
#else
# error "Unsupported architecture"
ARCH_MAX="nanomips"
;;
+ riscv64)
+ AC_MSG_RESULT([ok (${host_cpu})])
+ ARCH_MAX="riscv64"
+ ;;
+
*)
AC_MSG_RESULT([no (${host_cpu})])
AC_MSG_ERROR([Unsupported host architecture. Sorry])
valt_load_address_sec_inner="0xUNSET"
AC_MSG_RESULT([ok (${ARCH_MAX}-${VGCONF_OS})])
;;
+ riscv64-linux)
+ VGCONF_ARCH_PRI="riscv64"
+ VGCONF_ARCH_SEC=""
+ VGCONF_PLATFORM_PRI_CAPS="RISCV64_LINUX"
+ VGCONF_PLATFORM_SEC_CAPS=""
+ valt_load_address_pri_norml="0x58000000"
+ valt_load_address_pri_inner="0x38000000"
+ valt_load_address_sec_norml="0xUNSET"
+ valt_load_address_sec_inner="0xUNSET"
+ AC_MSG_RESULT([ok (${ARCH_MAX}-${VGCONF_OS})])
+ ;;
x86-solaris)
VGCONF_ARCH_PRI="x86"
VGCONF_ARCH_SEC=""
test x$VGCONF_PLATFORM_PRI_CAPS = xMIPS64_LINUX )
AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_NANOMIPS,
test x$VGCONF_PLATFORM_PRI_CAPS = xNANOMIPS_LINUX )
+AM_CONDITIONAL(VGCONF_ARCHS_INCLUDE_RISCV64,
+ test x$VGCONF_PLATFORM_PRI_CAPS = xRISCV64_LINUX )
# Set up VGCONF_PLATFORMS_INCLUDE_<platform>. Either one or two of these
# become defined.
test x$VGCONF_PLATFORM_PRI_CAPS = xMIPS64_LINUX)
AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_NANOMIPS_LINUX,
test x$VGCONF_PLATFORM_PRI_CAPS = xNANOMIPS_LINUX)
+AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_RISCV64_LINUX,
+ test x$VGCONF_PLATFORM_PRI_CAPS = xRISCV64_LINUX)
AM_CONDITIONAL(VGCONF_PLATFORMS_INCLUDE_X86_FREEBSD,
test x$VGCONF_PLATFORM_PRI_CAPS = xX86_FREEBSD \
-o x$VGCONF_PLATFORM_SEC_CAPS = xX86_FREEBSD)
-o x$VGCONF_PLATFORM_PRI_CAPS = xS390X_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xMIPS32_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xMIPS64_LINUX \
- -o x$VGCONF_PLATFORM_PRI_CAPS = xNANOMIPS_LINUX)
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xNANOMIPS_LINUX \
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xRISCV64_LINUX)
AM_CONDITIONAL(VGCONF_OS_IS_FREEBSD,
test x$VGCONF_PLATFORM_PRI_CAPS = xX86_FREEBSD \
-o x$VGCONF_PLATFORM_PRI_CAPS = xAMD64_FREEBSD \
-o x$VGCONF_PLATFORM_PRI_CAPS = xARM64_LINUX \
-o x$VGCONF_PLATFORM_PRI_CAPS = xARM64_FREEBSD \
-o x$VGCONF_PLATFORM_PRI_CAPS = xMIPS64_LINUX \
- -o x$VGCONF_PLATFORM_PRI_CAPS = xS390X_LINUX ; then
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xS390X_LINUX \
+ -o x$VGCONF_PLATFORM_PRI_CAPS = xRISCV64_LINUX ; then
mflag_primary=$FLAG_M64
elif test x$VGCONF_PLATFORM_PRI_CAPS = xX86_DARWIN ; then
mflag_primary="$FLAG_M32 -arch i386"
memcheck/tests/amd64-linux/Makefile
memcheck/tests/arm64-linux/Makefile
memcheck/tests/x86-linux/Makefile
+ memcheck/tests/riscv64-linux/Makefile
memcheck/tests/amd64-solaris/Makefile
memcheck/tests/x86-solaris/Makefile
memcheck/tests/amd64-freebsd/Makefile
none/tests/mips32/Makefile
none/tests/mips64/Makefile
none/tests/nanomips/Makefile
+ none/tests/riscv64/Makefile
none/tests/linux/Makefile
none/tests/darwin/Makefile
none/tests/solaris/Makefile
m_dispatch/dispatch-mips32-linux.S \
m_dispatch/dispatch-mips64-linux.S \
m_dispatch/dispatch-nanomips-linux.S \
+ m_dispatch/dispatch-riscv64-linux.S \
m_dispatch/dispatch-x86-freebsd.S \
m_dispatch/dispatch-amd64-freebsd.S \
m_dispatch/dispatch-arm64-freebsd.S \
m_gdbserver/valgrind-low-mips32.c \
m_gdbserver/valgrind-low-mips64.c \
m_gdbserver/valgrind-low-nanomips.c \
+ m_gdbserver/valgrind-low-riscv64.c \
m_gdbserver/version.c \
m_initimg/initimg-linux.c \
m_initimg/initimg-freebsd.c \
m_sigframe/sigframe-mips32-linux.c \
m_sigframe/sigframe-mips64-linux.c \
m_sigframe/sigframe-nanomips-linux.c \
+ m_sigframe/sigframe-riscv64-linux.c \
m_sigframe/sigframe-x86-darwin.c \
m_sigframe/sigframe-amd64-darwin.c \
m_sigframe/sigframe-solaris.c \
m_syswrap/syscall-mips32-linux.S \
m_syswrap/syscall-mips64-linux.S \
m_syswrap/syscall-nanomips-linux.S \
+ m_syswrap/syscall-riscv64-linux.S \
m_syswrap/syscall-x86-freebsd.S \
m_syswrap/syscall-amd64-freebsd.S \
m_syswrap/syscall-arm64-freebsd.S \
m_syswrap/syswrap-mips32-linux.c \
m_syswrap/syswrap-mips64-linux.c \
m_syswrap/syswrap-nanomips-linux.c \
+ m_syswrap/syswrap-riscv64-linux.c \
m_syswrap/syswrap-x86-darwin.c \
m_syswrap/syswrap-amd64-darwin.c \
m_syswrap/syswrap-xen.c \
m_gdbserver/mips64-linux-valgrind.xml \
m_gdbserver/mips64-fpu-valgrind-s1.xml \
m_gdbserver/mips64-fpu-valgrind-s2.xml \
- m_gdbserver/mips64-fpu.xml
+ m_gdbserver/mips64-fpu.xml \
+ m_gdbserver/riscv64-cpu-valgrind-s1.xml \
+ m_gdbserver/riscv64-cpu-valgrind-s2.xml \
+ m_gdbserver/riscv64-cpu.xml \
+ m_gdbserver/riscv64-linux.xml \
+ m_gdbserver/riscv64-linux-valgrind.xml \
+ m_gdbserver/riscv64-fpu-valgrind-s1.xml \
+ m_gdbserver/riscv64-fpu-valgrind-s2.xml \
+ m_gdbserver/riscv64-fpu.xml
# so as to make sure these get copied into the install tree
vglibdir = $(pkglibexecdir)
#include <string.h>
#include <unistd.h>
+/* Provide our own definitions for elf.h constants that might not yet be
+   available on some older systems. */
#ifndef EM_X86_64
-#define EM_X86_64 62 // elf.h doesn't define this on some older systems
+#define EM_X86_64 62
#endif
#ifndef EM_AARCH64
-#define EM_AARCH64 183 // ditto
+#define EM_AARCH64 183
#endif
#ifndef EM_PPC64
-#define EM_PPC64 21 // ditto
+#define EM_PPC64 21
#endif
#ifndef EM_NANOMIPS
#define E_MIPS_ABI2 0x00000020
#endif
+#ifndef EM_RISCV
+#define EM_RISCV 243
+#endif
+
/* Report fatal errors */
__attribute__((noreturn))
static void barf ( const char *format, ... )
(header.ehdr64.e_ident[EI_OSABI] == ELFOSABI_SYSV ||
header.ehdr64.e_ident[EI_OSABI] == ELFOSABI_LINUX)) {
platform = "ppc64le-linux";
+ } else if (header.ehdr64.e_machine == EM_RISCV &&
+ (header.ehdr64.e_ident[EI_OSABI] == ELFOSABI_SYSV ||
+ header.ehdr64.e_ident[EI_OSABI] == ELFOSABI_LINUX)) {
+ platform = "riscv64-linux";
}
} else if (header.c[EI_DATA] == ELFDATA2MSB) {
# if !defined(VGPV_arm_linux_android) \
the executable (eg because it's a shell script). VG_PLATFORM is the
default_platform. Its value is defined in coregrind/Makefile.am and
typically it is the primary build target. Unless the primary build
- target is not built is not built in which case VG_PLATFORM is the
- secondary build target. */
+ target is not built in which case VG_PLATFORM is the secondary build
+ target. */
# if defined(VGO_linux)
if ((0==strcmp(VG_PLATFORM,"x86-linux")) ||
(0==strcmp(VG_PLATFORM,"amd64-linux")) ||
(0==strcmp(VG_PLATFORM,"s390x-linux")) ||
(0==strcmp(VG_PLATFORM,"mips32-linux")) ||
(0==strcmp(VG_PLATFORM,"mips64-linux")) ||
- (0==strcmp(VG_PLATFORM,"nanomips-linux")))
+ (0==strcmp(VG_PLATFORM,"nanomips-linux")) ||
+ (0==strcmp(VG_PLATFORM,"riscv64-linux")))
default_platform = VG_PLATFORM;
# elif defined(VGO_solaris)
if ((0==strcmp(VG_PLATFORM,"x86-solaris")) ||
# elif defined(VGP_amd64_linux) \
|| defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
|| defined(VGP_s390x_linux) || defined(VGP_mips32_linux) \
- || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
+ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
+ || defined(VGP_riscv64_linux)
res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
prot, flags, fd, offset);
# elif defined(VGP_x86_darwin)
SysRes ML_(am_open) ( const HChar* pathname, Int flags, Int mode )
{
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
- /* ARM64 wants to use __NR_openat rather than __NR_open. */
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
+ /* More recent Linux platforms have only __NR_openat and no __NR_open. */
SysRes res = VG_(do_syscall4)(__NR_openat,
VKI_AT_FDCWD, (UWord)pathname, flags, mode);
# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_freebsd)
Int ML_(am_readlink)(const HChar* path, HChar* buf, UInt bufsiz)
{
SysRes res;
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
(UWord)path, (UWord)buf, bufsiz);
# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_freebsd)
#elif defined(VGA_arm) || defined(VGA_ppc32) || \
defined(VGA_ppc64be) || defined(VGA_ppc64le) || \
defined(VGA_mips32) || defined(VGA_mips64) || \
- defined(VGA_arm64) || defined(VGA_nanomips)
+ defined(VGA_arm64) || defined(VGA_nanomips) || \
+ defined(VGA_riscv64)
static Bool
get_cache_info(VexArchInfo *vai)
{
prs->pr_sid = VG_(getpgrp)();
#endif
-#if defined(VGP_s390x_linux)
+#if defined(VGP_s390x_linux) || defined(VGP_riscv64_linux)
/* prs->pr_reg has struct type. Need to take address. */
regs = (struct vki_user_regs_struct *)&(prs->pr_reg);
#elif defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
regs[VKI_MIPS32_EF_CP0_STATUS] = arch->vex.guest_CP0_status;
regs[VKI_MIPS32_EF_CP0_EPC] = arch->vex.guest_PC;
# undef DO
+#elif defined(VGP_riscv64_linux)
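+   /* Map the guest state onto the core-file register set, using the RISC-V
+      ABI register names: ra=x1, sp=x2, gp=x3, tp=x4, t0-t6, s0-s11, a0-a7. */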
+ regs->pc = arch->vex.guest_pc;
+ regs->ra = arch->vex.guest_x1;
+ regs->sp = arch->vex.guest_x2;
+ regs->gp = arch->vex.guest_x3;
+ regs->tp = arch->vex.guest_x4;
+ regs->t0 = arch->vex.guest_x5;
+ regs->t1 = arch->vex.guest_x6;
+ regs->t2 = arch->vex.guest_x7;
+ regs->s0 = arch->vex.guest_x8;
+ regs->s1 = arch->vex.guest_x9;
+ regs->a0 = arch->vex.guest_x10;
+ regs->a1 = arch->vex.guest_x11;
+ regs->a2 = arch->vex.guest_x12;
+ regs->a3 = arch->vex.guest_x13;
+ regs->a4 = arch->vex.guest_x14;
+ regs->a5 = arch->vex.guest_x15;
+ regs->a6 = arch->vex.guest_x16;
+ regs->a7 = arch->vex.guest_x17;
+ regs->s2 = arch->vex.guest_x18;
+ regs->s3 = arch->vex.guest_x19;
+ regs->s4 = arch->vex.guest_x20;
+ regs->s5 = arch->vex.guest_x21;
+ regs->s6 = arch->vex.guest_x22;
+ regs->s7 = arch->vex.guest_x23;
+ regs->s8 = arch->vex.guest_x24;
+ regs->s9 = arch->vex.guest_x25;
+ regs->s10 = arch->vex.guest_x26;
+ regs->s11 = arch->vex.guest_x27;
+ regs->t3 = arch->vex.guest_x28;
+ regs->t4 = arch->vex.guest_x29;
+ regs->t5 = arch->vex.guest_x30;
+ regs->t6 = arch->vex.guest_x31;
#elif defined(VGP_amd64_freebsd)
regs->rflags = LibVEX_GuestAMD64_get_rflags( &arch->vex );
regs->rsp = arch->vex.guest_RSP;
# undef DO
#elif defined(VGP_nanomips_linux)
+#elif defined(VGP_riscv64_linux)
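+   /* Copy all 32 FP registers and the fcsr control/status register into the
+      core-file FP register set. */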
+ fpu->d.f[0] = arch->vex.guest_f0;
+ fpu->d.f[1] = arch->vex.guest_f1;
+ fpu->d.f[2] = arch->vex.guest_f2;
+ fpu->d.f[3] = arch->vex.guest_f3;
+ fpu->d.f[4] = arch->vex.guest_f4;
+ fpu->d.f[5] = arch->vex.guest_f5;
+ fpu->d.f[6] = arch->vex.guest_f6;
+ fpu->d.f[7] = arch->vex.guest_f7;
+ fpu->d.f[8] = arch->vex.guest_f8;
+ fpu->d.f[9] = arch->vex.guest_f9;
+ fpu->d.f[10] = arch->vex.guest_f10;
+ fpu->d.f[11] = arch->vex.guest_f11;
+ fpu->d.f[12] = arch->vex.guest_f12;
+ fpu->d.f[13] = arch->vex.guest_f13;
+ fpu->d.f[14] = arch->vex.guest_f14;
+ fpu->d.f[15] = arch->vex.guest_f15;
+ fpu->d.f[16] = arch->vex.guest_f16;
+ fpu->d.f[17] = arch->vex.guest_f17;
+ fpu->d.f[18] = arch->vex.guest_f18;
+ fpu->d.f[19] = arch->vex.guest_f19;
+ fpu->d.f[20] = arch->vex.guest_f20;
+ fpu->d.f[21] = arch->vex.guest_f21;
+ fpu->d.f[22] = arch->vex.guest_f22;
+ fpu->d.f[23] = arch->vex.guest_f23;
+ fpu->d.f[24] = arch->vex.guest_f24;
+ fpu->d.f[25] = arch->vex.guest_f25;
+ fpu->d.f[26] = arch->vex.guest_f26;
+ fpu->d.f[27] = arch->vex.guest_f27;
+ fpu->d.f[28] = arch->vex.guest_f28;
+ fpu->d.f[29] = arch->vex.guest_f29;
+ fpu->d.f[30] = arch->vex.guest_f30;
+ fpu->d.f[31] = arch->vex.guest_f31;
+ fpu->d.fcsr = arch->vex.guest_fcsr;
+
#elif defined(VGP_x86_freebsd)
#elif defined(VGP_amd64_freebsd)
# elif defined(VGP_arm64_linux) || defined(VGP_arm64_freebsd)
if (regno == 31) { *a = regs->sp; return True; }
if (regno == 29) { *a = regs->fp; return True; }
+# elif defined(VGP_riscv64_linux)
+ if (regno == 2) { *a = regs->sp; return True; }
+ if (regno == 8) { *a = regs->fp; return True; }
# else
# error "Unknown platform"
# endif
is_rx_map = seg->hasR && seg->hasX;
is_rw_map = seg->hasR && seg->hasW;
# elif defined(VGA_amd64) || defined(VGA_ppc64be) || defined(VGA_ppc64le) \
- || defined(VGA_arm) || defined(VGA_arm64)
+ || defined(VGA_arm) || defined(VGA_arm64) || defined(VGA_riscv64)
is_rx_map = seg->hasR && seg->hasX && !seg->hasW;
is_rw_map = seg->hasR && seg->hasW && !seg->hasX;
# elif defined(VGP_s390x_linux)
case Creg_IA_SP: return eec->uregs->sp;
case Creg_IA_BP: return eec->uregs->fp;
case Creg_MIPS_RA: return eec->uregs->ra;
-# elif defined(VGA_ppc32) || defined(VGA_ppc64be) \
- || defined(VGA_ppc64le)
# elif defined(VGP_arm64_linux) || defined(VGP_arm64_freebsd)
case Creg_ARM64_SP: return eec->uregs->sp;
case Creg_ARM64_X30: return eec->uregs->x30;
case Creg_ARM64_X29: return eec->uregs->x29;
+# elif defined(VGA_ppc32) || defined(VGA_ppc64be) \
+ || defined(VGA_ppc64le) || defined(VGP_riscv64_linux)
# else
# error "Unsupported arch"
# endif
case CFIC_ARM64_X29REL:
cfa = cfsi_m->cfa_off + uregs->x29;
break;
-
+# elif defined(VGP_riscv64_linux)
+ case CFIC_IA_SPREL:
+ cfa = cfsi_m->cfa_off + uregs->sp;
+ break;
+ case CFIC_IA_BPREL:
+ cfa = cfsi_m->cfa_off + uregs->fp;
+ break;
# else
# error "Unsupported arch"
# endif
return compute_cfa(&uregs,
min_accessible, max_accessible, ce->di, ce->cfsi_m);
}
+#elif defined(VGA_riscv64)
+ { D3UnwindRegs uregs;
+ uregs.pc = ip;
+ uregs.sp = sp;
+ uregs.fp = fp;
+ uregs.ra = 0;
+ return compute_cfa(&uregs,
+ min_accessible, max_accessible, ce->di, ce->cfsi_m);
+ }
# else
return 0; /* indicates failure */
For arm64, the unwound registers are: X29(FP) X30(LR) SP PC.
For s390, the unwound registers are: R11(FP) R14(LR) R15(SP) F0..F7 PC.
+
+   For riscv64, the unwound registers are: X2(SP) X8(FP) PC.
*/
Bool VG_(use_CF_info) ( /*MOD*/D3UnwindRegs* uregsHere,
Addr min_accessible,
ipHere = uregsHere->pc;
# elif defined(VGP_arm64_freebsd)
ipHere = uregsHere->pc;
+# elif defined(VGP_riscv64_linux)
+ ipHere = uregsHere->pc;
# else
# error "Unknown arch"
# endif
COMPUTE(uregsPrev.sp, uregsHere->sp, cfsi_m->sp_how, cfsi_m->sp_off);
COMPUTE(uregsPrev.x30, uregsHere->x30, cfsi_m->x30_how, cfsi_m->x30_off);
COMPUTE(uregsPrev.x29, uregsHere->x29, cfsi_m->x29_how, cfsi_m->x29_off);
+# elif defined(VGP_riscv64_linux)
+   /* Compute register values in the caller's frame. Notice that the previous
+      pc is equal to the previous ra and is calculated as such. The previous
+      ra itself is then set to 0; this helps to promptly fail cases where an
+      inner frame uses the bogus CFIR_SAME rule for ra. */
+ COMPUTE(uregsPrev.pc, uregsHere->ra, cfsi_m->ra_how, cfsi_m->ra_off);
+ COMPUTE(uregsPrev.sp, uregsHere->sp, cfsi_m->sp_how, cfsi_m->sp_off);
+ COMPUTE(uregsPrev.fp, uregsHere->fp, cfsi_m->fp_how, cfsi_m->fp_off);
+ uregsPrev.ra = 0;
# else
# error "Unknown arch"
# endif
}
DiCfSI_m;
#elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
+typedef
+ struct {
+ UChar cfa_how; /* a CFIC_ value */
+ UChar ra_how; /* a CFIR_ value */
+ UChar sp_how; /* a CFIR_ value */
+ UChar fp_how; /* a CFIR_ value */
+ Int cfa_off;
+ Int ra_off;
+ Int sp_off;
+ Int fp_off;
+ }
+ DiCfSI_m;
+#elif defined(VGA_riscv64)
typedef
struct {
UChar cfa_how; /* a CFIC_ value */
# define FP_REG 30
# define SP_REG 29
# define RA_REG_DEFAULT 31
+#elif defined(VGP_riscv64_linux)
+# define FP_REG 8
+# define SP_REG 2
+# define RA_REG_DEFAULT 1
#else
# error "Unknown platform"
#endif
# define N_CFI_REGS 128
#elif defined(VGP_s390x_linux)
# define N_CFI_REGS 66
+#elif defined(VGP_riscv64_linux)
+# define N_CFI_REGS 128
#else
# define N_CFI_REGS 20
#endif
start out as RR_Same. */
ctx->state[j].reg[29/*FP*/].tag = RR_Same;
ctx->state[j].reg[30/*LR*/].tag = RR_Same;
+# elif defined(VGA_riscv64)
+ /* Registers fp and ra start out implicitly as RR_Same. */
+ ctx->state[j].reg[FP_REG].tag = RR_Same;
+ ctx->state[j].reg[RA_REG_DEFAULT].tag = RR_Same;
# endif
}
}
if (ctxs->cfa_is_regoff && ctxs->cfa_reg == SP_REG) {
si_m->cfa_off = ctxs->cfa_off;
# if defined(VGA_x86) || defined(VGA_amd64) || defined(VGA_s390x) \
- || defined(VGA_mips32) || defined(VGA_nanomips) || defined(VGA_mips64)
+ || defined(VGA_mips32) || defined(VGA_nanomips) \
+ || defined(VGA_mips64) || defined(VGA_riscv64)
si_m->cfa_how = CFIC_IA_SPREL;
# elif defined(VGA_arm)
si_m->cfa_how = CFIC_ARM_R13REL;
if (ctxs->cfa_is_regoff && ctxs->cfa_reg == FP_REG) {
si_m->cfa_off = ctxs->cfa_off;
# if defined(VGA_x86) || defined(VGA_amd64) || defined(VGA_s390x) \
- || defined(VGA_mips32) || defined(VGA_nanomips) || defined(VGA_mips64)
+ || defined(VGA_mips32) || defined(VGA_nanomips) \
+ || defined(VGA_mips64) || defined(VGA_riscv64)
si_m->cfa_how = CFIC_IA_BPREL;
# elif defined(VGA_arm)
si_m->cfa_how = CFIC_ARM_R12REL;
# elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le)
/* These don't use CFI based unwinding (is that really true?) */
+# elif defined(VGA_riscv64)
+
+ /* --- entire tail of this fn specialised for riscv64 --- */
+
+ SUMMARISE_HOW(si_m->ra_how, si_m->ra_off, ctxs->reg[ctx->ra_reg]);
+ SUMMARISE_HOW(si_m->fp_how, si_m->fp_off, ctxs->reg[FP_REG]);
+
+   /* On riscv64, it seems the old sp value before the call is always
+      the same as the CFA. Therefore ... */
+ si_m->sp_how = CFIR_CFAREL;
+ si_m->sp_off = 0;
+
+   /* Bogus-looking range? Note that we require the difference to be
+      representable in 32 bits. */
+ if (loc_start >= ctx->loc)
+ { why = 4; goto failed; }
+ if (ctx->loc - loc_start > 10000000 /* let's say */)
+ { why = 5; goto failed; }
+
+ *base = loc_start + ctx->initloc;
+ *len = (UInt)(ctx->loc - loc_start);
+
+ return True;
+
# else
# error "Unknown arch"
# endif
if (dwreg == srcuc->ra_reg)
return ML_(CfiExpr_CfiReg)( dstxa, Creg_ARM64_X30 );
# elif defined(VGA_ppc32) || defined(VGA_ppc64be) \
- || defined(VGA_ppc64le)
+ || defined(VGA_ppc64le) || defined(VGA_riscv64)
# else
# error "Unknown arch"
# endif
while (tries > 0) {
SysRes res;
-#if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+#if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
(UWord)path, (UWord)buf, bufsiz);
#elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_freebsd)
|| defined(VGP_arm_linux) || defined (VGP_s390x_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
|| defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux) \
|| defined(VGP_x86_solaris) || defined(VGP_amd64_solaris) \
|| defined(VGP_x86_freebsd) || defined(VGP_amd64_freebsd) \
|| defined(VGP_arm64_freebsd)
SHOW_HOW(si_m->x30_how, si_m->x30_off);
VG_(printf)(" X29=");
SHOW_HOW(si_m->x29_how, si_m->x29_off);
+# elif defined(VGA_riscv64)
+ VG_(printf)(" SP=");
+ SHOW_HOW(si_m->sp_how, si_m->sp_off);
+ VG_(printf)(" FP=");
+ SHOW_HOW(si_m->fp_how, si_m->fp_off);
# else
# error "Unknown arch"
# endif
return a0;
}
+#elif defined(VGP_riscv64_linux)
+
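+/* riscv64 Linux syscall convention: syscall number in a7, arguments in
+   a0..a5, result in a0; a result in the range -4095..-1 signals an error. */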
+static UInt local_sys_write_stderr ( const HChar* buf, Int n )
+{
+ register RegWord a0 asm("a0") = 2; /* stderr */
+ register RegWord a1 asm("a1") = (RegWord)buf;
+ register RegWord a2 asm("a2") = n;
+ register RegWord a7 asm("a7") = __NR_write;
+ __asm__ volatile (
+ "ecall\n"
+ : "+r" (a0)
+ : "r" (a1), "r" (a2), "r" (a7)
+ );
+ return a0 >= 0 ? (UInt)a0 : -1;
+}
+
+static UInt local_sys_getpid ( void )
+{
+ register RegWord a0 asm("a0");
+ register RegWord a7 asm("a7") = __NR_getpid;
+ __asm__ volatile (
+ "ecall\n"
+ : "=r" (a0)
+ : "r" (a7)
+ );
+ return (UInt)a0;
+}
+
#elif defined(VGP_x86_solaris)
static UInt local_sys_write_stderr ( const HChar* buf, Int n )
{
mips64_init_architecture(&the_low_target);
#elif defined(VGA_nanomips)
nanomips_init_architecture(&the_low_target);
+#elif defined(VGA_riscv64)
+ riscv64_init_architecture(&the_low_target);
#else
#error "architecture missing in target.c valgrind_initialize_target"
#endif
extern void mips32_init_architecture (struct valgrind_target_ops *target);
extern void mips64_init_architecture (struct valgrind_target_ops *target);
extern void nanomips_init_architecture (struct valgrind_target_ops *target);
+extern void riscv64_init_architecture (struct valgrind_target_ops *target);
#endif
&& !defined(VGP_ppc64le_linux) \
&& !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux) \
&& !defined(VGP_nanomips_linux) \
- && !defined(VGP_s390x_linux)
+ && !defined(VGP_s390x_linux) \
+ && !defined(VGP_riscv64_linux)
case AT_SYSINFO_EHDR: {
/* Trash this, because we don't reproduce it */
+         /* riscv64-linux: keep the VDSO mapping present on this platform.
+            It contains __vdso_rt_sigreturn(), which the kernel points the ra
+            register at when delivering a signal. */
const NSegment* ehdrseg = VG_(am_find_nsegment)((Addr)auxv->u.a_ptr);
vg_assert(ehdrseg);
VG_(am_munmap_valgrind)(ehdrseg->start, ehdrseg->end - ehdrseg->start);
arch->vex.guest_PC = iifii.initial_client_IP;
arch->vex.guest_r31 = iifii.initial_client_SP;
+# elif defined(VGP_riscv64_linux)
+ vg_assert(0 == sizeof(VexGuestRISCV64State) % LibVEX_GUEST_STATE_ALIGN);
+
+ /* Zero out the initial state. */
+ LibVEX_GuestRISCV64_initialise(&arch->vex);
+
+ /* Mark all registers as undefined ... */
+ VG_(memset)(&arch->vex_shadow1, 0xFF, sizeof(VexGuestRISCV64State));
+ VG_(memset)(&arch->vex_shadow2, 0x00, sizeof(VexGuestRISCV64State));
+ /* ... except x2 (sp), pc and fcsr. */
+ arch->vex_shadow1.guest_x2 = 0;
+ arch->vex_shadow1.guest_pc = 0;
+ arch->vex_shadow1.guest_fcsr = 0;
+
+ /* Put essential stuff into the new state. */
+ arch->vex.guest_x2 = iifii.initial_client_SP;
+ arch->vex.guest_pc = iifii.initial_client_IP;
+ /* Initialize fcsr in the same way as done by the Linux kernel:
+ accrued exception flags cleared; round to nearest, ties to even. */
+ arch->vex.guest_fcsr = 0;
+
+ /* Tell the tool about the registers we just wrote. */
+ VG_TRACK(post_reg_write, Vg_CoreStartup, /*tid*/1, VG_O_STACK_PTR, 8);
+ VG_TRACK(post_reg_write, Vg_CoreStartup, /*tid*/1, VG_O_INSTR_PTR, 8);
+ VG_TRACK(post_reg_write, Vg_CoreStartup, /*tid*/1,
+ offsetof(VexGuestRISCV64State, guest_fcsr), 4);
+
+#define PRECISE_GUEST_REG_DEFINEDNESS_AT_STARTUP 1
+
# else
# error Unknown platform
# endif
(srP)->misc.MIPS32.r31 = (UInt)ra; \
(srP)->misc.MIPS32.r28 = (UInt)gp; \
}
+#elif defined(VGP_riscv64_linux)
+# define GET_STARTREGS(srP) \
+ { ULong pc, sp, fp, ra; \
+ __asm__ __volatile__( \
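+      /* a jal to the label right below captures the current pc in %0 */ \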
+ "jal %0, 0f;" \
+ "0:\n" \
+ "mv %1, sp;" \
+ "mv %2, fp;" \
+ "mv %3, ra;" \
+ : "=r" (pc), \
+ "=r" (sp), \
+ "=r" (fp), \
+ "=r" (ra) \
+ ); \
+ (srP)->r_pc = pc; \
+ (srP)->r_sp = sp; \
+ (srP)->misc.RISCV64.r_fp = fp; \
+ (srP)->misc.RISCV64.r_ra = ra; \
+ }
#else
# error Unknown platform
#endif
SysRes VG_(mknod) ( const HChar* pathname, Int mode, UWord dev )
{
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
- /* ARM64 wants to use __NR_mknodat rather than __NR_mknod. */
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
+ /* More recent Linux platforms have only __NR_mknodat and no __NR_mknod. */
SysRes res = VG_(do_syscall4)(__NR_mknodat,
VKI_AT_FDCWD, (UWord)pathname, mode, dev);
# elif defined(VGO_linux) || defined(VGO_darwin)
SysRes VG_(open) ( const HChar* pathname, Int flags, Int mode )
{
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
- /* ARM64 wants to use __NR_openat rather than __NR_open. */
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
+ /* More recent Linux platforms have only __NR_openat and no __NR_open. */
SysRes res = VG_(do_syscall4)(__NR_openat,
VKI_AT_FDCWD, (UWord)pathname, flags, mode);
# elif defined(VGO_linux) || defined(VGO_freebsd)
} else {
return -1;
}
-# elif defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+# elif defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
SysRes res = VG_(do_syscall2)(__NR_pipe2, (UWord)fd, 0);
return sr_isError(res) ? -1 : 0;
# elif defined(VGO_linux)
SysRes VG_(dup2) ( Int oldfd, Int newfd )
{
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
/* We only have dup3, that means we have to mimic dup2.
The only real difference is when oldfd == newfd.
dup3 always returns an error, but dup2 returns only an
# if defined(VGO_solaris) || defined(VGP_arm64_linux)
SysRes res = VG_(do_syscall4)(__NR_renameat, VKI_AT_FDCWD, (UWord)old_name,
VKI_AT_FDCWD, (UWord)new_name);
-# elif defined(VGP_nanomips_linux)
+# elif defined(VGP_nanomips_linux) || defined(VGP_riscv64_linux)
SysRes res = VG_(do_syscall5)(__NR_renameat2, VKI_AT_FDCWD, (UWord)old_name,
VKI_AT_FDCWD, (UWord)new_name, 0);
Int VG_(unlink) ( const HChar* file_name )
{
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
SysRes res = VG_(do_syscall2)(__NR_unlinkat, VKI_AT_FDCWD,
(UWord)file_name);
# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_freebsd)
SysRes VG_(poll) (struct vki_pollfd *fds, Int nfds, Int timeout)
{
SysRes res;
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
- /* ARM64 wants to use __NR_ppoll rather than __NR_poll. */
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
+ /* More recent Linux platforms have only __NR_ppoll and no __NR_poll. */
struct vki_timespec timeout_ts;
if (timeout >= 0) {
timeout_ts.tv_sec = timeout / 1000;
{
SysRes res;
/* res = readlink( path, buf, bufsiz ); */
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
(UWord)path, (UWord)buf, bufsiz);
# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_freebsd)
UWord w = (irusr ? VKI_R_OK : 0)
| (iwusr ? VKI_W_OK : 0)
| (ixusr ? VKI_X_OK : 0);
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
SysRes res = VG_(do_syscall3)(__NR_faccessat, VKI_AT_FDCWD, (UWord)path, w);
# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_freebsd)
SysRes res = VG_(do_syscall2)(__NR_access, (UWord)path, w);
return res;
# elif defined(VGP_amd64_linux) || defined(VGP_s390x_linux) \
|| defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
- || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
+ || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
+ || defined(VGP_riscv64_linux)
res = VG_(do_syscall4)(__NR_pread64, fd, (UWord)buf, count, offset);
return res;
# elif defined(VGP_amd64_freebsd) || defined(VGP_arm64_freebsd)
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
- || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) || defined(VGO_freebsd)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux) || defined(VGO_freebsd)
SysRes res;
res = VG_(do_syscall3)(__NR_socket, domain, type, protocol );
return sr_isError(res) ? -1 : sr_Res(res);
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
- || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) || defined(VGO_freebsd)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux) || defined(VGO_freebsd)
SysRes res;
res = VG_(do_syscall3)(__NR_connect, sockfd, (UWord)serv_addr, addrlen);
return sr_isError(res) ? -1 : sr_Res(res);
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
- || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) || defined(VGO_freebsd)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux) || defined(VGO_freebsd)
SysRes res;
res = VG_(do_syscall6)(__NR_sendto, sd, (UWord)msg,
count, VKI_MSG_NOSIGNAL, 0,0);
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
- || defined(VGP_nanomips_linux) || defined(VGO_freebsd) \
- || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
+ || defined(VGP_nanomips_linux) || defined(VGP_riscv64_linux) \
+ || defined(VGO_freebsd)
SysRes res;
res = VG_(do_syscall3)( __NR_getsockname,
(UWord)sd, (UWord)name, (UWord)namelen );
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
- || defined(VGP_nanomips_linux) || defined(VGO_freebsd)
+ || defined(VGP_nanomips_linux) || defined(VGP_riscv64_linux) \
+ || defined(VGO_freebsd)
SysRes res;
res = VG_(do_syscall3)( __NR_getpeername,
(UWord)sd, (UWord)name, (UWord)namelen );
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
|| defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
- || defined(VGO_freebsd)
+ || defined(VGP_riscv64_linux) || defined(VGO_freebsd)
SysRes res;
res = VG_(do_syscall5)( __NR_getsockopt,
(UWord)sd, (UWord)level, (UWord)optname,
# elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
- || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
SysRes res;
res = VG_(do_syscall5)( __NR_setsockopt,
(UWord)sd, (UWord)level, (UWord)optname,
* the /proc/self link is pointing...
*/
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
(UWord)"/proc/self",
(UWord)pid, sizeof(pid));
Int VG_(getpgrp) ( void )
{
/* ASSUMES SYSCALL ALWAYS SUCCEEDS */
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
return sr_Res( VG_(do_syscall1)(__NR_getpgid, 0) );
# elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_freebsd)
return sr_Res( VG_(do_syscall0)(__NR_getpgrp) );
|| defined(VGO_darwin) || defined(VGP_s390x_linux) \
|| defined(VGP_mips32_linux) || defined(VGP_arm64_linux) \
|| defined(VGO_solaris) || defined(VGP_nanomips_linux) \
- || defined(VGO_freebsd)
+ || defined(VGP_riscv64_linux) || defined(VGO_freebsd)
SysRes sres;
sres = VG_(do_syscall2)(__NR_getgroups, size, (Addr)list);
if (sr_isError(sres))
fds[0] = fds[1] = -1;
}
-# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+# if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
SysRes res;
res = VG_(do_syscall5)(__NR_clone, VKI_SIGCHLD,
(UWord)NULL, (UWord)NULL, (UWord)NULL, (UWord)NULL);
(UWord) nbytes, (UWord) 3);
vg_assert( !sr_isError(sres) );
-# elif defined(VGA_nanomips)
-
+# elif defined(VGA_nanomips)
__builtin___clear_cache(ptr, (char*)ptr + nbytes);
+# elif defined(VGP_riscv64_linux)
+ /* Make data stores to the area visible to all RISC-V harts. */
+ __asm__ __volatile__("fence w,r");
+
+ /* Ask the kernel to execute fence.i on all harts to guarantee that an
+ instruction fetch on each hart will see any previous data stores visible
+ to the same hart. */
+ Addr startaddr = (Addr)ptr;
+ Addr endaddr = startaddr + nbytes;
+ SysRes sres = VG_(do_syscall3)(__NR_riscv_flush_icache, startaddr, endaddr,
+ 0 /*flags*/);
+ vg_assert(!sr_isError(sres));
+
# endif
}
= VG_(threads)[tid].arch.vex.guest_r31;
regs->misc.MIPS64.r28
= VG_(threads)[tid].arch.vex.guest_r28;
+# elif defined(VGA_riscv64)
+ regs->r_pc = VG_(threads)[tid].arch.vex.guest_pc;
+ regs->r_sp = VG_(threads)[tid].arch.vex.guest_x2;
+ regs->misc.RISCV64.r_fp = VG_(threads)[tid].arch.vex.guest_x8;
+ regs->misc.RISCV64.r_ra = VG_(threads)[tid].arch.vex.guest_x1;
# else
# error "Unknown arch"
# endif
(*f)(tid, "x28", vex->guest_X28);
(*f)(tid, "x29", vex->guest_X29);
(*f)(tid, "x30", vex->guest_X30);
+#elif defined(VGA_riscv64)
+ (*f)(tid, "x0" , vex->guest_x0 );
+ (*f)(tid, "x1" , vex->guest_x1 );
+ (*f)(tid, "x2" , vex->guest_x2 );
+ (*f)(tid, "x3" , vex->guest_x3 );
+ (*f)(tid, "x4" , vex->guest_x4 );
+ (*f)(tid, "x5" , vex->guest_x5 );
+ (*f)(tid, "x6" , vex->guest_x6 );
+ (*f)(tid, "x7" , vex->guest_x7 );
+ (*f)(tid, "x8" , vex->guest_x8 );
+ (*f)(tid, "x9" , vex->guest_x9 );
+ (*f)(tid, "x10", vex->guest_x10);
+ (*f)(tid, "x11", vex->guest_x11);
+ (*f)(tid, "x12", vex->guest_x12);
+ (*f)(tid, "x13", vex->guest_x13);
+ (*f)(tid, "x14", vex->guest_x14);
+ (*f)(tid, "x15", vex->guest_x15);
+ (*f)(tid, "x16", vex->guest_x16);
+ (*f)(tid, "x17", vex->guest_x17);
+ (*f)(tid, "x18", vex->guest_x18);
+ (*f)(tid, "x19", vex->guest_x19);
+ (*f)(tid, "x20", vex->guest_x20);
+ (*f)(tid, "x21", vex->guest_x21);
+ (*f)(tid, "x22", vex->guest_x22);
+ (*f)(tid, "x23", vex->guest_x23);
+ (*f)(tid, "x24", vex->guest_x24);
+ (*f)(tid, "x25", vex->guest_x25);
+ (*f)(tid, "x26", vex->guest_x26);
+ (*f)(tid, "x27", vex->guest_x27);
+ (*f)(tid, "x28", vex->guest_x28);
+ (*f)(tid, "x29", vex->guest_x29);
+ (*f)(tid, "x30", vex->guest_x30);
+ (*f)(tid, "x31", vex->guest_x31);
#else
# error Unknown arch
#endif
return True;
}
+
+#elif defined(VGA_riscv64)
+ {
+ va = VexArchRISCV64;
+ vai.endness = VexEndnessLE;
+
+ /* Hardware baseline is RV64GC. */
+ vai.hwcaps = 0;
+
+ VG_(debugLog)(1, "machine", "hwcaps = 0x%x\n", vai.hwcaps);
+
+ VG_(machine_get_cache_info)(&vai);
+
+ return True;
+ }
+
#else
# error "Unknown arch"
#endif
# elif defined(VGA_mips64)
return 8;
+# elif defined(VGA_riscv64)
+ /* 64-bit integer and floating-point registers, no vector set. */
+ return 8;
+
# else
# error "Unknown arch"
# endif
|| defined(VGP_s390x_linux) || defined(VGP_mips32_linux) \
|| defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
|| defined(VGP_x86_solaris) || defined(VGP_amd64_solaris) \
- || defined(VGP_nanomips_linux)
+ || defined(VGP_nanomips_linux) || defined(VGP_riscv64_linux)
return f;
# elif defined(VGP_ppc64be_linux)
/* ppc64-linux uses the AIX scheme, in which f is a pointer to a
VG_TRACK(post_reg_write, Vg_CoreClientReq, tid,
offsetof(VexGuestPPC64State, guest_GPR3),
sizeof(VG_(threads)[tid].arch.vex.guest_GPR3));
+# elif defined(VGA_riscv64)
+ VG_(threads)[tid].arch.vex.guest_x10 = to_run;
+ VG_TRACK(post_reg_write, Vg_CoreClientReq, tid,
+ offsetof(VexGuestRISCV64State, guest_x10),
+ sizeof(VG_(threads)[tid].arch.vex.guest_x10));
# elif defined(VGA_s390x)
VG_(threads)[tid].arch.vex.guest_r2 = to_run;
VG_TRACK(post_reg_write, Vg_CoreClientReq, tid,
".set pop \n\t"
".previous \n\t"
);
+#elif defined(VGP_riscv64_linux)
+asm("\n"
+ "\t.text\n"
+ "\t.type _start,@function\n"
+ "\t.global _start\n"
+ "_start:\n"
+ /* establish the global pointer in gp */
+ ".option push\n"
+ ".option norelax\n"
+ "\tla gp, __global_pointer$\n"
+ ".option pop\n"
+ /* set up the new stack in t0 */
+ "\tla t0, vgPlain_interim_stack\n"
+ "\tli t1, "VG_STRINGIFY(VG_STACK_GUARD_SZB)"\n"
+ "\tadd t0, t0, t1\n"
+ "\tli t1, "VG_STRINGIFY(VG_DEFAULT_STACK_ACTIVE_SZB)"\n"
+ "\tadd t0, t0, t1\n"
+ "\tli t1, 0xFFFFFF00\n"
+ "\tand t0, t0, t1\n"
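+   /* round the new sp down to a 256-byte boundary; note the mask also
+      clears address bits above 4G, which is presumably fine because the
+      interim stack sits in the image mapped in the low 4G */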
+ /* install it, and collect the original one */
+ "\tmv a0, sp\n"
+ "\tmv sp, t0\n"
+ /* call _start_in_C_linux, passing it the startup sp */
+ "\tj _start_in_C_linux\n"
+ "\tunimp\n"
+ ".previous\n"
+);
#else
# error "Unknown platform"
#endif
VgSmc VG_(clo_smc_check) = Vg_SmcAllNonFile;
#elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_ppc64le) \
|| defined(VGA_arm) || defined(VGA_arm64) \
- || defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
+ || defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips) \
+ || defined(VGA_riscv64)
VgSmc VG_(clo_smc_check) = Vg_SmcStack;
#else
# error "Unknown arch"
if (VG_STREQ(soname, VG_U_LD_LINUX_AARCH64_SO_1)) return True;
if (VG_STREQ(soname, VG_U_LD_LINUX_ARMHF_SO_3)) return True;
if (VG_STREQ(soname, VG_U_LD_LINUX_MIPSN8_S0_1)) return True;
+ if (VG_STREQ(soname, VG_U_LD_LINUX_RISCV64_SO_1)) return True;
# elif defined(VGO_freebsd)
if (VG_STREQ(soname, VG_U_LD_ELF_SO_1)) return True;
if (VG_STREQ(soname, VG_U_LD_ELF32_SO_1)) return True;
);
}
+# elif defined(VGP_riscv64_linux)
+ if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+ add_hardwired_spec(
+ "ld-linux-riscv64-lp64d.so.1", "strlen",
+ (Addr)&VG_(riscv64_linux_REDIR_FOR_strlen),
+ complain_about_stripped_glibc_ldso
+ );
+ add_hardwired_spec(
+ "ld-linux-riscv64-lp64d.so.1", "index",
+ (Addr)&VG_(riscv64_linux_REDIR_FOR_index),
+ complain_about_stripped_glibc_ldso
+ );
+ }
+
# elif defined(VGP_x86_solaris)
/* If we're using memcheck, use these intercepts right from
the start, otherwise ld.so makes a lot of noise. */
# if defined(VGA_mips32) || defined(VGA_mips64)
/* no special requirements */
# endif
+
+# if defined(VGA_riscv64)
+ /* no special requirements */
+# endif
}
// NO_VGDB_POLL value ensures vgdb is not polled, while
tst->arch.vex.guest_LLaddr = (RegWord)(-1);
# elif defined(VGP_arm64_linux) || defined(VGP_arm64_freebsd)
tst->arch.vex.guest_LLSC_SIZE = 0;
+# elif defined(VGP_riscv64_linux)
+ tst->arch.vex.guest_LLSC_SIZE = 0;
# endif
if (0) {
#elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
# define VG_CLREQ_ARGS guest_r12
# define VG_CLREQ_RET guest_r11
+#elif defined(VGA_riscv64)
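+/* The client request args block is passed in a4 (x14) and the result is
+   returned in a3 (x13). */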
+# define VG_CLREQ_ARGS guest_x14
+# define VG_CLREQ_RET guest_x13
#else
# error Unknown arch
#endif
(srP)->misc.MIPS32.r28 = (uc)->uc_mcontext.sc_regs[28]; \
}
+#elif defined(VGP_riscv64_linux)
+# define VG_UCONTEXT_INSTR_PTR(uc) ((uc)->uc_mcontext.sc_regs.pc)
+# define VG_UCONTEXT_STACK_PTR(uc) ((uc)->uc_mcontext.sc_regs.sp)
+# define VG_UCONTEXT_SYSCALL_SYSRES(uc) \
+ /* Convert the value in uc_mcontext.sc_regs.a0 into a SysRes. */ \
+ VG_(mk_SysRes_riscv64_linux)( (uc)->uc_mcontext.sc_regs.a0 )
+# define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc) \
+ { (srP)->r_pc = (uc)->uc_mcontext.sc_regs.pc; \
+ (srP)->r_sp = (uc)->uc_mcontext.sc_regs.sp; \
+ (srP)->misc.RISCV64.r_fp = (uc)->uc_mcontext.sc_regs.s0; \
+ (srP)->misc.RISCV64.r_ra = (uc)->uc_mcontext.sc_regs.ra; \
+ }
+
#elif defined(VGP_x86_solaris)
# define VG_UCONTEXT_INSTR_PTR(uc) ((Addr)(uc)->uc_mcontext.gregs[VKI_EIP])
# define VG_UCONTEXT_STACK_PTR(uc) ((Addr)(uc)->uc_mcontext.gregs[VKI_UESP])
(srP)->r_sp = (uc)->uc_mcontext.gregs[VKI_REG_RSP]; \
(srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.gregs[VKI_REG_RBP]; \
}
+
#else
# error Unknown platform
#endif
if (skss_handler != VKI_SIG_IGN && skss_handler != VKI_SIG_DFL)
skss_flags |= VKI_SA_SIGINFO;
+# if !defined(VGP_riscv64_linux)
/* use our own restorer */
skss_flags |= VKI_SA_RESTORER;
+# endif
/* Create SKSS entry for this signal. */
if (sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
" li $t4, " #name "\n" \
" syscall[32]\n" \
".previous\n"
+
+#elif defined(VGP_riscv64_linux)
+/* Not used on riscv64. */
+# define _MY_SIGRETURN(name) \
+ ".text\n" \
+ ".globl my_sigreturn\n" \
+ "my_sigreturn:\n" \
+ " unimp\n" \
+ ".previous\n"
+
#elif defined(VGP_x86_solaris) || defined(VGP_amd64_solaris)
/* Not used on Solaris. */
# define _MY_SIGRETURN(name) \
ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
ksa.sa_flags = skss.skss_per_sig[sig].skss_flags;
-# if !defined(VGP_ppc32_linux) && \
+# if !defined(VGP_ppc32_linux) && !defined(VGP_mips32_linux) && \
+ !defined(VGP_riscv64_linux) && \
!defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
- !defined(VGP_mips32_linux) && !defined(VGO_solaris) && !defined(VGO_freebsd)
+ !defined(VGO_solaris) && !defined(VGO_freebsd)
ksa.sa_restorer = my_sigreturn;
# endif
/* Re above ifdef (also the assertion below), PaulM says:
# endif
vg_assert(ksa_old.sa_flags
== skss_old.skss_per_sig[sig].skss_flags);
-# if !defined(VGP_ppc32_linux) && \
+# if !defined(VGP_ppc32_linux) && !defined(VGP_mips32_linux) && \
+ !defined(VGP_mips64_linux) && !defined(VGP_nanomips_linux) && \
+ !defined(VGP_riscv64_linux) && \
!defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
- !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux) && \
- !defined(VGP_nanomips_linux) && !defined(VGO_solaris) && \
- !defined(VGO_freebsd)
+ !defined(VGO_solaris) && !defined(VGO_freebsd)
vg_assert(ksa_old.sa_restorer == my_sigreturn);
# endif
VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
old_act->sa_flags = scss.scss_per_sig[signo].scss_flags;
old_act->sa_mask = scss.scss_per_sig[signo].scss_mask;
-# if !defined(VGO_darwin) && !defined(VGO_freebsd) && \
- !defined(VGO_solaris)
+# if !defined(VGP_riscv64_linux) && !defined(VGO_darwin) && \
+ !defined(VGO_freebsd) && !defined(VGO_solaris)
old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
# endif
}
scss.scss_per_sig[signo].scss_mask = new_act->sa_mask;
scss.scss_per_sig[signo].scss_restorer = NULL;
-# if !defined(VGO_darwin) && !defined(VGO_freebsd) && \
- !defined(VGO_solaris)
+# if !defined(VGP_riscv64_linux) && !defined(VGO_darwin) && \
+ !defined(VGO_freebsd) && !defined(VGO_solaris)
scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
# endif
sa.ksa_handler = VKI_SIG_DFL;
sa.sa_flags = 0;
-# if !defined(VGO_darwin) && !defined(VGO_freebsd) && \
- !defined(VGO_solaris)
+# if !defined(VGP_riscv64_linux) && !defined(VGO_darwin) && \
+ !defined(VGO_freebsd) && !defined(VGO_solaris)
sa.sa_restorer = 0;
# endif
VG_(sigemptyset)(&sa.sa_mask);
VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n",
sa->ksa_handler,
(UInt)sa->sa_flags,
-# if !defined(VGO_darwin) && !defined(VGO_freebsd) && \
- !defined(VGO_solaris)
+# if !defined(VGP_riscv64_linux) && !defined(VGO_darwin) && \
+ !defined(VGO_freebsd) && !defined(VGO_solaris)
sa->sa_restorer
# else
(void*)0
sa.ksa_handler = VKI_SIG_DFL;
sa.sa_flags = 0;
-# if !defined(VGO_darwin) && !defined(VGO_freebsd) && \
- !defined(VGO_solaris)
+# if !defined(VGP_riscv64_linux) && !defined(VGO_darwin) && \
+ !defined(VGO_freebsd) && !defined(VGO_solaris)
sa.sa_restorer = 0;
# endif
VG_(sigemptyset)(&sa.sa_mask);
tsa.ksa_handler = (void *)sync_signalhandler;
tsa.sa_flags = VKI_SA_SIGINFO;
-# if !defined(VGO_darwin) && !defined(VGO_freebsd) && \
- !defined(VGO_solaris)
+# if !defined(VGP_riscv64_linux) && !defined(VGO_darwin) && \
+ !defined(VGO_freebsd) && !defined(VGO_solaris)
tsa.sa_restorer = 0;
# endif
VG_(sigfillset)(&tsa.sa_mask);
scss.scss_per_sig[i].scss_mask = sa.sa_mask;
scss.scss_per_sig[i].scss_restorer = NULL;
-# if !defined(VGO_darwin) && !defined(VGO_freebsd) && \
- !defined(VGO_solaris)
+# if !defined(VGP_riscv64_linux) && !defined(VGO_darwin) && \
+ !defined(VGO_freebsd) && !defined(VGO_solaris)
scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
# endif
#endif
+/* ------------------------ riscv64 ------------------------- */
+
+#if defined(VGP_riscv64_linux)
+
+UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
+ /*OUT*/Addr* ips, UInt max_n_ips,
+ /*OUT*/Addr* sps, /*OUT*/Addr* fps,
+ const UnwindStartRegs* startRegs,
+ Addr fp_max_orig )
+{
+ Bool debug = False;
+ Int i;
+ Addr fp_max;
+ UInt n_found = 0;
+ const Int cmrf = VG_(clo_merge_recursive_frames);
+
+ vg_assert(sizeof(Addr) == sizeof(UWord));
+ vg_assert(sizeof(Addr) == sizeof(void*));
+
+ D3UnwindRegs uregs;
+ uregs.pc = startRegs->r_pc;
+ uregs.sp = startRegs->r_sp;
+ uregs.fp = startRegs->misc.RISCV64.r_fp;
+ uregs.ra = startRegs->misc.RISCV64.r_ra;
+ Addr fp_min = uregs.sp - VG_STACK_REDZONE_SZB;
+
+ /* Snaffle IPs from the client's stack into ips[0 .. max_n_ips-1],
+ stopping when the trail goes cold, which we guess to be
+ when FP is not a reasonable stack location. */
+
+ fp_max = fp_max_orig;
+ if (fp_max >= sizeof(Addr))
+ fp_max -= sizeof(Addr);
+
+ if (debug)
+ VG_(printf)("\nmax_n_ips=%u fp_min=0x%lx fp_max_orig=0x%lx, "
+ "fp_max=0x%lx pc=0x%lx sp=0x%lx fp=0x%lx ra=0x%lx\n",
+ max_n_ips, fp_min, fp_max_orig, fp_max,
+ uregs.pc, uregs.sp, uregs.fp, uregs.ra);
+
+ if (sps) sps[0] = uregs.sp;
+ if (fps) fps[0] = uregs.fp;
+ ips[0] = uregs.pc;
+ i = 1;
+
+ /* Loop unwinding the stack, using CFI. */
+ while (True) {
+ if (debug)
+ VG_(printf)("i: %d, pc: 0x%lx, sp: 0x%lx, fp: 0x%lx, ra: 0x%lx\n",
+ i, uregs.pc, uregs.sp, uregs.fp, uregs.ra);
+ if (i >= max_n_ips)
+ break;
+
+ if (VG_(use_CF_info)( &uregs, fp_min, fp_max )) {
+ if (sps) sps[i] = uregs.sp;
+ if (fps) fps[i] = uregs.fp;
+ ips[i++] = uregs.pc - 1;
+ if (debug)
+ VG_(printf)(
+ "USING CFI: pc: 0x%lx, sp: 0x%lx, fp: 0x%lx, ra: 0x%lx\n",
+ uregs.pc, uregs.sp, uregs.fp, uregs.ra);
+ uregs.pc = uregs.pc - 1;
+ RECURSIVE_MERGE(cmrf,ips,i);
+ continue;
+ }
+
+      /* A problem on the first frame? Let's assume it was a bad jump.
+ We will use the link register and the current stack and frame
+ pointers and see if we can use the CFI in the next round. */
+ if (i == 1) {
+ uregs.pc = uregs.ra;
+ uregs.ra = 0;
+
+ if (sps) sps[i] = uregs.sp;
+ if (fps) fps[i] = uregs.fp;
+ ips[i++] = uregs.pc - 1;
+ if (debug)
+ VG_(printf)(
+ "USING bad-jump: pc: 0x%lx, sp: 0x%lx, fp: 0x%lx, ra: 0x%lx\n",
+ uregs.pc, uregs.sp, uregs.fp, uregs.ra);
+ uregs.pc = uregs.pc - 1;
+ RECURSIVE_MERGE(cmrf,ips,i);
+ continue;
+ }
+
+ /* No luck. We have to give up. */
+ break;
+ }
+
+ n_found = i;
+ return n_found;
+}
+
+#endif
+
/*------------------------------------------------------------*/
/*--- ---*/
/*--- END platform-dependent unwinder worker functions ---*/
return res;
}
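+/* riscv64-linux: the syscall result arrives in a0; values -4095 .. -1
+   encode a negated errno, all other values indicate success. */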
+SysRes VG_(mk_SysRes_riscv64_linux) ( Long val ) {
+ SysRes res;
+ res._isError = val >= -4095 && val <= -1;
+ if (res._isError) {
+ res._val = (ULong)(-val);
+ } else {
+ res._val = (ULong)val;
+ }
+ return res;
+}
+
/* Generic constructors. */
SysRes VG_(mk_SysRes_Success) ( UWord res ) {
SysRes r;
".previous \n\t"
);
+#elif defined(VGP_riscv64_linux)
+/* Calling convention is: args in a0-a5, sysno in a7, return value in a0.
+   The return value follows the usual convention that -4095 .. -1 (both
+   inclusive) is an error value; all other values are success values.
+
+   The arguments already arrive in a0-a5 and can stay where they are; only
+   syscall_no arrives in a6 and needs to be moved to a7.
+*/
+extern UWord do_syscall_WRK (
+ UWord a1, UWord a2, UWord a3,
+ UWord a4, UWord a5, UWord a6,
+ UWord syscall_no
+ );
+asm(
+".text\n"
+".globl do_syscall_WRK\n"
+"do_syscall_WRK:\n"
+" mv a7, a6\n"
+" li a6, 0\n"
+" ecall\n"
+" ret\n"
+".previous\n"
+);
+
#elif defined(VGP_x86_solaris)
extern ULong
do_syscall_WRK(a1, a2, a3, a4, a5, a6, sysno, ®_a0);
return VG_(mk_SysRes_nanomips_linux)(reg_a0);
+# elif defined(VGP_riscv64_linux)
+ UWord val = do_syscall_WRK(a1, a2, a3, a4, a5, a6, sysno);
+ return VG_(mk_SysRes_riscv64_linux)(val);
+
# elif defined(VGP_x86_solaris)
UInt val, val2, err = False;
Bool restart;
Int* child_tid, /* a4 - 8 */
Int* parent_tid, /* a5 - 9 */
void* tls_ptr); /* a6 - 10 */
+extern UInt do_syscall_clone_riscv64_linux ( Word (*fn) (void *),
+ void* stack,
+ Int flags,
+ void* arg,
+ Int* child_tid,
+ Int* parent_tid,
+ void* tls_ptr);
#endif // __PRIV_SYSWRAP_LINUX_H
/*--------------------------------------------------------------------*/
|| defined(VGP_ppc32_linux) \
|| defined(VGP_arm_linux) || defined(VGP_s390x_linux) \
|| defined(VGP_arm64_linux) \
- || defined(VGP_nanomips_linux)
+ || defined(VGP_nanomips_linux) || defined(VGP_riscv64_linux)
Int o_arg1;
Int o_arg2;
Int o_arg3;
#endif
#if !defined(VGO_solaris) && !defined(VGP_arm64_linux) && \
- !defined(VGP_nanomips_linux)
+ !defined(VGP_nanomips_linux) && !defined(VGP_riscv64_linux)
static vki_sigset_t fork_saved_mask;
// In Linux, the sys_fork() function varies across architectures, but we
VG_(sigprocmask)(VKI_SIG_SETMASK, &fork_saved_mask, NULL);
}
}
-#endif // !defined(VGO_solaris) && !defined(VGP_arm64_linux)
+#endif
PRE(sys_ftruncate)
{
: "r" (VgTs_Empty), "n" (__NR_exit), "m" (tst->os_state.exitcode)
: "memory" , "$t4", "$a0"
);
+#elif defined(VGP_riscv64_linux)
+ asm volatile (
+ "sw %1, %0\n" /* set tst->status = VgTs_Empty */
+ "li a7, %2\n" /* set a7 = __NR_exit */
+ "ld a0, %3\n" /* set a0 = tst->os_state.exitcode */
+ "ecall\n" /* exit(tst->os_state.exitcode) */
+ : "=m" (tst->status)
+ : "r" (VgTs_Empty), "n" (__NR_exit), "m" (tst->os_state.exitcode)
+ : "a7", "a0"
+ );
#else
# error Unknown platform
#endif
(ML_(start_thread_NORETURN), stack, flags, ctst,
child_tidptr, parent_tidptr, NULL);
res = VG_ (mk_SysRes_nanomips_linux) (ret);
+#elif defined(VGP_riscv64_linux)
+ ULong a0;
+ ctst->arch.vex.guest_x10 = 0;
+ a0 = do_syscall_clone_riscv64_linux
+ (ML_(start_thread_NORETURN), stack, flags, ctst,
+ child_tidptr, parent_tidptr, NULL);
+ res = VG_(mk_SysRes_riscv64_linux)( a0 );
#else
# error Unknown platform
#endif
#elif defined(VGP_mips32_linux) || defined(VGP_nanomips_linux)
ctst->arch.vex.guest_ULR = tlsaddr;
ctst->arch.vex.guest_r27 = tlsaddr;
+#elif defined(VGP_riscv64_linux)
+ ctst->arch.vex.guest_x4 = tlsaddr;
#else
# error Unknown platform
#endif
|| defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
|| defined(VGP_arm_linux) || defined(VGP_mips32_linux) \
|| defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
- || defined(VGP_nanomips_linux)
+ || defined(VGP_nanomips_linux) || defined(VGP_riscv64_linux)
res = VG_(do_syscall5)( __NR_clone, flags,
(UWord)NULL, (UWord)parent_tidptr,
(UWord)NULL, (UWord)child_tidptr );
|| defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
|| defined(VGP_arm_linux) || defined(VGP_mips32_linux) \
|| defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
- || defined(VGP_nanomips_linux)
+ || defined(VGP_nanomips_linux) || defined(VGP_riscv64_linux)
#define ARG_CHILD_TIDPTR ARG5
#define PRA_CHILD_TIDPTR PRA5
#define ARG_TLS ARG4
PRE_MEM_READ( "sigaction(act->sa_handler)", (Addr)&sa->ksa_handler, sizeof(sa->ksa_handler));
PRE_MEM_READ( "sigaction(act->sa_mask)", (Addr)&sa->sa_mask, sizeof(sa->sa_mask));
PRE_MEM_READ( "sigaction(act->sa_flags)", (Addr)&sa->sa_flags, sizeof(sa->sa_flags));
+# if !defined(VGP_riscv64_linux)
+ /* Check the sa_restorer field. More recent Linux platforms completely
+ drop this member. */
if (ML_(safe_to_deref)(sa,sizeof(struct vki_old_sigaction))
&& (sa->sa_flags & VKI_SA_RESTORER))
PRE_MEM_READ( "sigaction(act->sa_restorer)", (Addr)&sa->sa_restorer, sizeof(sa->sa_restorer));
+# endif
}
if (ARG3 != 0) {
PRE_MEM_READ( "rt_sigaction(act->sa_handler)", (Addr)&sa->ksa_handler, sizeof(sa->ksa_handler));
PRE_MEM_READ( "rt_sigaction(act->sa_mask)", (Addr)&sa->sa_mask, sizeof(sa->sa_mask));
PRE_MEM_READ( "rt_sigaction(act->sa_flags)", (Addr)&sa->sa_flags, sizeof(sa->sa_flags));
+# if !defined(VGP_riscv64_linux)
if (ML_(safe_to_deref)(sa,sizeof(vki_sigaction_toK_t))
&& (sa->sa_flags & VKI_SA_RESTORER))
PRE_MEM_READ( "rt_sigaction(act->sa_restorer)", (Addr)&sa->sa_restorer, sizeof(sa->sa_restorer));
+# endif
}
if (ARG3 != 0)
PRE_MEM_WRITE( "rt_sigaction(oldact)", ARG3, sizeof(vki_sigaction_fromK_t));
#endif
#if defined(VGP_amd64_linux) || defined(VGP_s390x_linux) \
- || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
+ || defined(VGP_arm64_linux) || defined(VGP_nanomips_linux) \
+ || defined(VGP_riscv64_linux)
PRE(sys_lookup_dcookie)
{
*flags |= SfMayBlock;
canonical->arg7 = 0;
canonical->arg8 = 0;
+#elif defined(VGP_riscv64_linux)
+ VexGuestRISCV64State* gst = (VexGuestRISCV64State*)gst_vanilla;
+ canonical->sysno = gst->guest_x17; /* a7 */
+ canonical->arg1 = gst->guest_x10; /* a0 */
+ canonical->arg2 = gst->guest_x11; /* a1 */
+ canonical->arg3 = gst->guest_x12; /* a2 */
+ canonical->arg4 = gst->guest_x13; /* a3 */
+ canonical->arg5 = gst->guest_x14; /* a4 */
+ canonical->arg6 = gst->guest_x15; /* a5 */
+ canonical->arg7 = 0;
+ canonical->arg8 = 0;
+
#elif defined(VGP_x86_solaris)
VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
UWord *stack = (UWord *)gst->guest_ESP;
gst->guest_r10 = canonical->arg7;
gst->guest_r11 = canonical->arg8;
+#elif defined(VGP_riscv64_linux)
+ VexGuestRISCV64State* gst = (VexGuestRISCV64State*)gst_vanilla;
+ gst->guest_x17 = canonical->sysno; /* a7 */
+ gst->guest_x10 = canonical->arg1; /* a0 */
+ gst->guest_x11 = canonical->arg2; /* a1 */
+ gst->guest_x12 = canonical->arg3; /* a2 */
+ gst->guest_x13 = canonical->arg4; /* a3 */
+ gst->guest_x14 = canonical->arg5; /* a4 */
+ gst->guest_x15 = canonical->arg6; /* a5 */
+
#elif defined(VGP_x86_solaris)
VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
UWord *stack = (UWord *)gst->guest_ESP;
canonical->sres = VG_(mk_SysRes_s390x_linux)( gst->guest_r2 );
canonical->what = SsComplete;
+# elif defined(VGP_riscv64_linux)
+ VexGuestRISCV64State* gst = (VexGuestRISCV64State*)gst_vanilla;
+ canonical->sres = VG_(mk_SysRes_riscv64_linux)( gst->guest_x10 );
+ canonical->what = SsComplete;
+
# elif defined(VGP_x86_solaris)
VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
UInt carry = 1 & LibVEX_GuestX86_get_eflags(gst);
VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
OFFSET_mips32_r4, sizeof(UWord) );
+# elif defined(VGP_riscv64_linux)
+ VexGuestRISCV64State* gst = (VexGuestRISCV64State*)gst_vanilla;
+ vg_assert(canonical->what == SsComplete);
+ if (sr_isError(canonical->sres)) {
+ /* This isn't exactly right, in that really a Failure with res
+ not in the range 1 .. 4095 is unrepresentable in the
+ Linux-riscv64 scheme. Oh well. */
+ gst->guest_x10 = - (Long)sr_Err(canonical->sres);
+ } else {
+ gst->guest_x10 = sr_Res(canonical->sres);
+ }
+ VG_TRACK( post_reg_write, Vg_CoreSysCall, tid,
+ OFFSET_riscv64_x10, sizeof(UWord) );
+
# elif defined(VGP_x86_solaris)
VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
SysRes sres = canonical->sres;
layout->uu_arg7 = -1; /* impossible value */
layout->uu_arg8 = -1; /* impossible value */
+#elif defined(VGP_riscv64_linux)
+ layout->o_sysno = OFFSET_riscv64_x17; /* a7 */
+ layout->o_arg1 = OFFSET_riscv64_x10; /* a0 */
+ layout->o_arg2 = OFFSET_riscv64_x11; /* a1 */
+ layout->o_arg3 = OFFSET_riscv64_x12; /* a2 */
+ layout->o_arg4 = OFFSET_riscv64_x13; /* a3 */
+ layout->o_arg5 = OFFSET_riscv64_x14; /* a4 */
+ layout->o_arg6 = OFFSET_riscv64_x15; /* a5 */
+ layout->uu_arg7 = -1; /* impossible value */
+ layout->uu_arg8 = -1; /* impossible value */
+
#elif defined(VGP_x86_solaris)
layout->o_sysno = OFFSET_x86_EAX;
/* Syscall parameters are on the stack. */
arch->vex.guest_PC -= 2;
}
}
+
+#elif defined(VGP_riscv64_linux)
+ arch->vex.guest_pc -= 4; // sizeof(ecall)
+
+ /* Make sure our caller is actually sane, and we're really backing
+ back over a syscall.
+
+ ecall == 73 00 00 00 (the little-endian bytes of 0x00000073)
+ */
+ {
+ UChar *p = (UChar *)arch->vex.guest_pc;
+
+ if (p[0] != 0x73 || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0x00)
+ VG_(message)(
+ Vg_DebugMsg,
+ "?! restarting over syscall at %#llx %02x %02x %02x %02x\n",
+ arch->vex.guest_pc, p[0], p[1], p[2], p[3]
+ );
+
+ vg_assert(p[0] == 0x73 && p[1] == 0x00 && p[2] == 0x00 && p[3] == 0x00);
+ }
+
#elif defined(VGP_x86_solaris)
arch->vex.guest_EIP -= 2; // sizeof(int $0x91) or sizeof(syscall)
# undef UD2_1024
# undef UD2_PAGE
+/*---------------- riscv64-linux ----------------*/
+#else
+#if defined(VGP_riscv64_linux)
+
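+/* An all-zero 4-byte word is defined by the RISC-V ISA to be an illegal
+ instruction, so UD2_4 provides reliable unexecutable filler. */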
+# define UD2_4 .word 0
+# define UD2_16 UD2_4 ; UD2_4 ; UD2_4 ; UD2_4
+# define UD2_64 UD2_16 ; UD2_16 ; UD2_16 ; UD2_16
+# define UD2_256 UD2_64 ; UD2_64 ; UD2_64 ; UD2_64
+# define UD2_1024 UD2_256 ; UD2_256 ; UD2_256 ; UD2_256
+# define UD2_PAGE UD2_1024 ; UD2_1024 ; UD2_1024 ; UD2_1024
+
+ /* a leading page of unexecutable code */
+ UD2_PAGE
+
+.global VG_(trampoline_stuff_start)
+VG_(trampoline_stuff_start):
+
+.global VG_(riscv64_linux_SUBST_FOR_rt_sigreturn)
+.type VG_(riscv64_linux_SUBST_FOR_rt_sigreturn), @function
+VG_(riscv64_linux_SUBST_FOR_rt_sigreturn):
+ .cfi_startproc
+ .cfi_signal_frame
+ li a7, __NR_rt_sigreturn
+ ecall
+ .cfi_endproc
+.size VG_(riscv64_linux_SUBST_FOR_rt_sigreturn), \
+ .-VG_(riscv64_linux_SUBST_FOR_rt_sigreturn)
+
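+/* A simple byte-at-a-time strlen, used to service redirected strlen
+ calls (e.g. from the dynamic linker) before libc's own implementation
+ is available. */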
+.global VG_(riscv64_linux_REDIR_FOR_strlen)
+.type VG_(riscv64_linux_REDIR_FOR_strlen), @function
+VG_(riscv64_linux_REDIR_FOR_strlen):
+ mv a1, a0 /* copy the input string pointer to a1 */
+ li a0, 0 /* set the output length to 0 */
+ lbu a2, 0(a1) /* load the first character */
+ beq a2, zero, 2f /* check if the end of string is reached */
+1:
+ addi a0, a0, 1 /* increment the output length by 1 */
+ add a2, a1, a0 /* calculate address of the next character */
+ lbu a2, 0(a2) /* load the next character */
+ bne a2, zero, 1b /* check if the end of string is reached */
+ ret
+2:
+ ret
+.size VG_(riscv64_linux_REDIR_FOR_strlen), .-VG_(riscv64_linux_REDIR_FOR_strlen)
+
+.global VG_(riscv64_linux_REDIR_FOR_index)
+.type VG_(riscv64_linux_REDIR_FOR_index), @function
+VG_(riscv64_linux_REDIR_FOR_index):
+ andi a1, a1, 0xff /* mask the input character value */
+ j 2f /* jump into the test loop */
+1:
+ beq a2, zero, 3f /* check if the end of string is reached */
+ addi a0, a0, 1 /* advance to the next character */
+2:
+ lbu a2, 0(a0) /* load the next character */
+ bne a2, a1, 1b /* loop while it does not match the wanted character */
+ ret
+3:
+ li a0, 0 /* set the result to "not found" */
+ ret
+.size VG_(riscv64_linux_REDIR_FOR_index), .-VG_(riscv64_linux_REDIR_FOR_index)
+
+.global VG_(trampoline_stuff_end)
+VG_(trampoline_stuff_end):
+
+ /* and a trailing page of unexecutable code */
+ UD2_PAGE
+
+# undef UD2_4
+# undef UD2_16
+# undef UD2_64
+# undef UD2_256
+# undef UD2_1024
+# undef UD2_PAGE
+
/*---------------- x86-solaris ----------------*/
#else
#if defined(VGP_x86_solaris)
#endif
#endif
#endif
+#endif
/* Let the linker know we don't need an executable stack */
MARK_STACK_NO_EXEC
vex_archinfo.arm64_requires_fallback_LLSC;
# endif
+# if defined(VGP_riscv64_linux)
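+ /* Always use the fallback (software) LL/SC emulation on riscv64:
+ instrumented code may access memory between an LR and its matching
+ SC, which would break the hardware reservation. */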
+ vex_abiinfo.guest__use_fallback_LLSC = True;
+# endif
+
/* Set up closure args. */
closure.tid = tid;
closure.nraddr = nraddr;
typedef
struct {
- ULong r_pc; /* x86:EIP, amd64:RIP, ppc:CIA, arm:R15, mips:pc */
- ULong r_sp; /* x86:ESP, amd64:RSP, ppc:R1, arm:R13, mips:sp */
+ ULong r_pc; /* x86:EIP, amd64:RIP, ppc:CIA, arm:R15, mips:pc,
+ riscv64: pc */
+ ULong r_sp; /* x86:ESP, amd64:RSP, ppc:R1, arm:R13, mips:sp,
+ riscv64: x2 */
union {
struct {
UInt r_ebp;
ULong r31; /* Return address of the last subroutine call */
ULong r28;
} MIPS64;
+ struct {
+ ULong r_fp; /* x8 */
+ ULong r_ra; /* x1 */
+ } RISCV64;
} misc;
}
UnwindStartRegs;
Addr f4; Addr f5; Addr f6; Addr f7; }
D3UnwindRegs;
#elif defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)
+typedef
+ struct { Addr pc; Addr sp; Addr fp; Addr ra; }
+ D3UnwindRegs;
+#elif defined(VGA_riscv64)
typedef
struct { Addr pc; Addr sp; Addr fp; Addr ra; }
D3UnwindRegs;
# define VG_ELF_MACHINE EM_NANOMIPS
# define VG_ELF_CLASS ELFCLASS32
# undef VG_PLAT_USES_PPCTOC
+#elif defined(VGP_riscv64_linux)
+# define VG_ELF_DATA2XXX ELFDATA2LSB
+# define VG_ELF_MACHINE EM_RISCV
+# define VG_ELF_CLASS ELFCLASS64
+# undef VG_PLAT_USES_PPCTOC
#else
# error Unknown platform
#endif
# define VG_INSTR_PTR guest_PC
# define VG_STACK_PTR guest_r29
# define VG_FRAME_PTR guest_r30
+#elif defined(VGA_riscv64)
+# define VG_INSTR_PTR guest_pc
+# define VG_STACK_PTR guest_x2
+# define VG_FRAME_PTR guest_x8
#else
# error Unknown arch
#endif
defined(VGP_x86_darwin) || \
defined(VGP_amd64_darwin) || \
defined(VGP_arm64_linux) || \
+ defined(VGP_riscv64_linux) || \
defined(VGP_amd64_solaris)
# define VG_MIN_MALLOC_SZB 16
#else
extern SysRes VG_(mk_SysRes_mips64_linux)( ULong v0, ULong v1,
ULong a3 );
extern SysRes VG_(mk_SysRes_nanomips_linux)( UWord a0);
+extern SysRes VG_(mk_SysRes_riscv64_linux) ( Long a0 );
extern SysRes VG_(mk_SysRes_x86_solaris) ( Bool isErr, UInt val, UInt val2 );
extern SysRes VG_(mk_SysRes_amd64_solaris) ( Bool isErr, ULong val, ULong val2 );
extern SysRes VG_(mk_SysRes_Error) ( UWord val );
extern UInt VG_(nanomips_linux_REDIR_FOR_strlen)( void* );
#endif
+#if defined(VGP_riscv64_linux)
+extern Addr VG_(riscv64_linux_SUBST_FOR_rt_sigreturn);
+extern HChar* VG_(riscv64_linux_REDIR_FOR_index)( const HChar*, Int );
+extern SizeT VG_(riscv64_linux_REDIR_FOR_strlen)( const HChar* );
+#endif
+
#if defined(VGP_x86_solaris)
extern SizeT VG_(x86_solaris_REDIR_FOR_strcmp)(const HChar *, const HChar *);
extern SizeT VG_(x86_solaris_REDIR_FOR_strlen)(const HChar *);
return merged & VG_TT_FAST_MASK;
}
-#elif defined(VGA_s390x) || defined(VGA_arm) || defined(VGA_nanomips)
+#elif defined(VGA_s390x) || defined(VGA_arm) || defined(VGA_nanomips) \
+ || defined(VGA_riscv64)
static inline UWord VG_TT_FAST_HASH ( Addr guest ) {
// Instructions are 2-byte aligned.
UWord merged = ((UWord)guest) >> 1;
sets to ever be used. So instead the function is
(address ^ (address >>u VG_TT_FAST_BITS))[VG_TT_FAST_BITS-1+2 : 0+2]'.
- On arm32, the minimum instruction size is 2, so we discard only the least
- significant bit of the address, hence:
- (address ^ (address >>u VG_TT_FAST_BITS))[VG_TT_FAST_BITS-1+1 : 0+1]'.
-
- On s390x the rightmost bit of an instruction address is zero, so the arm32
- scheme is used. */
+ On arm32/s390x/riscv64, the minimum instruction size is 2, so we discard only
+ the least significant bit of the address, hence:
+ (address ^ (address >>u VG_TT_FAST_BITS))[VG_TT_FAST_BITS-1+1 : 0+1]'. */
#define VG_TT_FAST_BITS 13
#define VG_TT_FAST_SETS (1 << VG_TT_FAST_BITS)
#if defined(VGA_amd64) || defined(VGA_arm64) \
|| defined(VGA_ppc64be) || defined(VGA_ppc64le) \
|| (defined(VGA_mips64) && defined(VGABI_64)) \
- || defined(VGA_s390x)
+ || defined(VGA_s390x) || defined(VGA_riscv64)
// And all other 64-bit hosts
# define VG_FAST_CACHE_SET_BITS 6
// These FCS_{g,h}{0,1,2,3} are the values of
#include <sys/user.h>
#include <sys/wait.h>
+#if defined(VGA_riscv64)
+/* Glibc on riscv64 does not provide a definition of user or user_regs_struct
+ in sys/user.h. Instead the definition of user_regs_struct is provided by the
+ kernel in asm/ptrace.h. Pull it in and then define the expected user
+ structure. */
+#include <asm/ptrace.h>
+struct user {
+ struct user_regs_struct regs;
+};
+#endif
+
#ifdef PTRACE_GETREGSET
// TBD: better have a configure test instead ?
#define HAVE_PTRACE_GETREGSET
// So, better do not use PTRACE_GET/SETREGSET
// Rather we use PTRACE_GETREGS or PTRACE_PEEKUSER.
-// The only platform on which we must use PTRACE_GETREGSET is arm64.
+// The only platforms on which we must use PTRACE_GETREGSET are arm64 and riscv64.
// The resulting vgdb cannot work in a bi-arch setup.
// -1 means we will check that PTRACE_GETREGSET works.
-# if defined(VGA_arm64)
+# if defined(VGA_arm64) || defined(VGA_riscv64)
#define USE_PTRACE_GETREGSET
# endif
#endif
sp = p[29];
#elif defined(VGA_mips64)
sp = user_mod.regs[29];
+#elif defined(VGA_riscv64)
+ sp = user_mod.regs.sp;
#else
I_die_here : (sp) architecture missing in vgdb-invoker-ptrace.c
#endif
#elif defined(VGA_mips64)
assert(0); // cannot vgdb a 32 bits executable with a 64 bits exe
+
+#elif defined(VGA_riscv64)
+ assert(0); // cannot vgdb a 32 bits executable with a 64 bits exe
+
#else
I_die_here : architecture missing in vgdb-invoker-ptrace.c
#endif
user_mod.regs[31] = bad_return;
user_mod.regs[34] = shared64->invoke_gdbserver;
user_mod.regs[25] = shared64->invoke_gdbserver;
+#elif defined(VGA_riscv64)
+ user_mod.regs.a0 = check;
+ user_mod.regs.ra = bad_return;
+ user_mod.regs.pc = shared64->invoke_gdbserver;
#else
I_die_here: architecture missing in vgdb-invoker-ptrace.c
#endif
internals/porting-to-ARM.txt \
internals/qemu-aarch64-linux-HOWTO.txt \
internals/qemu-mips64-linux-HOWTO.txt \
+ internals/qemu-riscv64-linux-HOWTO.txt \
internals/register-uses.txt \
internals/s390-opcodes.csv \
internals/release-HOWTO.txt \
</literallayout>
</chapter>
+ <chapter id="dist.readme-riscv64"
+ xreflabel="Readme RISCV64">
+ <title>README.riscv64</title>
+ <literallayout>
+ <xi:include href="../../README.riscv64"
+ parse="text"
+ xmlns:xi="http://www.w3.org/2001/XInclude" />
+ </literallayout>
+ </chapter>
+
<chapter id="dist.readme-solaris"
xreflabel="Readme Solaris">
<title>README.solaris</title>
#define BITS_PER_BITS_PER_UWORD 5
#elif defined(VGA_amd64) || defined(VGA_ppc64be) || defined(VGA_ppc64le) \
|| defined(VGA_s390x) || (defined(VGA_mips64) && !defined(VGABI_N32)) \
- || defined(VGA_arm64)
+ || defined(VGA_arm64) || defined(VGA_riscv64)
#define BITS_PER_BITS_PER_UWORD 6
#else
#error Unknown platform.
#define STACK_POINTER_OFFSET OFFSET_mips32_r29
#elif defined(VGA_mips64)
#define STACK_POINTER_OFFSET OFFSET_mips64_r29
+#elif defined(VGA_riscv64)
+#define STACK_POINTER_OFFSET OFFSET_riscv64_x2
#else
#error Unknown architecture.
#endif
vki/vki-posixtypes-mips32-linux.h \
vki/vki-posixtypes-mips64-linux.h \
vki/vki-posixtypes-nanomips-linux.h \
+ vki/vki-posixtypes-riscv64-linux.h \
vki/vki-amd64-linux.h \
vki/vki-arm64-linux.h \
vki/vki-ppc32-linux.h \
vki/vki-mips32-linux.h \
vki/vki-mips64-linux.h \
vki/vki-nanomips-linux.h \
+ vki/vki-riscv64-linux.h \
vki/vki-scnums-amd64-linux.h \
vki/vki-scnums-arm64-linux.h \
vki/vki-scnums-ppc32-linux.h \
vki/vki-scnums-mips32-linux.h \
vki/vki-scnums-mips64-linux.h \
vki/vki-scnums-nanomips-linux.h \
+ vki/vki-scnums-riscv64-linux.h \
vki/vki-scnums-darwin.h \
vki/vki-scnums-solaris.h \
vki/vki-scnums-shared-linux.h \
#if defined(VGA_x86) || defined(VGA_amd64) || defined (VGA_arm) \
|| ((defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)) \
- && defined (_MIPSEL)) || defined(VGA_arm64) || defined(VGA_ppc64le)
+ && defined (_MIPSEL)) || defined(VGA_arm64) || defined(VGA_ppc64le) \
+ || defined(VGA_riscv64)
# define VG_LITTLEENDIAN 1
#elif defined(VGA_ppc32) || defined(VGA_ppc64be) || defined(VGA_s390x) \
|| ((defined(VGA_mips32) || defined(VGA_mips64) || defined(VGA_nanomips)) \
|| defined(VGA_ppc64be) || defined(VGA_ppc64le) \
|| defined(VGA_arm) || defined(VGA_s390x) \
|| defined(VGA_mips32) || defined(VGA_mips64) \
- || defined(VGA_arm64) || defined(VGA_nanomips)
+ || defined(VGA_arm64) || defined(VGA_nanomips) \
+ || defined(VGA_riscv64)
# define VG_REGPARM(n) /* */
#else
# error Unknown arch
#elif defined(VGA_mips64)
# include "libvex_guest_mips64.h"
typedef VexGuestMIPS64State VexGuestArchState;
+#elif defined(VGA_riscv64)
+# include "libvex_guest_riscv64.h"
+ typedef VexGuestRISCV64State VexGuestArchState;
#else
# error Unknown arch
#endif
# define VG_CLREQ_SZB 20
# define VG_STACK_REDZONE_SZB 0
+#elif defined(VGP_riscv64_linux)
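+/* Compressed ("C" extension) instructions are 2 bytes and standard ones
+ 4; the client request sequence is five uncompressed instructions, i.e.
+ 20 bytes. The RISC-V psABI defines no stack red zone. */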
+# define VG_MIN_INSTR_SZB 2
+# define VG_MAX_INSTR_SZB 4
+# define VG_CLREQ_SZB 20
+# define VG_STACK_REDZONE_SZB 0
+
#else
# error Unknown platform
#endif
#define VG_U_LD_LINUX_MIPSN8_S0_1 "ld-linux-mipsn8.so.1"
+#define VG_U_LD_LINUX_RISCV64_SO_1 "ld-linux-riscv64-lp64d.so.1"
+
#endif
/* --- Sonames for FreeBSD ELF linkers, plus unencoded versions. --- */
#elif defined(VGP_mips64_linux)
# include "vki/vki-scnums-mips64-linux.h"
+#elif defined(VGP_riscv64_linux)
+# include "vki/vki-scnums-shared-linux.h"
+# include "vki/vki-scnums-riscv64-linux.h"
+
#elif defined(VGP_x86_freebsd) || defined(VGP_amd64_freebsd) || defined(VGP_arm64_freebsd)
# include "vki/vki-scnums-freebsd.h"
#undef PLAT_mips32_linux
#undef PLAT_mips64_linux
#undef PLAT_nanomips_linux
+#undef PLAT_riscv64_linux
#undef PLAT_x86_solaris
#undef PLAT_amd64_solaris
# define PLAT_mips32_linux 1
#elif defined(__linux__) && defined(__nanomips__)
# define PLAT_nanomips_linux 1
+#elif defined(__linux__) && defined(__riscv) && (__riscv_xlen == 64)
+# define PLAT_riscv64_linux 1
#elif defined(__sun) && defined(__i386__)
# define PLAT_x86_solaris 1
#elif defined(__sun) && defined(__x86_64__)
} while (0)
#endif
+
+/* ----------------------- riscv64-linux ------------------------ */
+
+#if defined(PLAT_riscv64_linux)
+
+typedef
+ struct {
+ unsigned long int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
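+/* The preamble below is the "magic" sequence recognised by Valgrind's
+ decoder: four shifts of the zero register, each an architectural
+ no-op on real hardware. ".option norvc" forces full 4-byte encodings
+ so the sequence is always exactly 16 bytes. */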
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ ".option push\n\t" \
+ ".option norvc\n\t" \
+ "srli zero, zero, 3\n\t" \
+ "srli zero, zero, 13\n\t" \
+ "srli zero, zero, 51\n\t" \
+ "srli zero, zero, 61\n\t"
+
+#define __SPECIAL_INSTRUCTION_POSTAMBLE \
+ ".option pop\n\t" \
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ \
+ __extension__ \
+ ({volatile unsigned long int _zzq_args[6]; \
+ volatile unsigned long int _zzq_result; \
+ _zzq_args[0] = (unsigned long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long int)(_zzq_arg5); \
+ __asm__ volatile("mv a3, %1\n\t" /*default*/ \
+ "mv a4, %2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* a3 = client_request ( a4 ) */ \
+ "or a0, a0, a0\n\t" \
+ __SPECIAL_INSTRUCTION_POSTAMBLE \
+ "mv %0, a3" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" ((unsigned long int)(_zzq_default)), \
+ "r" (&_zzq_args[0]) \
+ : "memory", "a3", "a4"); \
+ _zzq_result; \
+ })
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ unsigned long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* a3 = guest_NRADDR */ \
+ "or a1, a1, a1\n\t" \
+ __SPECIAL_INSTRUCTION_POSTAMBLE \
+ "mv %0, a3" \
+ : "=r" (__addr) \
+ : \
+ : "memory", "a3" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
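+/* After the magic preamble, "or a2, a2, a2" tells Valgrind to
+ branch-and-link to the address in t0, bypassing any function
+ redirection. On real hardware the sequence does nothing. */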
+#define VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* branch-and-link-to-noredir t0 */ \
+ "or a2, a2, a2\n\t" \
+ __SPECIAL_INSTRUCTION_POSTAMBLE
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or a3, a3, a3\n\t" \
+ __SPECIAL_INSTRUCTION_POSTAMBLE \
+ : : : "memory" \
+ ); \
+ } while (0)
+
+#endif /* PLAT_riscv64_linux */
+
/* Insert assembly code for other platforms here... */
#endif /* NVALGRIND */
#endif /* PLAT_mips64_linux */
+/* ----------------------- riscv64-linux ----------------------- */
+
+#if defined(PLAT_riscv64_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "ra", \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", \
+ "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7", \
+ "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", \
+ "ft8", "ft9", "ft10", "ft11", \
+ "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", "fa6", "fa7"
+
+/* s11 is callee-saved and therefore preserved by the callee across the
+ hidden call, so use it to save and restore sp around the call. */
+#define VALGRIND_ALIGN_STACK \
+ "mv s11, sp\n\t" \
+ "andi sp, sp, 0xfffffffffffffff0\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "mv sp, s11\n\t"
+
+/* These CALL_FN_ macros assume that on riscv64-linux,
+ sizeof(unsigned long) == 8. */
+
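+/* Layout used by all CALL_FN_ macros below: _argvec[0] holds the target
+ address and _argvec[1..n] the arguments. The first eight arguments
+ are passed in a0-a7 per the LP64D calling convention; any further
+ arguments are spilled to the 16-byte aligned stack. */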
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ld a0, 8(%1) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ld a0, 8(%1) \n\t" \
+ "ld a1, 16(%1) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ld a0, 8(%1) \n\t" \
+ "ld a1, 16(%1) \n\t" \
+ "ld a2, 24(%1) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ld a0, 8(%1) \n\t" \
+ "ld a1, 16(%1) \n\t" \
+ "ld a2, 24(%1) \n\t" \
+ "ld a3, 32(%1) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ld a0, 8(%1) \n\t" \
+ "ld a1, 16(%1) \n\t" \
+ "ld a2, 24(%1) \n\t" \
+ "ld a3, 32(%1) \n\t" \
+ "ld a4, 40(%1) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ld a0, 8(%1) \n\t" \
+ "ld a1, 16(%1) \n\t" \
+ "ld a2, 24(%1) \n\t" \
+ "ld a3, 32(%1) \n\t" \
+ "ld a4, 40(%1) \n\t" \
+ "ld a5, 48(%1) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ld a0, 8(%1) \n\t" \
+ "ld a1, 16(%1) \n\t" \
+ "ld a2, 24(%1) \n\t" \
+ "ld a3, 32(%1) \n\t" \
+ "ld a4, 40(%1) \n\t" \
+ "ld a5, 48(%1) \n\t" \
+ "ld a6, 56(%1) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "ld a0, 8(%1) \n\t" \
+ "ld a1, 16(%1) \n\t" \
+ "ld a2, 24(%1) \n\t" \
+ "ld a3, 32(%1) \n\t" \
+ "ld a4, 40(%1) \n\t" \
+ "ld a5, 48(%1) \n\t" \
+ "ld a6, 56(%1) \n\t" \
+ "ld a7, 64(%1) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
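+ /* the ninth argument goes on the stack; keep sp 16-byte aligned */ \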
+ "addi sp, sp, -16 \n\t" \
+ "ld a0, 8(%1) \n\t" \
+ "ld a1, 16(%1) \n\t" \
+ "ld a2, 24(%1) \n\t" \
+ "ld a3, 32(%1) \n\t" \
+ "ld a4, 40(%1) \n\t" \
+ "ld a5, 48(%1) \n\t" \
+ "ld a6, 56(%1) \n\t" \
+ "ld a7, 64(%1) \n\t" \
+ "ld t0, 72(%1) \n\t" \
+ "sd t0, 0(sp) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "addi sp, sp, -16 \n\t" \
+ "ld a0, 8(%1) \n\t" \
+ "ld a1, 16(%1) \n\t" \
+ "ld a2, 24(%1) \n\t" \
+ "ld a3, 32(%1) \n\t" \
+ "ld a4, 40(%1) \n\t" \
+ "ld a5, 48(%1) \n\t" \
+ "ld a6, 56(%1) \n\t" \
+ "ld a7, 64(%1) \n\t" \
+ "ld t0, 72(%1) \n\t" \
+ "sd t0, 0(sp) \n\t" \
+ "ld t0, 80(%1) \n\t" \
+ "sd t0, 8(sp) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "addi sp, sp, -32 \n\t" \
+ "ld a0, 8(%1) \n\t" \
+ "ld a1, 16(%1) \n\t" \
+ "ld a2, 24(%1) \n\t" \
+ "ld a3, 32(%1) \n\t" \
+ "ld a4, 40(%1) \n\t" \
+ "ld a5, 48(%1) \n\t" \
+ "ld a6, 56(%1) \n\t" \
+ "ld a7, 64(%1) \n\t" \
+ "ld t0, 72(%1) \n\t" \
+ "sd t0, 0(sp) \n\t" \
+ "ld t0, 80(%1) \n\t" \
+ "sd t0, 8(sp) \n\t" \
+ "ld t0, 88(%1) \n\t" \
+ "sd t0, 16(sp) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11, \
+ arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "addi sp, sp, -32 \n\t" \
+ "ld a0, 8(%1) \n\t" \
+ "ld a1, 16(%1) \n\t" \
+ "ld a2, 24(%1) \n\t" \
+ "ld a3, 32(%1) \n\t" \
+ "ld a4, 40(%1) \n\t" \
+ "ld a5, 48(%1) \n\t" \
+ "ld a6, 56(%1) \n\t" \
+ "ld a7, 64(%1) \n\t" \
+ "ld t0, 72(%1) \n\t" \
+ "sd t0, 0(sp) \n\t" \
+ "ld t0, 80(%1) \n\t" \
+ "sd t0, 8(sp) \n\t" \
+ "ld t0, 88(%1) \n\t" \
+ "sd t0, 16(sp) \n\t" \
+ "ld t0, 96(%1) \n\t" \
+ "sd t0, 24(sp) \n\t" \
+ "ld t0, 0(%1) \n\t" /* target->t0 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_T0 \
+ VALGRIND_RESTORE_STACK \
+ "mv %0, a0" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS, "s11" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_riscv64_linux */
+
/* ------------------------------------------------------------------ */
/* ARCHITECTURE INDEPENDENT MACROS for CLIENT REQUESTS. */
/* */
#undef PLAT_mips32_linux
#undef PLAT_mips64_linux
#undef PLAT_nanomips_linux
+#undef PLAT_riscv64_linux
#undef PLAT_x86_solaris
#undef PLAT_amd64_solaris
# include "vki-posixtypes-mips64-linux.h"
#elif defined(VGA_nanomips)
# include "vki-posixtypes-nanomips-linux.h"
+#elif defined(VGA_riscv64)
+# include "vki-posixtypes-riscv64-linux.h"
#else
# error Unknown platform
#endif
# include "vki-mips64-linux.h"
#elif defined(VGA_nanomips)
# include "vki-nanomips-linux.h"
+#elif defined(VGA_riscv64)
+# include "vki-riscv64-linux.h"
#else
# error Unknown platform
#endif
# undef GOF
# undef SZB
+ /* ------------------- riscv64 ------------------- */
+
+# elif defined(VGA_riscv64)
+
+# define GOF(_fieldname) \
+ (offsetof(VexGuestRISCV64State,guest_##_fieldname))
+# define SZB(_fieldname) \
+ (sizeof(((VexGuestRISCV64State*)0)->guest_##_fieldname))
+
+ Int o = offset;
+ Int sz = szB;
+ Bool is48 = sz == 8 || sz == 4;
+
+ tl_assert(sz > 0);
+ tl_assert(host_is_little_endian());
+
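+ /* Each integer register is tracked at its own offset; -1 means the
+ location carries no origin-tracking state (x0 is hardwired to zero
+ and pc values carry no origins). */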
+ if (o == GOF(x0) && is48) return -1;
+ if (o == GOF(x1) && is48) return o;
+ if (o == GOF(x2) && is48) return o;
+ if (o == GOF(x3) && is48) return o;
+ if (o == GOF(x4) && is48) return o;
+ if (o == GOF(x5) && is48) return o;
+ if (o == GOF(x6) && is48) return o;
+ if (o == GOF(x7) && is48) return o;
+ if (o == GOF(x8) && is48) return o;
+ if (o == GOF(x9) && is48) return o;
+ if (o == GOF(x10) && is48) return o;
+ if (o == GOF(x11) && is48) return o;
+ if (o == GOF(x12) && is48) return o;
+ if (o == GOF(x13) && is48) return o;
+ if (o == GOF(x14) && is48) return o;
+ if (o == GOF(x15) && is48) return o;
+ if (o == GOF(x16) && is48) return o;
+ if (o == GOF(x17) && is48) return o;
+ if (o == GOF(x18) && is48) return o;
+ if (o == GOF(x19) && is48) return o;
+ if (o == GOF(x20) && is48) return o;
+ if (o == GOF(x21) && is48) return o;
+ if (o == GOF(x22) && is48) return o;
+ if (o == GOF(x23) && is48) return o;
+ if (o == GOF(x24) && is48) return o;
+ if (o == GOF(x25) && is48) return o;
+ if (o == GOF(x26) && is48) return o;
+ if (o == GOF(x27) && is48) return o;
+ if (o == GOF(x28) && is48) return o;
+ if (o == GOF(x29) && is48) return o;
+ if (o == GOF(x30) && is48) return o;
+ if (o == GOF(x31) && is48) return o;
+ if (o == GOF(pc) && sz == 8) return -1;
+
+ if (o >= GOF(f0) && o+sz <= GOF(f0) +SZB(f0)) return GOF(f0);
+ if (o >= GOF(f1) && o+sz <= GOF(f1) +SZB(f1)) return GOF(f1);
+ if (o >= GOF(f2) && o+sz <= GOF(f2) +SZB(f2)) return GOF(f2);
+ if (o >= GOF(f3) && o+sz <= GOF(f3) +SZB(f3)) return GOF(f3);
+ if (o >= GOF(f4) && o+sz <= GOF(f4) +SZB(f4)) return GOF(f4);
+ if (o >= GOF(f5) && o+sz <= GOF(f5) +SZB(f5)) return GOF(f5);
+ if (o >= GOF(f6) && o+sz <= GOF(f6) +SZB(f6)) return GOF(f6);
+ if (o >= GOF(f7) && o+sz <= GOF(f7) +SZB(f7)) return GOF(f7);
+ if (o >= GOF(f8) && o+sz <= GOF(f8) +SZB(f8)) return GOF(f8);
+ if (o >= GOF(f9) && o+sz <= GOF(f9) +SZB(f9)) return GOF(f9);
+ if (o >= GOF(f10) && o+sz <= GOF(f10)+SZB(f10)) return GOF(f10);
+ if (o >= GOF(f11) && o+sz <= GOF(f11)+SZB(f11)) return GOF(f11);
+ if (o >= GOF(f12) && o+sz <= GOF(f12)+SZB(f12)) return GOF(f12);
+ if (o >= GOF(f13) && o+sz <= GOF(f13)+SZB(f13)) return GOF(f13);
+ if (o >= GOF(f14) && o+sz <= GOF(f14)+SZB(f14)) return GOF(f14);
+ if (o >= GOF(f15) && o+sz <= GOF(f15)+SZB(f15)) return GOF(f15);
+ if (o >= GOF(f16) && o+sz <= GOF(f16)+SZB(f16)) return GOF(f16);
+ if (o >= GOF(f17) && o+sz <= GOF(f17)+SZB(f17)) return GOF(f17);
+ if (o >= GOF(f18) && o+sz <= GOF(f18)+SZB(f18)) return GOF(f18);
+ if (o >= GOF(f19) && o+sz <= GOF(f19)+SZB(f19)) return GOF(f19);
+ if (o >= GOF(f20) && o+sz <= GOF(f20)+SZB(f20)) return GOF(f20);
+ if (o >= GOF(f21) && o+sz <= GOF(f21)+SZB(f21)) return GOF(f21);
+ if (o >= GOF(f22) && o+sz <= GOF(f22)+SZB(f22)) return GOF(f22);
+ if (o >= GOF(f23) && o+sz <= GOF(f23)+SZB(f23)) return GOF(f23);
+ if (o >= GOF(f24) && o+sz <= GOF(f24)+SZB(f24)) return GOF(f24);
+ if (o >= GOF(f25) && o+sz <= GOF(f25)+SZB(f25)) return GOF(f25);
+ if (o >= GOF(f26) && o+sz <= GOF(f26)+SZB(f26)) return GOF(f26);
+ if (o >= GOF(f27) && o+sz <= GOF(f27)+SZB(f27)) return GOF(f27);
+ if (o >= GOF(f28) && o+sz <= GOF(f28)+SZB(f28)) return GOF(f28);
+ if (o >= GOF(f29) && o+sz <= GOF(f29)+SZB(f29)) return GOF(f29);
+ if (o >= GOF(f30) && o+sz <= GOF(f30)+SZB(f30)) return GOF(f30);
+ if (o >= GOF(f31) && o+sz <= GOF(f31)+SZB(f31)) return GOF(f31);
+ if (o == GOF(fcsr) && sz == 4) return o;
+
+ if (o == GOF(EMNOTE) && sz == 4) return -1;
+ if (o == GOF(CMSTART) && sz == 8) return -1;
+ if (o == GOF(CMLEN) && sz == 8) return -1;
+ if (o == GOF(NRADDR) && sz == 8) return -1;
+
+ if (o == GOF(LLSC_SIZE) && sz == 8) return -1;
+ if (o == GOF(LLSC_ADDR) && sz == 8) return o;
+ if (o == GOF(LLSC_DATA) && sz == 8) return o;
+
+ VG_(printf)("MC_(get_otrack_shadow_offset)(riscv64)(off=%d,sz=%d)\n",
+ offset,szB);
+ tl_assert(0);
+# undef GOF
+# undef SZB
+
# else
# error "FIXME: not implemented for this architecture"
# endif
VG_(printf)("\n");
tl_assert(0);
+ /* ------------------- riscv64 ------------------- */
+# elif defined(VGA_riscv64)
+ VG_(printf)("get_reg_array_equiv_int_type(riscv64): unhandled: ");
+ ppIRRegArray(arr);
+ VG_(printf)("\n");
+ tl_assert(0);
+
# else
# error "FIXME: not implemented for this architecture"
# endif