#include "../pub/libvex_guest_s390x.h"
#include "../pub/libvex_guest_mips32.h"
#include "../pub/libvex_guest_mips64.h"
+#include "../pub/libvex_guest_riscv64.h"
#define VG_STRINGIFZ(__str) #__str
#define VG_STRINGIFY(__str) VG_STRINGIFZ(__str)
GENOFFSET(MIPS64,mips64,PC);
GENOFFSET(MIPS64,mips64,HI);
GENOFFSET(MIPS64,mips64,LO);
+
+ // riscv64
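+ // Note: x0 is hardwired to zero architecturally; keeping a slot for it
+ // in the guest state allows uniform indexing of the integer registers.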
+ GENOFFSET(RISCV64,riscv64,x0);
+ GENOFFSET(RISCV64,riscv64,x1);
+ GENOFFSET(RISCV64,riscv64,x2);
+ GENOFFSET(RISCV64,riscv64,x3);
+ GENOFFSET(RISCV64,riscv64,x4);
+ GENOFFSET(RISCV64,riscv64,x5);
+ GENOFFSET(RISCV64,riscv64,x6);
+ GENOFFSET(RISCV64,riscv64,x7);
+ GENOFFSET(RISCV64,riscv64,x8);
+ GENOFFSET(RISCV64,riscv64,x9);
+ GENOFFSET(RISCV64,riscv64,x10);
+ GENOFFSET(RISCV64,riscv64,x11);
+ GENOFFSET(RISCV64,riscv64,x12);
+ GENOFFSET(RISCV64,riscv64,x13);
+ GENOFFSET(RISCV64,riscv64,x14);
+ GENOFFSET(RISCV64,riscv64,x15);
+ GENOFFSET(RISCV64,riscv64,x16);
+ GENOFFSET(RISCV64,riscv64,x17);
+ GENOFFSET(RISCV64,riscv64,x18);
+ GENOFFSET(RISCV64,riscv64,x19);
+ GENOFFSET(RISCV64,riscv64,x20);
+ GENOFFSET(RISCV64,riscv64,x21);
+ GENOFFSET(RISCV64,riscv64,x22);
+ GENOFFSET(RISCV64,riscv64,x23);
+ GENOFFSET(RISCV64,riscv64,x24);
+ GENOFFSET(RISCV64,riscv64,x25);
+ GENOFFSET(RISCV64,riscv64,x26);
+ GENOFFSET(RISCV64,riscv64,x27);
+ GENOFFSET(RISCV64,riscv64,x28);
+ GENOFFSET(RISCV64,riscv64,x29);
+ GENOFFSET(RISCV64,riscv64,x30);
+ GENOFFSET(RISCV64,riscv64,x31);
+ GENOFFSET(RISCV64,riscv64,pc);
+ GENOFFSET(RISCV64,riscv64,f0);
+ GENOFFSET(RISCV64,riscv64,f1);
+ GENOFFSET(RISCV64,riscv64,f2);
+ GENOFFSET(RISCV64,riscv64,f3);
+ GENOFFSET(RISCV64,riscv64,f4);
+ GENOFFSET(RISCV64,riscv64,f5);
+ GENOFFSET(RISCV64,riscv64,f6);
+ GENOFFSET(RISCV64,riscv64,f7);
+ GENOFFSET(RISCV64,riscv64,f8);
+ GENOFFSET(RISCV64,riscv64,f9);
+ GENOFFSET(RISCV64,riscv64,f10);
+ GENOFFSET(RISCV64,riscv64,f11);
+ GENOFFSET(RISCV64,riscv64,f12);
+ GENOFFSET(RISCV64,riscv64,f13);
+ GENOFFSET(RISCV64,riscv64,f14);
+ GENOFFSET(RISCV64,riscv64,f15);
+ GENOFFSET(RISCV64,riscv64,f16);
+ GENOFFSET(RISCV64,riscv64,f17);
+ GENOFFSET(RISCV64,riscv64,f18);
+ GENOFFSET(RISCV64,riscv64,f19);
+ GENOFFSET(RISCV64,riscv64,f20);
+ GENOFFSET(RISCV64,riscv64,f21);
+ GENOFFSET(RISCV64,riscv64,f22);
+ GENOFFSET(RISCV64,riscv64,f23);
+ GENOFFSET(RISCV64,riscv64,f24);
+ GENOFFSET(RISCV64,riscv64,f25);
+ GENOFFSET(RISCV64,riscv64,f26);
+ GENOFFSET(RISCV64,riscv64,f27);
+ GENOFFSET(RISCV64,riscv64,f28);
+ GENOFFSET(RISCV64,riscv64,f29);
+ GENOFFSET(RISCV64,riscv64,f30);
+ GENOFFSET(RISCV64,riscv64,f31);
+ GENOFFSET(RISCV64,riscv64,fcsr);
}
/*--------------------------------------------------------------------*/
#include "libvex_basictypes.h"
+#include "main_util.h"
/*---------------------------------------------------------*/
/*--- Representing HOST REGISTERS ---*/
#include "libvex_guest_s390x.h"
#include "libvex_guest_mips32.h"
#include "libvex_guest_mips64.h"
+#include "libvex_guest_riscv64.h"
#include "main_globals.h"
#include "main_util.h"
#include "host_s390_defs.h"
#include "host_mips_defs.h"
#include "host_nanomips_defs.h"
+#include "host_riscv64_defs.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_x86_defs.h"
#include "guest_s390_defs.h"
#include "guest_mips_defs.h"
#include "guest_nanomips_defs.h"
+#include "guest_riscv64_defs.h"
#include "host_generic_simd128.h"
#define NANOMIPSST(f) vassert(0)
#endif
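+/* Same pattern as for the other targets above: RISCV64FN(f) expands to f
+   when riscv64 support is compiled in and to NULL otherwise, while
+   RISCV64ST(f) expands to f or to an assertion failure. */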
+#if defined(VGA_riscv64) || defined(VEXMULTIARCH)
+#define RISCV64FN(f) f
+#define RISCV64ST(f) f
+#else
+#define RISCV64FN(f) NULL
+#define RISCV64ST(f) vassert(0)
+#endif
+
/* This file contains the top level interface to the library. */
/* --------- fwds ... --------- */
vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_NRADDR ) == 4);
break;
+ case VexArchRISCV64:
+ preciseMemExnsFn
+ = RISCV64FN(guest_riscv64_state_requires_precise_mem_exns);
+ disInstrFn = RISCV64FN(disInstr_RISCV64);
+ specHelper = RISCV64FN(guest_riscv64_spechelper);
+ guest_layout = RISCV64FN(&riscv64guest_layout);
+ offB_CMSTART = offsetof(VexGuestRISCV64State,guest_CMSTART);
+ offB_CMLEN = offsetof(VexGuestRISCV64State,guest_CMLEN);
+ offB_GUEST_IP = offsetof(VexGuestRISCV64State,guest_pc);
+ szB_GUEST_IP = sizeof( ((VexGuestRISCV64State*)0)->guest_pc );
+ vassert(vta->archinfo_guest.endness == VexEndnessLE);
+ vassert(0 == sizeof(VexGuestRISCV64State) % LibVEX_GUEST_STATE_ALIGN);
+ vassert(sizeof( ((VexGuestRISCV64State*)0)->guest_CMSTART ) == 8);
+ vassert(sizeof( ((VexGuestRISCV64State*)0)->guest_CMLEN ) == 8);
+ vassert(sizeof( ((VexGuestRISCV64State*)0)->guest_NRADDR ) == 8);
+ break;
+
default:
vpanic("LibVEX_Translate: unsupported guest insn set");
}
offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
break;
+ case VexArchRISCV64:
+ preciseMemExnsFn
+ = RISCV64FN(guest_riscv64_state_requires_precise_mem_exns);
+ guest_sizeB = sizeof(VexGuestRISCV64State);
+ offB_HOST_EvC_COUNTER = offsetof(VexGuestRISCV64State,host_EvC_COUNTER);
+ offB_HOST_EvC_FAILADDR = offsetof(VexGuestRISCV64State,host_EvC_FAILADDR);
+ break;
+
default:
vpanic("LibVEX_Codegen: unsupported guest insn set");
}
|| vta->archinfo_host.endness == VexEndnessBE);
break;
+ case VexArchRISCV64:
+ mode64 = True;
+ rRegUniv = RISCV64FN(getRRegUniverse_RISCV64());
+ getRegUsage
+ = CAST_TO_TYPEOF(getRegUsage) RISCV64FN(getRegUsage_RISCV64Instr);
+ mapRegs = CAST_TO_TYPEOF(mapRegs) RISCV64FN(mapRegs_RISCV64Instr);
+ genSpill = CAST_TO_TYPEOF(genSpill) RISCV64FN(genSpill_RISCV64);
+ genReload = CAST_TO_TYPEOF(genReload) RISCV64FN(genReload_RISCV64);
+ genMove = CAST_TO_TYPEOF(genMove) RISCV64FN(genMove_RISCV64);
+ ppInstr = CAST_TO_TYPEOF(ppInstr) RISCV64FN(ppRISCV64Instr);
+ ppReg = CAST_TO_TYPEOF(ppReg) RISCV64FN(ppHRegRISCV64);
+ iselSB = RISCV64FN(iselSB_RISCV64);
+ emit = CAST_TO_TYPEOF(emit) RISCV64FN(emit_RISCV64Instr);
+ vassert(vta->archinfo_host.endness == VexEndnessLE);
+ break;
+
default:
vpanic("LibVEX_Translate: unsupported host insn set");
}
place_to_chain,
disp_cp_chain_me_EXPECTED,
place_to_jump_to));
+ case VexArchRISCV64:
+ RISCV64ST(return chainXDirect_RISCV64(endness_host,
+ place_to_chain,
+ disp_cp_chain_me_EXPECTED,
+ place_to_jump_to));
default:
vassert(0);
}
place_to_unchain,
place_to_jump_to_EXPECTED,
disp_cp_chain_me));
+ case VexArchRISCV64:
+ RISCV64ST(return unchainXDirect_RISCV64(endness_host,
+ place_to_unchain,
+ place_to_jump_to_EXPECTED,
+ disp_cp_chain_me));
default:
vassert(0);
}
MIPS32ST(cached = evCheckSzB_MIPS()); break;
case VexArchMIPS64:
MIPS64ST(cached = evCheckSzB_MIPS()); break;
      case VexArchNANOMIPS:
NANOMIPSST(cached = evCheckSzB_NANOMIPS()); break;
+ case VexArchRISCV64:
+ RISCV64ST(cached = evCheckSzB_RISCV64()); break;
default:
vassert(0);
}
case VexArchNANOMIPS:
NANOMIPSST(return patchProfInc_NANOMIPS(endness_host, place_to_patch,
location_of_counter));
+ case VexArchRISCV64:
+ RISCV64ST(return patchProfInc_RISCV64(endness_host, place_to_patch,
+ location_of_counter));
default:
vassert(0);
}
case VexArchMIPS32: return "MIPS32";
case VexArchMIPS64: return "MIPS64";
case VexArchNANOMIPS: return "NANOMIPS";
+ case VexArchRISCV64: return "RISCV64";
default: return "VexArch???";
}
}
case VexArchMIPS64:
case VexArchPPC64:
case VexArchS390X:
+ case VexArchRISCV64:
return Ity_I64;
default:
return "Unsupported baseline";
}
+static const HChar* show_hwcaps_riscv64 ( UInt hwcaps )
+{
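+ /* riscv64 currently defines no hwcaps bits, so hwcaps is expected to be
+    0 here; only the baseline name is reported. */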
+ return "riscv64";
+}
+
#undef NUM_HWCAPS
/* This function must not return NULL. */
static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps )
{
switch (arch) {
- case VexArchX86: return show_hwcaps_x86(hwcaps);
- case VexArchAMD64: return show_hwcaps_amd64(hwcaps);
- case VexArchPPC32: return show_hwcaps_ppc32(hwcaps);
- case VexArchPPC64: return show_hwcaps_ppc64(hwcaps);
- case VexArchARM: return show_hwcaps_arm(hwcaps);
- case VexArchARM64: return show_hwcaps_arm64(hwcaps);
- case VexArchS390X: return show_hwcaps_s390x(hwcaps);
- case VexArchMIPS32: return show_hwcaps_mips32(hwcaps);
- case VexArchMIPS64: return show_hwcaps_mips64(hwcaps);
+ case VexArchX86: return show_hwcaps_x86(hwcaps);
+ case VexArchAMD64: return show_hwcaps_amd64(hwcaps);
+ case VexArchPPC32: return show_hwcaps_ppc32(hwcaps);
+ case VexArchPPC64: return show_hwcaps_ppc64(hwcaps);
+ case VexArchARM: return show_hwcaps_arm(hwcaps);
+ case VexArchARM64: return show_hwcaps_arm64(hwcaps);
+ case VexArchS390X: return show_hwcaps_s390x(hwcaps);
+ case VexArchMIPS32: return show_hwcaps_mips32(hwcaps);
+ case VexArchMIPS64: return show_hwcaps_mips64(hwcaps);
+ case VexArchRISCV64: return show_hwcaps_riscv64(hwcaps);
default: return NULL;
}
}
return;
invalid_hwcaps(arch, hwcaps, "Unsupported baseline\n");
+ case VexArchRISCV64:
+ if (hwcaps == 0)
+ return;
+ invalid_hwcaps(arch, hwcaps, "Cannot handle capabilities\n");
+
default:
vpanic("unknown architecture");
}
extern void vex_bzero ( void* s, SizeT n );
+/* Math ops */
+
+/* Sign extend an N-bit value up to 64 bits, by copying bit N-1 into all higher
+ positions. */
+static inline ULong vex_sx_to_64( ULong x, UInt n )
+{
+ vassert(n >= 1 && n < 64);
+ return (ULong)((Long)(x << (64 - n)) >> (64 - n));
+}
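+
+/* For example, vex_sx_to_64(0xFFF, 12) yields 0xFFFFFFFFFFFFFFFFULL (-1),
+   since bit 11 is set, while vex_sx_to_64(0x7FF, 12) yields 0x7FFULL. */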
+
+
/* Storage management: clear the area, and allocate from it. */
/* By default allocation occurs in the temporary area. However, it is
VexArchMIPS32,
VexArchMIPS64,
VexArchNANOMIPS,
+ VexArchRISCV64,
}
VexArch;
~~~~~
r21 is GSP.
+ riscv64
+ ~~~~~~~
+ On entry, x8/s0 should point to the guest state + 2048. RISC-V
+ load/store instructions take a signed 12-bit immediate offset from
+ the base register, covering -2048 to 2047. Biasing the base pointer
+ by 2048 lets LibVEX use this full range. When translating
+ riscv64->riscv64, a single instruction then suffices to read/write
+ values in the guest state (primary + 2x shadow state areas) and
+ most of the spill area.
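+
+ For illustration (offsets are hypothetical): a guest-state field at
+ byte offset 3000 lies outside a plain 0..2047 immediate range, but
+ with the bias it is reachable as "ld t0, 952(s0)", since
+ 3000 - 2048 = 952 fits in the signed 12-bit immediate.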
+
ALL GUEST ARCHITECTURES
~~~~~~~~~~~~~~~~~~~~~~~
The guest state must contain two pseudo-registers, guest_CMSTART
#undef VEX_HOST_WORDSIZE
#undef VEX_REGPARM
-/* The following 4 work OK for Linux. */
#if defined(__x86_64__)
# define VEX_HOST_WORDSIZE 8
# define VEX_REGPARM(_n) /* */
# define VEX_HOST_WORDSIZE 4
# define VEX_REGPARM(_n) /* */
+#elif defined(__riscv) && (__riscv_xlen == 64)
+# define VEX_HOST_WORDSIZE 8
+# define VEX_REGPARM(_n) /* */
+
#else
# error "Vex: Fatal: Can't establish the host architecture"
#endif
Irrm_PREPARE_SHORTER = 5, // Round to prepare for shorter
// precision
Irrm_AWAY_FROM_ZERO = 6, // Round to away from 0
- Irrm_NEAREST_TIE_TOWARD_0 = 7 // Round to nearest, ties towards 0
+ Irrm_NEAREST_TIE_TOWARD_0 = 7, // Round to nearest, ties towards 0
+ Irrm_INVALID = 8 // Invalid mode
}
IRRoundingMode;
VexTranslateResult tres;
VexControl vcon;
VexGuestExtents vge;
- VexArchInfo vai_x86, vai_amd64, vai_ppc32, vai_arm, vai_mips32, vai_mips64;
+ VexArchInfo vai_x86, vai_amd64, vai_ppc32, vai_arm, vai_mips32, vai_mips64,
+ vai_riscv64;
VexAbiInfo vbi;
VexTranslateArgs vta;
LibVEX_default_VexArchInfo(&vai_mips64);
vai_mips64.endness = VexEndnessLE;
+ LibVEX_default_VexArchInfo(&vai_riscv64);
+ vai_riscv64.hwcaps = 0;
+ vai_riscv64.endness = VexEndnessLE;
+
LibVEX_default_VexAbiInfo(&vbi);
vbi.guest_stack_redzone_size = 128;
vta.guest_bytes = &origbuf[18 +1];
vta.guest_bytes_addr = (Addr) &origbuf[18 +1];
#endif
+#if 0 /* riscv64 -> riscv64 */
+ vta.arch_guest = VexArchRISCV64;
+ vta.archinfo_guest = vai_riscv64;
+ vta.arch_host = VexArchRISCV64;
+ vta.archinfo_host = vai_riscv64;
+#endif
#if 1 /* no instrumentation */
vta.instrument1 = NULL;