Improve infrastructure for dealing with endianness in VEX.

author     Julian Seward <jseward@acm.org>
           Thu, 24 Jul 2014 12:42:03 +0000 (12:42 +0000)
committer  Julian Seward <jseward@acm.org>
           Thu, 24 Jul 2014 12:42:03 +0000 (12:42 +0000)

This patch removes all decisions about endianness from VEX.  Instead, it
requires that the LibVEX_* calls pass in information about the guest or
host endianness (depending on context) and in turn it passes that info
through to all the places that need it:

* the front ends (xx_toIR.c)
* the back ends (xx_isel.c)
* the patcher functions (Chain, UnChain, PatchProfInc)

Mostly it is boring and ugly plumbing.  As far as types go, there is a
new type "VexEndness" that carries the endianness.  This also makes it
possible to stop using Bools to indicate endianness.  VexArchInfo has
a new field of type VexEndness.  Apart from that, there are no other
changes to the types.
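
To make the shape of the change concrete, here is a minimal sketch of the
new type and the extended VexArchInfo, in the style of VEX/pub/libvex.h.
VexEndnessLE, VexEndnessBE and the field name "endness" are confirmed by
the diff below; VexEndnessInvalid and the other VexArchInfo members shown
are assumptions, so treat this as a sketch rather than the real
declarations.

   /* Sketch only: see VEX/pub/libvex.h for the authoritative version. */
   typedef
      enum {
         VexEndnessInvalid,   /* assumed: endianness not (yet) known */
         VexEndnessLE,        /* little-endian host or guest */
         VexEndnessBE         /* big-endian host or guest */
      }
      VexEndness;

   typedef
      struct {
         UInt       hwcaps;   /* pre-existing capability bits (assumed layout) */
         VexEndness endness;  /* NEW: endianness of the host or guest */
         /* ... remaining fields unchanged ... */
      }
      VexArchInfo;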

Followups: MIPS front and back ends have not yet been fixed up to use
the passed-in endianness information.  Currently they assume that the
endianness of both host and guest is the same as the endianness of the
target for which VEX is being compiled.
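
Independently of that MIPS followup, the caller-visible effect of the patch
is that a VEX client must now state the endianness explicitly when filling
in the VexArchInfo structures handed to LibVEX_Translate, instead of VEX
inferring it from the build target.  The fragment below is a hedged sketch
of that, not part of this commit: LibVEX_default_VexArchInfo and the
VexTranslateArgs fields are the pre-existing libvex.h API, and the
amd64-on-amd64 combination is purely an example.

   /* Hedged sketch of a caller after this patch (not from the commit). */
   VexArchInfo      vai_guest, vai_host;
   VexTranslateArgs vta;

   LibVEX_default_VexArchInfo(&vai_guest);
   LibVEX_default_VexArchInfo(&vai_host);
   vai_guest.endness = VexEndnessLE;   /* amd64 guest: little-endian */
   vai_host.endness  = VexEndnessLE;   /* amd64 host:  little-endian */

   vta.arch_guest     = VexArchAMD64;
   vta.archinfo_guest = vai_guest;
   vta.arch_host      = VexArchAMD64;
   vta.archinfo_host  = vai_host;
   /* ... the remaining VexTranslateArgs fields (guest code pointer, output
      buffer, chase_into_ok, dispatcher addresses, ...) are set up exactly
      as before this patch ... */
   LibVEX_Translate(&vta);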

git-svn-id: svn://svn.valgrind.org/vex/trunk@2910

39 files changed:
VEX/priv/guest_amd64_defs.h
VEX/priv/guest_amd64_toIR.c
VEX/priv/guest_arm64_defs.h
VEX/priv/guest_arm64_toIR.c
VEX/priv/guest_arm_defs.h
VEX/priv/guest_arm_toIR.c
VEX/priv/guest_generic_bb_to_IR.c
VEX/priv/guest_generic_bb_to_IR.h
VEX/priv/guest_mips_defs.h
VEX/priv/guest_mips_toIR.c
VEX/priv/guest_ppc_defs.h
VEX/priv/guest_ppc_toIR.c
VEX/priv/guest_s390_defs.h
VEX/priv/guest_s390_toIR.c
VEX/priv/guest_x86_defs.h
VEX/priv/guest_x86_toIR.c
VEX/priv/host_amd64_defs.c
VEX/priv/host_amd64_defs.h
VEX/priv/host_amd64_isel.c
VEX/priv/host_arm64_defs.c
VEX/priv/host_arm64_defs.h
VEX/priv/host_arm64_isel.c
VEX/priv/host_arm_defs.c
VEX/priv/host_arm_defs.h
VEX/priv/host_arm_isel.c
VEX/priv/host_mips_defs.c
VEX/priv/host_mips_defs.h
VEX/priv/host_mips_isel.c
VEX/priv/host_ppc_defs.c
VEX/priv/host_ppc_defs.h
VEX/priv/host_ppc_isel.c
VEX/priv/host_s390_defs.c
VEX/priv/host_s390_defs.h
VEX/priv/host_s390_isel.c
VEX/priv/host_x86_defs.c
VEX/priv/host_x86_defs.h
VEX/priv/host_x86_isel.c
VEX/priv/main_main.c
VEX/pub/libvex.h

VEX/priv/guest_amd64_defs.h
index 42a6a372ecb5748472ada2a2fef42a0cd622c3f2..008638e7ef286a413b41cc153911476717aaa453 100644 (file)
@@ -60,7 +60,7 @@ DisResult disInstr_AMD64 ( IRSB*        irbb,
                            VexArch      guest_arch,
                            VexArchInfo* archinfo,
                            VexAbiInfo*  abiinfo,
-                           Bool         host_bigendian,
+                           VexEndness   host_endness,
                            Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
VEX/priv/guest_amd64_toIR.c
index 563e9578cca4c9131f534f7efab2db6fd432898f..ba23ee246678cafed9cfc50f97832a526ad3e9a6 100644 (file)
    that we don't have to pass them around endlessly. */
 
 /* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
 
 /* Pointer to the guest code area (points to start of BB, not to the
    insn being processed). */
@@ -975,7 +975,7 @@ Int offsetIReg ( Int sz, UInt reg, Bool irregular )
 
 static IRExpr* getIRegCL ( void )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    return IRExpr_Get( OFFB_RCX, Ity_I8 );
 }
 
@@ -984,7 +984,7 @@ static IRExpr* getIRegCL ( void )
 
 static void putIRegAH ( IRExpr* e )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(typeOfIRExpr(irsb->tyenv, e) == Ity_I8);
    stmt( IRStmt_Put( OFFB_RAX+1, e ) );
 }
@@ -1006,7 +1006,7 @@ static const HChar* nameIRegRAX ( Int sz )
 
 static IRExpr* getIRegRAX ( Int sz )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    switch (sz) {
       case 1: return IRExpr_Get( OFFB_RAX, Ity_I8 );
       case 2: return IRExpr_Get( OFFB_RAX, Ity_I16 );
@@ -1019,7 +1019,7 @@ static IRExpr* getIRegRAX ( Int sz )
 static void putIRegRAX ( Int sz, IRExpr* e )
 {
    IRType ty = typeOfIRExpr(irsb->tyenv, e);
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    switch (sz) {
       case 8: vassert(ty == Ity_I64);
               stmt( IRStmt_Put( OFFB_RAX, e ));
@@ -1054,7 +1054,7 @@ static const HChar* nameIRegRDX ( Int sz )
 
 static IRExpr* getIRegRDX ( Int sz )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    switch (sz) {
       case 1: return IRExpr_Get( OFFB_RDX, Ity_I8 );
       case 2: return IRExpr_Get( OFFB_RDX, Ity_I16 );
@@ -1066,7 +1066,7 @@ static IRExpr* getIRegRDX ( Int sz )
 
 static void putIRegRDX ( Int sz, IRExpr* e )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(typeOfIRExpr(irsb->tyenv, e) == szToITy(sz));
    switch (sz) {
       case 8: stmt( IRStmt_Put( OFFB_RDX, e ));
@@ -1108,7 +1108,7 @@ static const HChar* nameIReg64 ( UInt regno )
 
 static IRExpr* getIReg32 ( UInt regno )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    return unop(Iop_64to32,
                IRExpr_Get( integerGuestReg64Offset(regno),
                            Ity_I64 ));
@@ -1132,7 +1132,7 @@ static const HChar* nameIReg32 ( UInt regno )
 
 static IRExpr* getIReg16 ( UInt regno )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    return IRExpr_Get( integerGuestReg64Offset(regno),
                       Ity_I16 );
 }
@@ -1253,7 +1253,7 @@ static UInt eregOfRexRM ( Prefix pfx, UChar mod_reg_rm )
 static UInt offsetIRegG ( Int sz, Prefix pfx, UChar mod_reg_rm )
 {
    UInt reg;
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(IS_VALID_PFX(pfx));
    vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
    reg = gregOfRexRM( pfx, mod_reg_rm );
@@ -1332,7 +1332,7 @@ const HChar* nameIRegV ( Int sz, Prefix pfx )
 static UInt offsetIRegE ( Int sz, Prefix pfx, UChar mod_reg_rm )
 {
    UInt reg;
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(IS_VALID_PFX(pfx));
    vassert(sz == 8 || sz == 4 || sz == 2 || sz == 1);
    reg = eregOfRexRM( pfx, mod_reg_rm );
@@ -1401,7 +1401,7 @@ static Int ymmGuestRegOffset ( UInt ymmreg )
 static Int xmmGuestRegOffset ( UInt xmmreg )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    return ymmGuestRegOffset( xmmreg );
 }
 
@@ -1411,7 +1411,7 @@ static Int xmmGuestRegOffset ( UInt xmmreg )
 static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 8);
    return xmmGuestRegOffset( xmmreg ) + 2 * laneno;
 }
@@ -1419,7 +1419,7 @@ static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
 static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 4);
    return xmmGuestRegOffset( xmmreg ) + 4 * laneno;
 }
@@ -1427,7 +1427,7 @@ static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
 static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 2);
    return xmmGuestRegOffset( xmmreg ) + 8 * laneno;
 }
@@ -1435,7 +1435,7 @@ static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno )
 static Int ymmGuestRegLane128offset ( UInt ymmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 2);
    return ymmGuestRegOffset( ymmreg ) + 16 * laneno;
 }
@@ -1443,7 +1443,7 @@ static Int ymmGuestRegLane128offset ( UInt ymmreg, Int laneno )
 static Int ymmGuestRegLane64offset ( UInt ymmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 4);
    return ymmGuestRegOffset( ymmreg ) + 8 * laneno;
 }
@@ -1451,7 +1451,7 @@ static Int ymmGuestRegLane64offset ( UInt ymmreg, Int laneno )
 static Int ymmGuestRegLane32offset ( UInt ymmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 8);
    return ymmGuestRegOffset( ymmreg ) + 4 * laneno;
 }
@@ -31745,7 +31745,7 @@ DisResult disInstr_AMD64 ( IRSB*        irsb_IN,
                            VexArch      guest_arch,
                            VexArchInfo* archinfo,
                            VexAbiInfo*  abiinfo,
-                           Bool         host_bigendian_IN,
+                           VexEndness   host_endness_IN,
                            Bool         sigill_diag_IN )
 {
    Int       i, x1, x2;
@@ -31756,7 +31756,7 @@ DisResult disInstr_AMD64 ( IRSB*        irsb_IN,
    vassert(guest_arch == VexArchAMD64);
    guest_code           = guest_code_IN;
    irsb                 = irsb_IN;
-   host_is_bigendian    = host_bigendian_IN;
+   host_endness         = host_endness_IN;
    guest_RIP_curr_instr = guest_IP;
    guest_RIP_bbstart    = guest_IP - delta;
 
VEX/priv/guest_arm64_defs.h
index b8eb1ff9492f3a9c3a87a87f68467ef50f34a96f..7d772c831dbce2b77e1961d45739454e1c99620c 100644 (file)
@@ -50,7 +50,7 @@ DisResult disInstr_ARM64 ( IRSB*        irbb,
                            VexArch      guest_arch,
                            VexArchInfo* archinfo,
                            VexAbiInfo*  abiinfo,
-                           Bool         host_bigendian,
+                           VexEndness   host_endness,
                            Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
VEX/priv/guest_arm64_toIR.c
index f3b8d546f6e4894ecea9233520c5ef94d9ee56a7..4132780ff1f8dd15f0e38431f20431fc81b2eb66 100644 (file)
    not change during translation of the instruction.
 */
 
-/* CONST: is the host bigendian?  We need to know this in order to do
-   sub-register accesses to the SIMD/FP registers correctly. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness?  We need to know this in
+   order to do sub-register accesses to the SIMD/FP registers
+   correctly. */
+static VexEndness host_endness;
 
 /* CONST: The guest address for the instruction currently being
    translated.  */
@@ -1227,7 +1228,7 @@ static IRType preferredVectorSubTypeFromSize ( UInt szB )
    has the lowest offset. */
 static Int offsetQRegLane ( UInt qregNo, IRType laneTy, UInt laneNo )
 {
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    Int base = offsetQReg128(qregNo);
    /* Since the host is little-endian, the least significant lane
       will be at the lowest address. */
@@ -10355,7 +10356,7 @@ DisResult disInstr_ARM64 ( IRSB*        irsb_IN,
                            VexArch      guest_arch,
                            VexArchInfo* archinfo,
                            VexAbiInfo*  abiinfo,
-                           Bool         host_bigendian_IN,
+                           VexEndness   host_endness_IN,
                            Bool         sigill_diag_IN )
 {
    DisResult dres;
@@ -10365,7 +10366,7 @@ DisResult disInstr_ARM64 ( IRSB*        irsb_IN,
    vassert(guest_arch == VexArchARM64);
 
    irsb                = irsb_IN;
-   host_is_bigendian   = host_bigendian_IN;
+   host_endness        = host_endness_IN;
    guest_PC_curr_instr = (Addr64)guest_IP;
 
    /* Sanity checks */
VEX/priv/guest_arm_defs.h
index 776abb7aadd3f550f409f4337627cc92f31e8405..ce1801707ca86d00f5de88124eb2845214eb8221 100644 (file)
@@ -52,7 +52,7 @@ DisResult disInstr_ARM ( IRSB*        irbb,
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian,
+                         VexEndness   host_endness,
                          Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
VEX/priv/guest_arm_toIR.c
index 7a530598ce51e19bc8f33be9d8239f662425c6e4..a7ab76b900634d04b8cb8dbfc6fc895df3d280e5 100644 (file)
    not change during translation of the instruction.
 */
 
-/* CONST: is the host bigendian?  This has to do with float vs double
-   register accesses on VFP, but it's complex and not properly thought
-   out. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness?  This has to do with float vs
+   double register accesses on VFP, but it's complex and not properly
+   thought out. */
+static VexEndness host_endness;
 
 /* CONST: The guest address for the instruction currently being
    translated.  This is the real, "decoded" address (not subject
@@ -849,11 +849,11 @@ static Int floatGuestRegOffset ( UInt fregNo )
    Int off;
    vassert(fregNo < 32);
    off = doubleGuestRegOffset(fregNo >> 1);
-   if (host_is_bigendian) {
-      vassert(0);
-   } else {
+   if (host_endness == VexEndnessLE) {
       if (fregNo & 1)
          off += 4;
+   } else {
+      vassert(0);
    }
    return off;
 }
@@ -21976,7 +21976,7 @@ DisResult disInstr_ARM ( IRSB*        irsb_IN,
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian_IN,
+                         VexEndness   host_endness_IN,
                          Bool         sigill_diag_IN )
 {
    DisResult dres;
@@ -21985,9 +21985,9 @@ DisResult disInstr_ARM ( IRSB*        irsb_IN,
    /* Set globals (see top of this file) */
    vassert(guest_arch == VexArchARM);
 
-   irsb              = irsb_IN;
-   host_is_bigendian = host_bigendian_IN;
-   __curr_is_Thumb   = isThumb;
+   irsb            = irsb_IN;
+   host_endness    = host_endness_IN;
+   __curr_is_Thumb = isThumb;
 
    if (isThumb) {
       guest_R15_curr_instr_notENC = (Addr32)guest_IP_ENCODED - 1;
VEX/priv/guest_generic_bb_to_IR.c
index 8bba8de3957f2a6dc1c111e91af1789e4ffa3725..63e6a2afbc2b1a0dad2a6b03c8834492d63d47b5 100644 (file)
@@ -186,7 +186,7 @@ IRSB* bb_to_IR (
          /*IN*/ UChar*           guest_code,
          /*IN*/ Addr64           guest_IP_bbstart,
          /*IN*/ Bool             (*chase_into_ok)(void*,Addr64),
-         /*IN*/ Bool             host_bigendian,
+         /*IN*/ VexEndness       host_endness,
          /*IN*/ Bool             sigill_diag,
          /*IN*/ VexArch          arch_guest,
          /*IN*/ VexArchInfo*     archinfo_guest,
@@ -362,7 +362,7 @@ IRSB* bb_to_IR (
                             arch_guest,
                             archinfo_guest,
                             abiinfo_both,
-                            host_bigendian,
+                            host_endness,
                             sigill_diag );
 
       /* stay sane ... */
VEX/priv/guest_generic_bb_to_IR.h
index 30e216dcd8e4865cd7b071ac35cbe7ee43cd8a0e..5a7f016f9b13c351e9d3cbbf7cb709f73098e8d7 100644 (file)
@@ -152,8 +152,8 @@ typedef
       /* ABI info for both guest and host */
       /*IN*/  VexAbiInfo*  abiinfo,
 
-      /* Is the host bigendian? */
-      /*IN*/  Bool         host_bigendian,
+      /* The endianness of the host */
+      /*IN*/  VexEndness   host_endness,
 
       /* Should diagnostics be printed for illegal instructions? */
       /*IN*/  Bool         sigill_diag
@@ -176,7 +176,7 @@ IRSB* bb_to_IR (
          /*IN*/ UChar*           guest_code,
          /*IN*/ Addr64           guest_IP_bbstart,
          /*IN*/ Bool             (*chase_into_ok)(void*,Addr64),
-         /*IN*/ Bool             host_bigendian,
+         /*IN*/ VexEndness       host_endness,
          /*IN*/ Bool             sigill_diag,
          /*IN*/ VexArch          arch_guest,
          /*IN*/ VexArchInfo*     archinfo_guest,
VEX/priv/guest_mips_defs.h
index da112c5b1b4df88563fd64e923785554785abe57..1092aca73acc7612491b424f5c6009479e3779b2 100644 (file)
@@ -51,7 +51,7 @@ extern DisResult disInstr_MIPS ( IRSB*        irbb,
                                  VexArch      guest_arch,
                                  VexArchInfo* archinfo,
                                  VexAbiInfo*  abiinfo,
-                                 Bool         host_bigendian,
+                                 VexEndness   host_endness,
                                  Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
VEX/priv/guest_mips_toIR.c
index 0e9f1603df90e5540faeb953e6a53d4a13830783..57be41eb9bbb5a827212abb3905a43dd53bc3ac0 100644 (file)
    that we don't have to pass them around endlessly. CONST means does
    not change during translation of the instruction. */
 
-/* CONST: is the host bigendian?  This has to do with float vs double
-   register accesses on VFP, but it's complex and not properly thought
-   out. */
-static Bool host_is_bigendian;
+/* CONST: what is the host's endianness?  This has to do with float vs
+   double register accesses on VFP, but it's complex and not properly
+   thought out. */
+static VexEndness host_endness;
 
 /* Pointer to the guest code area. */
 static UChar *guest_code;
@@ -17202,7 +17202,7 @@ DisResult disInstr_MIPS( IRSB*        irsb_IN,
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian_IN,
+                         VexEndness   host_endness_IN,
                          Bool         sigill_diag_IN )
 {
    DisResult dres;
@@ -17217,7 +17217,7 @@ DisResult disInstr_MIPS( IRSB*        irsb_IN,
 
    guest_code = guest_code_IN;
    irsb = irsb_IN;
-   host_is_bigendian = host_bigendian_IN;
+   host_endness = host_endness_IN;
 #if defined(VGP_mips32_linux)
    guest_PC_curr_instr = (Addr32)guest_IP;
 #elif defined(VGP_mips64_linux)
VEX/priv/guest_ppc_defs.h
index a27c96ff55207aa32a214fd5c2e0fb69b0a600aa..944989d69b1038f50b450141858ff0092e2d1fbd 100644 (file)
@@ -61,7 +61,7 @@ DisResult disInstr_PPC ( IRSB*        irbb,
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian,
+                         VexEndness   host_endness,
                          Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
VEX/priv/guest_ppc_toIR.c
index 0b8cbffb4f26876ad89ce3ed86e76bddef6b639c..2b4d4bd3c08b7a21e9c87d1449228ecd3a75c8a4 100644 (file)
    given insn. */
 
 /* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
 
 /* Pointer to the guest code area. */
 static UChar* guest_code;
@@ -1039,7 +1039,7 @@ static Int integerGuestRegOffset ( UInt archreg )
    // jrs: probably not necessary; only matters if we reference sub-parts
    // of the ppc registers, but that isn't the case
    // later: this might affect Altivec though?
-   vassert(host_is_bigendian);
+   vassert(host_endness == VexEndnessBE);
 
    switch (archreg) {
    case  0: return offsetofPPCGuestState(guest_GPR0);
@@ -19941,7 +19941,7 @@ DisResult disInstr_PPC ( IRSB*        irsb_IN,
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian_IN,
+                         VexEndness   host_endness_IN,
                          Bool         sigill_diag_IN )
 {
    IRType     ty;
@@ -19973,7 +19973,7 @@ DisResult disInstr_PPC ( IRSB*        irsb_IN,
    /* Set globals (see top of this file) */
    guest_code           = guest_code_IN;
    irsb                 = irsb_IN;
-   host_is_bigendian    = host_bigendian_IN;
+   host_endness         = host_endness_IN;
 
    guest_CIA_curr_instr = mkSzAddr(ty, guest_IP);
    guest_CIA_bbstart    = mkSzAddr(ty, guest_IP - delta);
VEX/priv/guest_s390_defs.h
index 63dd1af3d9147bec5cbacd2cb1f79aa75e574d6f..6aca7f530ffb5a43626ffc1e1cb62797c277f4ba 100644 (file)
@@ -50,7 +50,7 @@ DisResult disInstr_S390 ( IRSB*        irbb,
                           VexArch      guest_arch,
                           VexArchInfo* archinfo,
                           VexAbiInfo*  abiinfo,
-                          Bool         host_bigendian,
+                          VexEndness   host_endness,
                           Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
VEX/priv/guest_s390_toIR.c
index b6fc165284a0bbe5f962d5e23c67989f23b1fc8e..4a5013b34137aa122ca8b32818ab2c760d40f75c 100644 (file)
@@ -16565,13 +16565,13 @@ disInstr_S390(IRSB        *irsb_IN,
               VexArch      guest_arch,
               VexArchInfo *archinfo,
               VexAbiInfo  *abiinfo,
-              Bool         host_bigendian,
+              VexEndness   host_endness,
               Bool         sigill_diag_IN)
 {
    vassert(guest_arch == VexArchS390X);
 
    /* The instruction decoder requires a big-endian machine. */
-   vassert(host_bigendian == True);
+   vassert(host_endness == VexEndnessBE);
 
    /* Set globals (see top of this file) */
    guest_IA_curr_instr = guest_IP;
VEX/priv/guest_x86_defs.h
index 1c64912c9efaa05d400c276912c2e460df794cb2..e7bc53094fb658db209d046536067bf98dbe65ab 100644 (file)
@@ -60,7 +60,7 @@ DisResult disInstr_X86 ( IRSB*        irbb,
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian,
+                         VexEndness   host_endness,
                          Bool         sigill_diag );
 
 /* Used by the optimiser to specialise calls to helpers. */
VEX/priv/guest_x86_toIR.c
index 37afd9736e97f714ca427bd35ef7ce3b6dd640c8..2ba439416042a8e771f96b6058dd13da6b95abf5 100644 (file)
    given insn. */
 
 /* We need to know this to do sub-register accesses correctly. */
-static Bool host_is_bigendian;
+static VexEndness host_endness;
 
 /* Pointer to the guest code area (points to start of BB, not to the
    insn being processed). */
@@ -452,7 +452,7 @@ static Int integerGuestRegOffset ( Int sz, UInt archreg )
    vassert(archreg < 8);
 
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
 
    if (sz == 4 || sz == 2 || (sz == 1 && archreg < 4)) {
       switch (archreg) {
@@ -515,7 +515,7 @@ static Int xmmGuestRegOffset ( UInt xmmreg )
 static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 8);
    return xmmGuestRegOffset( xmmreg ) + 2 * laneno;
 }
@@ -523,7 +523,7 @@ static Int xmmGuestRegLane16offset ( UInt xmmreg, Int laneno )
 static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 4);
    return xmmGuestRegOffset( xmmreg ) + 4 * laneno;
 }
@@ -531,7 +531,7 @@ static Int xmmGuestRegLane32offset ( UInt xmmreg, Int laneno )
 static Int xmmGuestRegLane64offset ( UInt xmmreg, Int laneno )
 {
    /* Correct for little-endian host only. */
-   vassert(!host_is_bigendian);
+   vassert(host_endness == VexEndnessLE);
    vassert(laneno >= 0 && laneno < 2);
    return xmmGuestRegOffset( xmmreg ) + 8 * laneno;
 }
@@ -15421,7 +15421,7 @@ DisResult disInstr_X86 ( IRSB*        irsb_IN,
                          VexArch      guest_arch,
                          VexArchInfo* archinfo,
                          VexAbiInfo*  abiinfo,
-                         Bool         host_bigendian_IN,
+                         VexEndness   host_endness_IN,
                          Bool         sigill_diag_IN )
 {
    Int       i, x1, x2;
@@ -15432,7 +15432,7 @@ DisResult disInstr_X86 ( IRSB*        irsb_IN,
    vassert(guest_arch == VexArchX86);
    guest_code           = guest_code_IN;
    irsb                 = irsb_IN;
-   host_is_bigendian    = host_bigendian_IN;
+   host_endness         = host_endness_IN;
    guest_EIP_curr_instr = (Addr32)guest_IP;
    guest_EIP_bbstart    = (Addr32)toUInt(guest_IP - delta);
 
VEX/priv/host_amd64_defs.c
index cd5893dd2f77d578f943ec0f1ec0e6f8be0cddde..9a4aa3e76965d5c2d4d143e6464b0e24f63e388d 100644 (file)
@@ -2265,7 +2265,7 @@ static UChar* do_ffree_st ( UChar* p, Int n )
 
 Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
                       UChar* buf, Int nbuf, AMD64Instr* i, 
-                      Bool mode64,
+                      Bool mode64, VexEndness endness_host,
                       void* disp_cp_chain_me_to_slowEP,
                       void* disp_cp_chain_me_to_fastEP,
                       void* disp_cp_xindir,
@@ -3499,7 +3499,7 @@ Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
       p = doAMode_M(p, fake(4), i->Ain.EvCheck.amFailAddr);
       vassert(p - p0 == 8); /* also ensures that 0x03 offset above is ok */
       /* And crosscheck .. */
-      vassert(evCheckSzB_AMD64() == 8);
+      vassert(evCheckSzB_AMD64(endness_host) == 8);
       goto done;
    }
 
@@ -3542,7 +3542,7 @@ Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
 /* How big is an event check?  See case for Ain_EvCheck in
    emit_AMD64Instr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_AMD64 ( void )
+Int evCheckSzB_AMD64 ( VexEndness endness_host )
 {
    return 8;
 }
@@ -3550,10 +3550,13 @@ Int evCheckSzB_AMD64 ( void )
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_AMD64 ( void* place_to_chain,
+VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
+                                   void* place_to_chain,
                                    void* disp_cp_chain_me_EXPECTED,
                                    void* place_to_jump_to )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         movabsq $disp_cp_chain_me_EXPECTED, %r11
         call *%r11
@@ -3636,10 +3639,13 @@ VexInvalRange chainXDirect_AMD64 ( void* place_to_chain,
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_AMD64 ( void* place_to_unchain,
+VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
+                                     void* place_to_unchain,
                                      void* place_to_jump_to_EXPECTED,
                                      void* disp_cp_chain_me )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is either:
         (general case)
           movabsq $place_to_jump_to_EXPECTED, %r11
@@ -3700,9 +3706,11 @@ VexInvalRange unchainXDirect_AMD64 ( void* place_to_unchain,
 
 /* Patch the counter address into a profile inc point, as previously
    created by the Ain_ProfInc case for emit_AMD64Instr. */
-VexInvalRange patchProfInc_AMD64 ( void*  place_to_patch,
+VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
+                                   void*  place_to_patch,
                                    ULong* location_of_counter )
 {
+   vassert(endness_host == VexEndnessLE);
    vassert(sizeof(ULong*) == 8);
    UChar* p = (UChar*)place_to_patch;
    vassert(p[0] == 0x49);
VEX/priv/host_amd64_defs.h
index d774332287fc01986e92b4c7467fd5f93b292cf3..c8b49c96f0e3ad42f4e4067dba2e65cde3ef9870 100644 (file)
@@ -754,8 +754,10 @@ extern void         getRegUsage_AMD64Instr ( HRegUsage*, AMD64Instr*, Bool );
 extern void         mapRegs_AMD64Instr     ( HRegRemap*, AMD64Instr*, Bool );
 extern Bool         isMove_AMD64Instr      ( AMD64Instr*, HReg*, HReg* );
 extern Int          emit_AMD64Instr        ( /*MB_MOD*/Bool* is_profInc,
-                                             UChar* buf, Int nbuf, AMD64Instr* i, 
+                                             UChar* buf, Int nbuf,
+                                             AMD64Instr* i, 
                                              Bool mode64,
+                                             VexEndness endness_host,
                                              void* disp_cp_chain_me_to_slowEP,
                                              void* disp_cp_chain_me_to_fastEP,
                                              void* disp_cp_xindir,
@@ -782,19 +784,22 @@ extern HInstrArray* iselSB_AMD64           ( IRSB*,
    and so assumes that they are both <= 128, and so can use the short
    offset encoding.  This is all checked with assertions, so in the
    worst case we will merely assert at startup. */
-extern Int evCheckSzB_AMD64 ( void );
+extern Int evCheckSzB_AMD64 ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_AMD64 ( void* place_to_chain,
+extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
+                                          void* place_to_chain,
                                           void* disp_cp_chain_me_EXPECTED,
                                           void* place_to_jump_to );
 
-extern VexInvalRange unchainXDirect_AMD64 ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
+                                            void* place_to_unchain,
                                             void* place_to_jump_to_EXPECTED,
                                             void* disp_cp_chain_me );
 
 /* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_AMD64 ( void*  place_to_patch,
+extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
+                                          void*  place_to_patch,
                                           ULong* location_of_counter );
 
 
VEX/priv/host_amd64_isel.c
index 39d7941577914b6be10252e3194688fe09c477d5..8dc67b341eddad1d92ac605edfa971faddc1d81f 100644 (file)
@@ -4877,6 +4877,9 @@ HInstrArray* iselSB_AMD64 ( IRSB* bb,
                      | VEX_HWCAPS_AMD64_BMI
                      | VEX_HWCAPS_AMD64_AVX2)));
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
    /* Make up an initial environment to use. */
    env = LibVEX_Alloc(sizeof(ISelEnv));
    env->vreg_ctr = 0;
VEX/priv/host_arm64_defs.c
index e55430fc8d8957da18b9c18205e5268c6d25d2ab..ef451a3409eeaf2cb38ce92f29719601ced48fe6 100644 (file)
@@ -4104,7 +4104,7 @@ static UInt* do_load_or_store64 ( UInt* p,
 
 Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
                       UChar* buf, Int nbuf, ARM64Instr* i,
-                      Bool mode64,
+                      Bool mode64, VexEndness endness_host,
                       void* disp_cp_chain_me_to_slowEP,
                       void* disp_cp_chain_me_to_fastEP,
                       void* disp_cp_xindir,
@@ -6866,7 +6866,7 @@ Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
          /* nofail: */
 
          /* Crosscheck */
-         vassert(evCheckSzB_ARM64() == (UChar*)p - (UChar*)p0);
+         vassert(evCheckSzB_ARM64(endness_host) == (UChar*)p - (UChar*)p0);
          goto done;
       }
 
@@ -6917,7 +6917,7 @@ Int emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
 /* How big is an event check?  See case for ARM64in_EvCheck in
    emit_ARM64Instr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_ARM64 ( void )
+Int evCheckSzB_ARM64 ( VexEndness endness_host )
 {
    return 24;
 }
@@ -6925,10 +6925,13 @@ Int evCheckSzB_ARM64 ( void )
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_ARM64 ( void* place_to_chain,
+VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host,
+                                   void* place_to_chain,
                                    void* disp_cp_chain_me_EXPECTED,
                                    void* place_to_jump_to )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         movw x9, disp_cp_chain_me_to_EXPECTED[15:0]
         movk x9, disp_cp_chain_me_to_EXPECTED[31:15], lsl 16
@@ -6968,10 +6971,13 @@ VexInvalRange chainXDirect_ARM64 ( void* place_to_chain,
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_ARM64 ( void* place_to_unchain,
+VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host,
+                                     void* place_to_unchain,
                                      void* place_to_jump_to_EXPECTED,
                                      void* disp_cp_chain_me )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         movw x9, place_to_jump_to_EXPECTED[15:0]
         movk x9, place_to_jump_to_EXPECTED[31:15], lsl 16
@@ -7009,7 +7015,8 @@ VexInvalRange unchainXDirect_ARM64 ( void* place_to_unchain,
 
 //ZZ /* Patch the counter address into a profile inc point, as previously
 //ZZ    created by the ARMin_ProfInc case for emit_ARMInstr. */
-//ZZ VexInvalRange patchProfInc_ARM ( void*  place_to_patch,
+//ZZ VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+//ZZ                                  void*  place_to_patch,
 //ZZ                                  ULong* location_of_counter )
 //ZZ {
 //ZZ    vassert(sizeof(ULong*) == 4);
VEX/priv/host_arm64_defs.h
index 90bf4c1af4ee802df132f0ebd3e88b0e30a4bf7b..f2f5bea2b4575cebfdcc5a802f40e42e1593393b 100644 (file)
@@ -843,6 +843,7 @@ extern Bool isMove_ARM64Instr      ( ARM64Instr*, HReg*, HReg* );
 extern Int  emit_ARM64Instr        ( /*MB_MOD*/Bool* is_profInc,
                                      UChar* buf, Int nbuf, ARM64Instr* i,
                                      Bool mode64,
+                                     VexEndness endness_host,
                                      void* disp_cp_chain_me_to_slowEP,
                                      void* disp_cp_chain_me_to_fastEP,
                                      void* disp_cp_xindir,
@@ -867,19 +868,22 @@ extern HInstrArray* iselSB_ARM64 ( IRSB*,
 /* How big is an event check?  This is kind of a kludge because it
    depends on the offsets of host_EvC_FAILADDR and
    host_EvC_COUNTER. */
-extern Int evCheckSzB_ARM64 ( void );
+extern Int evCheckSzB_ARM64 ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_ARM64 ( void* place_to_chain,
+extern VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host,
+                                          void* place_to_chain,
                                           void* disp_cp_chain_me_EXPECTED,
                                           void* place_to_jump_to );
 
-extern VexInvalRange unchainXDirect_ARM64 ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host,
+                                            void* place_to_unchain,
                                             void* place_to_jump_to_EXPECTED,
                                             void* disp_cp_chain_me );
 
 //ZZ /* Patch the counter location into an existing ProfInc point. */
-//ZZ extern VexInvalRange patchProfInc_ARM ( void*  place_to_patch,
+//ZZ extern VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+//ZZ                                         void*  place_to_patch,
 //ZZ                                         ULong* location_of_counter );
 
 
VEX/priv/host_arm64_isel.c
index dfbe14685e601d2df9ec168ac67d387d1ad8978b..6f2d8bcd5627feb45d4c6b707977757eacbe336f 100644 (file)
@@ -7106,6 +7106,9 @@ HInstrArray* iselSB_ARM64 ( IRSB* bb,
    /* sanity ... */
    vassert(arch_host == VexArchARM64);
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
    /* guard against unexpected space regressions */
    vassert(sizeof(ARM64Instr) <= 32);
 
VEX/priv/host_arm_defs.c
index 8ce938527039453cd1e01be7c1326dfa17b21ef9..ed41344ef55678c43e0c71f2898bc55e468f5021 100644 (file)
@@ -2971,7 +2971,7 @@ static UInt* do_load_or_store32 ( UInt* p,
 
 Int emit_ARMInstr ( /*MB_MOD*/Bool* is_profInc,
                     UChar* buf, Int nbuf, ARMInstr* i, 
-                    Bool mode64,
+                    Bool mode64, VexEndness endness_host,
                     void* disp_cp_chain_me_to_slowEP,
                     void* disp_cp_chain_me_to_fastEP,
                     void* disp_cp_xindir,
@@ -4644,7 +4644,7 @@ Int emit_ARMInstr ( /*MB_MOD*/Bool* is_profInc,
          /* nofail: */
 
          /* Crosscheck */
-         vassert(evCheckSzB_ARM() == (UChar*)p - (UChar*)p0);
+         vassert(evCheckSzB_ARM(endness_host) == (UChar*)p - (UChar*)p0);
          goto done;
       }
 
@@ -4695,7 +4695,7 @@ Int emit_ARMInstr ( /*MB_MOD*/Bool* is_profInc,
 /* How big is an event check?  See case for ARMin_EvCheck in
    emit_ARMInstr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_ARM ( void )
+Int evCheckSzB_ARM ( VexEndness endness_host )
 {
    return 24;
 }
@@ -4703,10 +4703,13 @@ Int evCheckSzB_ARM ( void )
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_ARM ( void* place_to_chain,
+VexInvalRange chainXDirect_ARM ( VexEndness endness_host,
+                                 void* place_to_chain,
                                  void* disp_cp_chain_me_EXPECTED,
                                  void* place_to_jump_to )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         movw r12, lo16(disp_cp_chain_me_to_EXPECTED)
         movt r12, hi16(disp_cp_chain_me_to_EXPECTED)
@@ -4783,10 +4786,13 @@ VexInvalRange chainXDirect_ARM ( void* place_to_chain,
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_ARM ( void* place_to_unchain,
+VexInvalRange unchainXDirect_ARM ( VexEndness endness_host,
+                                   void* place_to_unchain,
                                    void* place_to_jump_to_EXPECTED,
                                    void* disp_cp_chain_me )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         (general case)
           movw r12, lo16(place_to_jump_to_EXPECTED)
@@ -4844,9 +4850,11 @@ VexInvalRange unchainXDirect_ARM ( void* place_to_unchain,
 
 /* Patch the counter address into a profile inc point, as previously
    created by the ARMin_ProfInc case for emit_ARMInstr. */
-VexInvalRange patchProfInc_ARM ( void*  place_to_patch,
+VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+                                 void*  place_to_patch,
                                  ULong* location_of_counter )
 {
+   vassert(endness_host == VexEndnessLE);
    vassert(sizeof(ULong*) == 4);
    UInt* p = (UInt*)place_to_patch;
    vassert(0 == (3 & (HWord)p));
VEX/priv/host_arm_defs.h
index f1ce4a741d5176c05189210cb341b49bfe79e89f..74889822a948fea686996fb6700d899b3f7dcb69 100644 (file)
@@ -1027,6 +1027,7 @@ extern Bool isMove_ARMInstr      ( ARMInstr*, HReg*, HReg* );
 extern Int  emit_ARMInstr        ( /*MB_MOD*/Bool* is_profInc,
                                    UChar* buf, Int nbuf, ARMInstr* i, 
                                    Bool mode64,
+                                   VexEndness endness_host,
                                    void* disp_cp_chain_me_to_slowEP,
                                    void* disp_cp_chain_me_to_fastEP,
                                    void* disp_cp_xindir,
@@ -1051,19 +1052,22 @@ extern HInstrArray* iselSB_ARM   ( IRSB*,
 /* How big is an event check?  This is kind of a kludge because it
    depends on the offsets of host_EvC_FAILADDR and
    host_EvC_COUNTER. */
-extern Int evCheckSzB_ARM ( void );
+extern Int evCheckSzB_ARM ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_ARM ( void* place_to_chain,
+extern VexInvalRange chainXDirect_ARM ( VexEndness endness_host,
+                                        void* place_to_chain,
                                         void* disp_cp_chain_me_EXPECTED,
                                         void* place_to_jump_to );
 
-extern VexInvalRange unchainXDirect_ARM ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_ARM ( VexEndness endness_host,
+                                          void* place_to_unchain,
                                           void* place_to_jump_to_EXPECTED,
                                           void* disp_cp_chain_me );
 
 /* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_ARM ( void*  place_to_patch,
+extern VexInvalRange patchProfInc_ARM ( VexEndness endness_host,
+                                        void*  place_to_patch,
                                         ULong* location_of_counter );
 
 
VEX/priv/host_arm_isel.c
index 537a3555a6db6135482c582250cb2ff95644643c..8235282aaf3cdfe5b87e21b1c56a7ac758337afb 100644 (file)
@@ -6331,6 +6331,9 @@ HInstrArray* iselSB_ARM ( IRSB* bb,
    /* sanity ... */
    vassert(arch_host == VexArchARM);
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
    /* guard against unexpected space regressions */
    vassert(sizeof(ARMInstr) <= 28);
 
VEX/priv/host_mips_defs.c
index 1bf81b2f59e45c1aeb85b738e7f077823b7a9c9d..58e50d45e60194900c5cbf7021fd89375643c3c7 100644 (file)
@@ -2920,6 +2920,7 @@ static UChar *mkMoveReg(UChar * p, UInt r_dst, UInt r_src)
 Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc,
                      UChar* buf, Int nbuf, MIPSInstr* i,
                      Bool mode64,
+                     VexEndness endness_host,
                      void* disp_cp_chain_me_to_slowEP,
                      void* disp_cp_chain_me_to_fastEP,
                      void* disp_cp_xindir,
@@ -4229,7 +4230,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc,
          /* nofail: */
 
          /* Crosscheck */
-         vassert(evCheckSzB_MIPS() == (UChar*)p - (UChar*)p0);
+         vassert(evCheckSzB_MIPS(endness_host) == (UChar*)p - (UChar*)p0);
          goto done;
       }
 
@@ -4315,7 +4316,7 @@ Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc,
 /* How big is an event check?  See case for Min_EvCheck in
    emit_MIPSInstr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_MIPS ( void )
+Int evCheckSzB_MIPS ( VexEndness endness_host )
 {
   UInt kInstrSize = 4;
   return 7*kInstrSize;
@@ -4323,11 +4324,13 @@ Int evCheckSzB_MIPS ( void )
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_MIPS ( void* place_to_chain,
+VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
+                                  void* place_to_chain,
                                   void* disp_cp_chain_me_EXPECTED,
                                   void* place_to_jump_to,
                                   Bool  mode64 )
 {
+   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
    /* What we're expecting to see is:
         move r9, disp_cp_chain_me_to_EXPECTED
         jalr r9
@@ -4369,11 +4372,13 @@ VexInvalRange chainXDirect_MIPS ( void* place_to_chain,
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain,
+VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
+                                    void* place_to_unchain,
                                     void* place_to_jump_to_EXPECTED,
                                     void* disp_cp_chain_me,
                                     Bool  mode64 )
 {
+   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
    /* What we're expecting to see is:
         move r9, place_to_jump_to_EXPECTED
         jalr r9
@@ -4413,13 +4418,16 @@ VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain,
 
 /* Patch the counter address into a profile inc point, as previously
    created by the Min_ProfInc case for emit_MIPSInstr. */
-VexInvalRange patchProfInc_MIPS ( void*  place_to_patch,
+VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
+                                  void*  place_to_patch,
                                   ULong* location_of_counter, Bool mode64 )
 {
-   if (mode64)
+   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
+   if (mode64) {
       vassert(sizeof(ULong*) == 8);
-   else
+   } else {
       vassert(sizeof(ULong*) == 4);
+   }
    UChar* p = (UChar*)place_to_patch;
    vassert(0 == (3 & (HWord)p));
    vassert(isLoadImm_EXACTLY2or6((UChar *)p, /*r*/9,
VEX/priv/host_mips_defs.h
index 22881eea820d2f208059cff30edd737c5d2ae4e6..cfce11b2850d95bca239721c60566d9ec1259a57 100644 (file)
@@ -715,6 +715,7 @@ extern Bool       isMove_MIPSInstr      (MIPSInstr *, HReg *, HReg *);
 extern Int        emit_MIPSInstr        (/*MB_MOD*/Bool* is_profInc,
                                          UChar* buf, Int nbuf, MIPSInstr* i,
                                          Bool mode64,
+                                         VexEndness endness_host,
                                          void* disp_cp_chain_me_to_slowEP,
                                          void* disp_cp_chain_me_to_fastEP,
                                          void* disp_cp_xindir,
@@ -741,25 +742,28 @@ extern HInstrArray *iselSB_MIPS          ( IRSB*,
    and so assumes that they are both <= 128, and so can use the short
    offset encoding.  This is all checked with assertions, so in the
    worst case we will merely assert at startup. */
-extern Int evCheckSzB_MIPS ( void );
+extern Int evCheckSzB_MIPS ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_MIPS ( void* place_to_chain,
+extern VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
+                                         void* place_to_chain,
                                          void* disp_cp_chain_me_EXPECTED,
                                          void* place_to_jump_to,
                                          Bool  mode64 );
 
-extern VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
+                                           void* place_to_unchain,
                                            void* place_to_jump_to_EXPECTED,
                                            void* disp_cp_chain_me,
                                            Bool  mode64 );
 
 /* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_MIPS ( void*  place_to_patch,
+extern VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
+                                         void*  place_to_patch,
                                          ULong* location_of_counter,
                                          Bool  mode64 );
 
-#endif            /* ndef __LIBVEX_HOST_MIPS_HDEFS_H */
+#endif /* ndef __VEX_HOST_MIPS_DEFS_H */
 
 /*---------------------------------------------------------------*/
 /*--- end                                    host-mips_defs.h ---*/
VEX/priv/host_mips_isel.c
index 233367d925f65ae678f2d45d5c20139f0a619d39..8ee3556aefaac5799d2472f7188f6e6a9e70cb68 100644 (file)
@@ -4173,6 +4173,10 @@ HInstrArray *iselSB_MIPS ( IRSB* bb,
            || VEX_PRID_COMP_BROADCOM == hwcaps_host
            || VEX_PRID_COMP_NETLOGIC);
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE
+           || archinfo_host->endness == VexEndnessBE);
+
    mode64 = arch_host != VexArchMIPS32;
 #if (__mips_fpr==64)
    fp_mode64 = ((VEX_MIPS_REV(hwcaps_host) == VEX_PRID_CPU_32FPR)
VEX/priv/host_ppc_defs.c
index 7c98aebaed53689d81967189e6e20a0439c84378..43101b03177e58ae1d1060d257412e913d2fb16b 100644 (file)
@@ -3720,7 +3720,7 @@ static UChar* mkFormVA ( UChar* p, UInt opc1, UInt r1, UInt r2,
 */
 Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
                     UChar* buf, Int nbuf, PPCInstr* i, 
-                    Bool mode64,
+                    Bool mode64, VexEndness endness_host,
                     void* disp_cp_chain_me_to_slowEP,
                     void* disp_cp_chain_me_to_fastEP,
                     void* disp_cp_xindir,
@@ -5707,7 +5707,7 @@ Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
       /* nofail: */
 
       /* Crosscheck */
-      vassert(evCheckSzB_PPC() == (UChar*)p - (UChar*)p0);
+      vassert(evCheckSzB_PPC(endness_host) == (UChar*)p - (UChar*)p0);
       goto done;
    }
 
@@ -5772,7 +5772,7 @@ Int emit_PPCInstr ( /*MB_MOD*/Bool* is_profInc,
 /* How big is an event check?  See case for Pin_EvCheck in
    emit_PPCInstr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_PPC ( void )
+Int evCheckSzB_PPC ( VexEndness endness_host )
 {
   return 28;
 }
@@ -5780,11 +5780,18 @@ Int evCheckSzB_PPC ( void )
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_PPC ( void* place_to_chain,
+VexInvalRange chainXDirect_PPC ( VexEndness endness_host,
+                                 void* place_to_chain,
                                  void* disp_cp_chain_me_EXPECTED,
                                  void* place_to_jump_to,
                                  Bool  mode64 )
 {
+   if (mode64) {
+      vassert(endness_host == VexEndnessBE); /* later: or LE */
+   } else {
+      vassert(endness_host == VexEndnessBE);
+   }
+
    /* What we're expecting to see is:
         imm32/64-fixed r30, disp_cp_chain_me_to_EXPECTED
         mtctr r30
@@ -5825,11 +5832,18 @@ VexInvalRange chainXDirect_PPC ( void* place_to_chain,
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_PPC ( void* place_to_unchain,
+VexInvalRange unchainXDirect_PPC ( VexEndness endness_host,
+                                   void* place_to_unchain,
                                    void* place_to_jump_to_EXPECTED,
                                    void* disp_cp_chain_me,
                                    Bool  mode64 )
 {
+   if (mode64) {
+      vassert(endness_host == VexEndnessBE); /* later: or LE */
+   } else {
+      vassert(endness_host == VexEndnessBE);
+   }
+
    /* What we're expecting to see is:
         imm32/64-fixed r30, place_to_jump_to_EXPECTED
         mtctr r30
@@ -5870,10 +5884,17 @@ VexInvalRange unchainXDirect_PPC ( void* place_to_unchain,
 
 /* Patch the counter address into a profile inc point, as previously
    created by the Pin_ProfInc case for emit_PPCInstr. */
-VexInvalRange patchProfInc_PPC ( void*  place_to_patch,
+VexInvalRange patchProfInc_PPC ( VexEndness endness_host,
+                                 void*  place_to_patch,
                                  ULong* location_of_counter,
                                  Bool   mode64 )
 {
+   if (mode64) {
+      vassert(endness_host == VexEndnessBE); /* later: or LE */
+   } else {
+      vassert(endness_host == VexEndnessBE);
+   }
+
    UChar* p = (UChar*)place_to_patch;
    vassert(0 == (3 & (HWord)p));
 
VEX/priv/host_ppc_defs.h
index 635cc59eadd49684ddf6541b5534d13a20ddaf32..7f3043f9f9ba2f62f3adc93f0efedb8f351af54d 100644 (file)
@@ -1138,6 +1138,7 @@ extern Bool         isMove_PPCInstr      ( PPCInstr*, HReg*, HReg* );
 extern Int          emit_PPCInstr        ( /*MB_MOD*/Bool* is_profInc,
                                            UChar* buf, Int nbuf, PPCInstr* i, 
                                            Bool mode64,
+                                           VexEndness endness_host,
                                            void* disp_cp_chain_me_to_slowEP,
                                            void* disp_cp_chain_me_to_fastEP,
                                            void* disp_cp_xindir,
@@ -1162,21 +1163,24 @@ extern HInstrArray* iselSB_PPC           ( IRSB*,
 /* How big is an event check?  This is kind of a kludge because it
    depends on the offsets of host_EvC_FAILADDR and
    host_EvC_COUNTER. */
-extern Int evCheckSzB_PPC ( void );
+extern Int evCheckSzB_PPC ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_PPC ( void* place_to_chain,
+extern VexInvalRange chainXDirect_PPC ( VexEndness endness_host,
+                                        void* place_to_chain,
                                         void* disp_cp_chain_me_EXPECTED,
                                         void* place_to_jump_to,
                                         Bool  mode64 );
 
-extern VexInvalRange unchainXDirect_PPC ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_PPC ( VexEndness endness_host,
+                                          void* place_to_unchain,
                                           void* place_to_jump_to_EXPECTED,
                                           void* disp_cp_chain_me,
                                           Bool  mode64 );
 
 /* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_PPC ( void*  place_to_patch,
+extern VexInvalRange patchProfInc_PPC ( VexEndness endness_host,
+                                        void*  place_to_patch,
                                         ULong* location_of_counter,
                                         Bool   mode64 );
 
VEX/priv/host_ppc_isel.c
index 79070754d479829ecbd72ba9dcc32397a9a39d6a..d675120d9af1e6dbdeeb37a059d0559b53b61246 100644 (file)
@@ -5920,6 +5920,9 @@ HInstrArray* iselSB_PPC ( IRSB* bb,
       vassert((hwcaps_host & mask64) == 0);
    }
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessBE);
+
    /* Make up an initial environment to use. */
    env = LibVEX_Alloc(sizeof(ISelEnv));
    env->vreg_ctr = 0;
VEX/priv/host_s390_defs.c
index ba77250d724b7c782b5fcdae4e9ebea1c9925e2b..a8e1eed8c99c8890049dec7c877101639eb183a3 100644 (file)
@@ -9831,7 +9831,8 @@ s390_insn_xassisted_emit(UChar *buf, const s390_insn *insn,
 
    The dispatch counter is a 32-bit value. */
 static UChar *
-s390_insn_evcheck_emit(UChar *buf, const s390_insn *insn)
+s390_insn_evcheck_emit(UChar *buf, const s390_insn *insn,
+                       VexEndness endness_host)
 {
    s390_amode *amode;
    UInt b, d;
@@ -9867,7 +9868,7 @@ s390_insn_evcheck_emit(UChar *buf, const s390_insn *insn)
    
    /* Make sure the size of the generated code is identical to the size
       returned by evCheckSzB_S390 */
-   vassert(evCheckSzB_S390() == code_end - code_begin);
+   vassert(evCheckSzB_S390(endness_host) == code_end - code_begin);
 
    return buf;
 }
@@ -9896,7 +9897,8 @@ s390_insn_profinc_emit(UChar *buf,
 
 Int
 emit_S390Instr(Bool *is_profinc, UChar *buf, Int nbuf, s390_insn *insn,
-               Bool mode64, void *disp_cp_chain_me_to_slowEP,
+               Bool mode64, VexEndness endness_host,
+               void *disp_cp_chain_me_to_slowEP,
                void *disp_cp_chain_me_to_fastEP, void *disp_cp_xindir,
                void *disp_cp_xassisted)
 {
@@ -10057,7 +10059,7 @@ emit_S390Instr(Bool *is_profinc, UChar *buf, Int nbuf, s390_insn *insn,
       break;
 
    case S390_INSN_EVCHECK:
-      end = s390_insn_evcheck_emit(buf, insn);
+      end = s390_insn_evcheck_emit(buf, insn, endness_host);
       break;
 
    case S390_INSN_XDIRECT:
@@ -10087,7 +10089,7 @@ emit_S390Instr(Bool *is_profinc, UChar *buf, Int nbuf, s390_insn *insn,
 /* Return the number of bytes emitted for an S390_INSN_EVCHECK.
    See s390_insn_evcheck_emit */
 Int
-evCheckSzB_S390(void)
+evCheckSzB_S390(VexEndness endness_host)
 {
    return s390_host_has_gie ? 18 : 24;
 }
@@ -10096,7 +10098,8 @@ evCheckSzB_S390(void)
 /* Patch the counter address into CODE_TO_PATCH as previously
    generated by s390_insn_profinc_emit. */
 VexInvalRange
-patchProfInc_S390(void *code_to_patch, ULong *location_of_counter)
+patchProfInc_S390(VexEndness endness_host,
+                  void *code_to_patch, ULong *location_of_counter)
 {
    vassert(sizeof(ULong *) == 8);
 
@@ -10114,10 +10117,13 @@ patchProfInc_S390(void *code_to_patch, ULong *location_of_counter)
 /* NB: what goes on here has to be very closely coordinated with the
    s390_insn_xdirect_emit code above. */
 VexInvalRange
-chainXDirect_S390(void *place_to_chain,
+chainXDirect_S390(VexEndness endness_host,
+                  void *place_to_chain,
                   void *disp_cp_chain_me_EXPECTED,
                   void *place_to_jump_to)
 {
+   vassert(endness_host == VexEndnessBE);
+
    /* What we're expecting to see @ PLACE_TO_CHAIN is:
 
         load  tchain_scratch, #disp_cp_chain_me_EXPECTED
@@ -10199,10 +10205,13 @@ chainXDirect_S390(void *place_to_chain,
 /* NB: what goes on here has to be very closely coordinated with the
    s390_insn_xdirect_emit code above. */
 VexInvalRange
-unchainXDirect_S390(void *place_to_unchain,
+unchainXDirect_S390(VexEndness endness_host,
+                    void *place_to_unchain,
                     void *place_to_jump_to_EXPECTED,
                     void *disp_cp_chain_me)
 {
+   vassert(endness_host == VexEndnessBE);
+
    /* What we're expecting to see @ PLACE_TO_UNCHAIN:
 
           load  tchain_scratch, #place_to_jump_to_EXPECTED
index 5b6fc1fd0fde105ecccef93038a8f98c51fbd68b..7db43040ddecedc84e3146295d5fa0901e28b378 100644 (file)
@@ -736,7 +736,7 @@ void  getRegUsage_S390Instr( HRegUsage *, s390_insn *, Bool );
 void  mapRegs_S390Instr    ( HRegRemap *, s390_insn *, Bool );
 Bool  isMove_S390Instr     ( s390_insn *, HReg *, HReg * );
 Int   emit_S390Instr       ( Bool *, UChar *, Int, s390_insn *, Bool,
-                             void *, void *, void *, void *);
+                             VexEndness, void *, void *, void *, void *);
 void  getAllocableRegs_S390( Int *, HReg **, Bool );
 void  genSpill_S390        ( HInstr **, HInstr **, HReg , Int , Bool );
 void  genReload_S390       ( HInstr **, HInstr **, HReg , Int , Bool );
@@ -745,19 +745,22 @@ HInstrArray *iselSB_S390   ( IRSB *, VexArch, VexArchInfo *, VexAbiInfo *,
                              Int, Int, Bool, Bool, Addr64);
 
 /* Return the number of bytes of code needed for an event check */
-Int evCheckSzB_S390(void);
+Int evCheckSzB_S390(VexEndness endness_host);
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-VexInvalRange chainXDirect_S390(void *place_to_chain,
+VexInvalRange chainXDirect_S390(VexEndness endness_host,
+                                void *place_to_chain,
                                 void *disp_cp_chain_me_EXPECTED,
                                 void *place_to_jump_to);
 
-VexInvalRange unchainXDirect_S390(void *place_to_unchain,
+VexInvalRange unchainXDirect_S390(VexEndness endness_host,
+                                  void *place_to_unchain,
                                   void *place_to_jump_to_EXPECTED,
                                   void *disp_cp_chain_me);
 
 /* Patch the counter location into an existing ProfInc point. */
-VexInvalRange patchProfInc_S390(void  *code_to_patch,
+VexInvalRange patchProfInc_S390(VexEndness endness_host,
+                                void  *code_to_patch,
                                 ULong *location_of_counter);
 
 /* KLUDGE: See detailed comment in host_s390_defs.c. */
index 2e4df3ccc130c8f847d26e36e6578f088d91d160..cad0e7ed444a748be443cf73850a049eab0d85cc 100644 (file)
@@ -4094,6 +4094,9 @@ iselSB_S390(IRSB *bb, VexArch arch_host, VexArchInfo *archinfo_host,
    /* Do some sanity checks */
    vassert((VEX_HWCAPS_S390X(hwcaps_host) & ~(VEX_HWCAPS_S390X_ALL)) == 0);
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessBE);
+
    /* Make up an initial environment to use. */
    env = LibVEX_Alloc(sizeof(ISelEnv));
    env->vreg_ctr = 0;
index 8f5fcfe23e4e0f8867ca6b69c5d4592fa92ef8af..54bad38099543d0988d8158e32684292a9f12c22 100644 (file)
@@ -2102,7 +2102,7 @@ static UChar* push_word_from_tags ( UChar* p, UShort tags )
 
 Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
                     UChar* buf, Int nbuf, X86Instr* i, 
-                    Bool mode64,
+                    Bool mode64, VexEndness endness_host,
                     void* disp_cp_chain_me_to_slowEP,
                     void* disp_cp_chain_me_to_fastEP,
                     void* disp_cp_xindir,
@@ -3291,7 +3291,7 @@ Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
       p = doAMode_M(p, fake(4), i->Xin.EvCheck.amFailAddr);
       vassert(p - p0 == 8); /* also ensures that 0x03 offset above is ok */
       /* And crosscheck .. */
-      vassert(evCheckSzB_X86() == 8);
+      vassert(evCheckSzB_X86(endness_host) == 8);
       goto done;
    }
 
@@ -3336,7 +3336,7 @@ Int emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
 /* How big is an event check?  See case for Xin_EvCheck in
    emit_X86Instr just above.  That crosschecks what this returns, so
    we can tell if we're inconsistent. */
-Int evCheckSzB_X86 ( void )
+Int evCheckSzB_X86 ( VexEndness endness_host )
 {
    return 8;
 }
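
The emitter's crosscheck against evCheckSzB_X86 (see the Xin_EvCheck case above) guarantees that the value returned here matches the code actually generated, which is what makes the usual client-side computation safe: the event check sits at the very start of every translation, so the fast entry point is the slow entry point plus this size. The sketch below is illustrative only and not part of the patch; fast_entry_of and slow_entry are hypothetical names, but LibVEX_evCheckSzB and its new endianness argument are as declared in libvex.h further down.

   /* Illustrative sketch (not part of the patch): derive the fast
      entry point of a translation from its slow entry point. */
   static UChar* fast_entry_of ( UChar* slow_entry,
                                 VexArch arch_host,
                                 VexEndness endness_host )
   {
      return slow_entry + LibVEX_evCheckSzB( arch_host, endness_host );
   }
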
@@ -3344,10 +3344,13 @@ Int evCheckSzB_X86 ( void )
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange chainXDirect_X86 ( void* place_to_chain,
+VexInvalRange chainXDirect_X86 ( VexEndness endness_host,
+                                 void* place_to_chain,
                                  void* disp_cp_chain_me_EXPECTED,
                                  void* place_to_jump_to )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
         movl $disp_cp_chain_me_EXPECTED, %edx
         call *%edx
@@ -3389,10 +3392,13 @@ VexInvalRange chainXDirect_X86 ( void* place_to_chain,
 
 /* NB: what goes on here has to be very closely coordinated with the
    emitInstr case for XDirect, above. */
-VexInvalRange unchainXDirect_X86 ( void* place_to_unchain,
+VexInvalRange unchainXDirect_X86 ( VexEndness endness_host,
+                                   void* place_to_unchain,
                                    void* place_to_jump_to_EXPECTED,
                                    void* disp_cp_chain_me )
 {
+   vassert(endness_host == VexEndnessLE);
+
    /* What we're expecting to see is:
           jmp d32
           ud2;
@@ -3432,9 +3438,11 @@ VexInvalRange unchainXDirect_X86 ( void* place_to_unchain,
 
 /* Patch the counter address into a profile inc point, as previously
    created by the Xin_ProfInc case for emit_X86Instr. */
-VexInvalRange patchProfInc_X86 ( void*  place_to_patch,
+VexInvalRange patchProfInc_X86 ( VexEndness endness_host,
+                                 void*  place_to_patch,
                                  ULong* location_of_counter )
 {
+   vassert(endness_host == VexEndnessLE);
    vassert(sizeof(ULong*) == 4);
    UChar* p = (UChar*)place_to_patch;
    vassert(p[0] == 0x83);
index 96e2c5cf79eea0a76ff175f37a6de531a61e6efd..49676c9777b440d35baae0951ed054a0908ac053 100644 (file)
@@ -717,6 +717,7 @@ extern Bool         isMove_X86Instr      ( X86Instr*, HReg*, HReg* );
 extern Int          emit_X86Instr        ( /*MB_MOD*/Bool* is_profInc,
                                            UChar* buf, Int nbuf, X86Instr* i, 
                                            Bool mode64,
+                                           VexEndness endness_host,
                                            void* disp_cp_chain_me_to_slowEP,
                                            void* disp_cp_chain_me_to_fastEP,
                                            void* disp_cp_xindir,
@@ -745,19 +746,22 @@ extern HInstrArray* iselSB_X86           ( IRSB*,
    and so assumes that they are both <= 128, and so can use the short
    offset encoding.  This is all checked with assertions, so in the
    worst case we will merely assert at startup. */
-extern Int evCheckSzB_X86 ( void );
+extern Int evCheckSzB_X86 ( VexEndness endness_host );
 
 /* Perform a chaining and unchaining of an XDirect jump. */
-extern VexInvalRange chainXDirect_X86 ( void* place_to_chain,
+extern VexInvalRange chainXDirect_X86 ( VexEndness endness_host,
+                                        void* place_to_chain,
                                         void* disp_cp_chain_me_EXPECTED,
                                         void* place_to_jump_to );
 
-extern VexInvalRange unchainXDirect_X86 ( void* place_to_unchain,
+extern VexInvalRange unchainXDirect_X86 ( VexEndness endness_host,
+                                          void* place_to_unchain,
                                           void* place_to_jump_to_EXPECTED,
                                           void* disp_cp_chain_me );
 
 /* Patch the counter location into an existing ProfInc point. */
-extern VexInvalRange patchProfInc_X86 ( void*  place_to_patch,
+extern VexInvalRange patchProfInc_X86 ( VexEndness endness_host,
+                                        void*  place_to_patch,
                                         ULong* location_of_counter );
 
 
index 16152ba67b09a9af36bd26a6ea618230f717166e..5fe816b4eba9db0852bc03b8fefdcecb63b4402a 100644 (file)
@@ -4440,6 +4440,9 @@ HInstrArray* iselSB_X86 ( IRSB* bb,
    vassert(sizeof(max_ga) == 8);
    vassert((max_ga >> 32) == 0);
 
+   /* Check that the host's endianness is as expected. */
+   vassert(archinfo_host->endness == VexEndnessLE);
+
    /* Make up an initial environment to use. */
    env = LibVEX_Alloc(sizeof(ISelEnv));
    env->vreg_ctr = 0;
index 4472cdfcb4e85acce7e6092a4e9916c050026acb..cf9789bdb20eabdf9d61a6a237882e414ec567d9 100644 (file)
@@ -224,7 +224,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
    HInstrArray* (*iselSB)       ( IRSB*, VexArch, VexArchInfo*, VexAbiInfo*,
                                   Int, Int, Bool, Bool, Addr64 );
    Int          (*emit)         ( /*MB_MOD*/Bool*,
-                                  UChar*, Int, HInstr*, Bool,
+                                  UChar*, Int, HInstr*, Bool, VexEndness,
                                   void*, void*, void*, void* );
    IRExpr*      (*specHelper)   ( const HChar*, IRExpr**, IRStmt**, Int );
    Bool         (*preciseMemExnsFn) ( Int, Int );
@@ -232,7 +232,6 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
    DisOneInstrFn disInstrFn;
 
    VexGuestLayout* guest_layout;
-   Bool            host_is_bigendian = False;
    IRSB*           irsb;
    HInstrArray*    vcode;
    HInstrArray*    rcode;
@@ -311,12 +310,12 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr      = (void(*)(HInstr*, Bool)) ppX86Instr;
          ppReg        = (void(*)(HReg)) ppHRegX86;
          iselSB       = iselSB_X86;
-         emit         = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit         = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                         emit_X86Instr;
-         host_is_bigendian = False;
          host_word_type    = Ity_I32;
          vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
          break;
 
       case VexArchAMD64:
@@ -334,12 +333,12 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr     = (void(*)(HInstr*, Bool)) ppAMD64Instr;
          ppReg       = (void(*)(HReg)) ppHRegAMD64;
          iselSB      = iselSB_AMD64;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_AMD64Instr;
-         host_is_bigendian = False;
          host_word_type    = Ity_I64;
          vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
          break;
 
       case VexArchPPC32:
@@ -354,12 +353,12 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr     = (void(*)(HInstr*,Bool)) ppPPCInstr;
          ppReg       = (void(*)(HReg)) ppHRegPPC;
          iselSB      = iselSB_PPC;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_PPCInstr;
-         host_is_bigendian = True;
          host_word_type    = Ity_I32;
          vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessBE);
          break;
 
       case VexArchPPC64:
@@ -374,12 +373,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr     = (void(*)(HInstr*, Bool)) ppPPCInstr;
          ppReg       = (void(*)(HReg)) ppHRegPPC;
          iselSB      = iselSB_PPC;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_PPCInstr;
-         host_is_bigendian = True;
          host_word_type    = Ity_I64;
          vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessBE
+                 /* later: || vta->archinfo_host.endness == VexEndnessLE */);
          break;
 
       case VexArchS390X:
@@ -394,11 +394,11 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr     = (void(*)(HInstr*, Bool)) ppS390Instr;
          ppReg       = (void(*)(HReg)) ppHRegS390;
          iselSB      = iselSB_S390;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*)) emit_S390Instr;
-         host_is_bigendian = True;
          host_word_type    = Ity_I64;
          vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessBE);
          break;
 
       case VexArchARM:
@@ -413,12 +413,12 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr     = (void(*)(HInstr*, Bool)) ppARMInstr;
          ppReg       = (void(*)(HReg)) ppHRegARM;
          iselSB      = iselSB_ARM;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_ARMInstr;
-         host_is_bigendian = False;
          host_word_type    = Ity_I32;
          vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
          break;
 
       case VexArchARM64:
@@ -437,12 +437,12 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr     = (void(*)(HInstr*, Bool)) ppARM64Instr;
          ppReg       = (void(*)(HReg)) ppHRegARM64;
          iselSB      = iselSB_ARM64;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_ARM64Instr;
-         host_is_bigendian = False;
          host_word_type    = Ity_I64;
          vassert(are_valid_hwcaps(VexArchARM64, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE);
          break;
 
       case VexArchMIPS32:
@@ -457,16 +457,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr     = (void(*)(HInstr*, Bool)) ppMIPSInstr;
          ppReg       = (void(*)(HReg)) ppHRegMIPS;
          iselSB      = iselSB_MIPS;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_MIPSInstr;
-#        if defined(VKI_LITTLE_ENDIAN)
-         host_is_bigendian = False;
-#        elif defined(VKI_BIG_ENDIAN)
-         host_is_bigendian = True;
-#        endif
          host_word_type    = Ity_I32;
          vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE
+                 || vta->archinfo_host.endness == VexEndnessBE);
          break;
 
       case VexArchMIPS64:
@@ -481,16 +478,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          ppInstr     = (void(*)(HInstr*, Bool)) ppMIPSInstr;
          ppReg       = (void(*)(HReg)) ppHRegMIPS;
          iselSB      = iselSB_MIPS;
-         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
+         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,VexEndness,
                                void*,void*,void*,void*))
                        emit_MIPSInstr;
-#        if defined(VKI_LITTLE_ENDIAN)
-         host_is_bigendian = False;
-#        elif defined(VKI_BIG_ENDIAN)
-         host_is_bigendian = True;
-#        endif
          host_word_type    = Ity_I64;
          vassert(are_valid_hwcaps(VexArchMIPS64, vta->archinfo_host.hwcaps));
+         vassert(vta->archinfo_host.endness == VexEndnessLE
+                 || vta->archinfo_host.endness == VexEndnessBE);
          break;
 
       default:
@@ -514,6 +508,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestX86State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestX86State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
          vassert(0 == sizeof(VexGuestX86State) % 16);
          vassert(sizeof( ((VexGuestX86State*)0)->guest_CMSTART) == 4);
          vassert(sizeof( ((VexGuestX86State*)0)->guest_CMLEN  ) == 4);
@@ -534,6 +529,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestAMD64State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestAMD64State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
          vassert(0 == sizeof(VexGuestAMD64State) % 16);
          vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMSTART ) == 8);
          vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMLEN   ) == 8);
@@ -554,6 +550,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC32State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC32State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessBE);
          vassert(0 == sizeof(VexGuestPPC32State) % 16);
          vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMSTART ) == 4);
          vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMLEN   ) == 4);
@@ -574,6 +571,8 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC64State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC64State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessBE
+                 /* later: || vta->archinfo_guest.endness == VexEndnessLE */);
          vassert(0 == sizeof(VexGuestPPC64State) % 16);
          vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMSTART    ) == 8);
          vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMLEN      ) == 8);
@@ -595,6 +594,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestS390XState,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestS390XState,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessBE);
          vassert(0 == sizeof(VexGuestS390XState) % 16);
          vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMSTART    ) == 8);
          vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMLEN      ) == 8);
@@ -615,6 +615,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestARMState,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestARMState,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
          vassert(0 == sizeof(VexGuestARMState) % 16);
          vassert(sizeof( ((VexGuestARMState*)0)->guest_CMSTART) == 4);
          vassert(sizeof( ((VexGuestARMState*)0)->guest_CMLEN  ) == 4);
@@ -635,6 +636,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestARM64State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestARM64State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchARM64, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE);
          vassert(0 == sizeof(VexGuestARM64State) % 16);
          vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMSTART) == 8);
          vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMLEN  ) == 8);
@@ -655,6 +657,8 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS32State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE
+                 || vta->archinfo_guest.endness == VexEndnessBE);
          vassert(0 == sizeof(VexGuestMIPS32State) % 16);
          vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMSTART) == 4);
          vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMLEN  ) == 4);
@@ -675,6 +679,8 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS64State,host_EvC_COUNTER);
          offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS64State,host_EvC_FAILADDR);
          vassert(are_valid_hwcaps(VexArchMIPS64, vta->archinfo_guest.hwcaps));
+         vassert(vta->archinfo_guest.endness == VexEndnessLE
+                 || vta->archinfo_guest.endness == VexEndnessBE);
          vassert(0 == sizeof(VexGuestMIPS64State) % 16);
          vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMSTART) == 8);
          vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMLEN  ) == 8);
@@ -698,6 +704,8 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
         we are simulating one flavour of an architecture on a different
          flavour of the same architecture, which is pretty strange. */
       vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
+      /* ditto */
+      vassert(vta->archinfo_guest.endness == vta->archinfo_host.endness);
    }
 
    vexAllocSanityCheck();
@@ -715,7 +723,7 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
                      vta->guest_bytes, 
                      vta->guest_bytes_addr,
                      vta->chase_into_ok,
-                     host_is_bigendian,
+                     vta->archinfo_host.endness,
                      vta->sigill_diag,
                      vta->arch_guest,
                      &vta->archinfo_guest,
@@ -936,7 +944,8 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
          vex_printf("\n");
       }
       j = emit( &hi_isProfInc,
-                insn_bytes, sizeof insn_bytes, hi, mode64,
+                insn_bytes, sizeof insn_bytes, hi,
+                mode64, vta->archinfo_host.endness,
                 vta->disp_cp_chain_me_to_slowEP,
                 vta->disp_cp_chain_me_to_fastEP,
                 vta->disp_cp_xindir,
@@ -993,12 +1002,13 @@ VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
 
 /* --------- Chain/Unchain XDirects. --------- */
 
-VexInvalRange LibVEX_Chain ( VexArch arch_host,
-                             void*   place_to_chain,
-                             void*   disp_cp_chain_me_EXPECTED,
-                             void*   place_to_jump_to )
+VexInvalRange LibVEX_Chain ( VexArch    arch_host,
+                             VexEndness endness_host,
+                             void*      place_to_chain,
+                             void*      disp_cp_chain_me_EXPECTED,
+                             void*      place_to_jump_to )
 {
-   VexInvalRange (*chainXDirect)(void*, void*, void*) = NULL;
+   VexInvalRange (*chainXDirect)(VexEndness, void*, void*, void*) = NULL;
    switch (arch_host) {
       case VexArchX86:
          chainXDirect = chainXDirect_X86; break;
@@ -1011,19 +1021,23 @@ VexInvalRange LibVEX_Chain ( VexArch arch_host,
       case VexArchS390X:
          chainXDirect = chainXDirect_S390; break;
       case VexArchPPC32:
-         return chainXDirect_PPC(place_to_chain,
+         return chainXDirect_PPC(endness_host,
+                                 place_to_chain,
                                  disp_cp_chain_me_EXPECTED,
                                  place_to_jump_to, False/*!mode64*/);
       case VexArchPPC64:
-         return chainXDirect_PPC(place_to_chain,
+         return chainXDirect_PPC(endness_host,
+                                 place_to_chain,
                                  disp_cp_chain_me_EXPECTED,
                                  place_to_jump_to, True/*mode64*/);
       case VexArchMIPS32:
-         return chainXDirect_MIPS(place_to_chain,
+         return chainXDirect_MIPS(endness_host,
+                                  place_to_chain,
                                   disp_cp_chain_me_EXPECTED,
                                   place_to_jump_to, False/*!mode64*/);
       case VexArchMIPS64:
-         return chainXDirect_MIPS(place_to_chain,
+         return chainXDirect_MIPS(endness_host,
+                                  place_to_chain,
                                   disp_cp_chain_me_EXPECTED,
                                   place_to_jump_to, True/*mode64*/);
       default:
@@ -1031,17 +1045,18 @@ VexInvalRange LibVEX_Chain ( VexArch arch_host,
    }
    vassert(chainXDirect);
    VexInvalRange vir
-      = chainXDirect(place_to_chain, disp_cp_chain_me_EXPECTED,
-                     place_to_jump_to);
+      = chainXDirect(endness_host, place_to_chain,
+                     disp_cp_chain_me_EXPECTED, place_to_jump_to);
    return vir;
 }
 
-VexInvalRange LibVEX_UnChain ( VexArch arch_host,
-                               void*   place_to_unchain,
-                               void*   place_to_jump_to_EXPECTED,
-                               void*   disp_cp_chain_me )
+VexInvalRange LibVEX_UnChain ( VexArch    arch_host,
+                               VexEndness endness_host,
+                               void*      place_to_unchain,
+                               void*      place_to_jump_to_EXPECTED,
+                               void*      disp_cp_chain_me )
 {
-   VexInvalRange (*unchainXDirect)(void*, void*, void*) = NULL;
+   VexInvalRange (*unchainXDirect)(VexEndness, void*, void*, void*) = NULL;
    switch (arch_host) {
       case VexArchX86:
          unchainXDirect = unchainXDirect_X86; break;
@@ -1054,19 +1069,23 @@ VexInvalRange LibVEX_UnChain ( VexArch arch_host,
       case VexArchS390X:
          unchainXDirect = unchainXDirect_S390; break;
       case VexArchPPC32:
-         return unchainXDirect_PPC(place_to_unchain,
+         return unchainXDirect_PPC(endness_host,
+                                   place_to_unchain,
                                    place_to_jump_to_EXPECTED,
                                    disp_cp_chain_me, False/*!mode64*/);
       case VexArchPPC64:
-         return unchainXDirect_PPC(place_to_unchain,
+         return unchainXDirect_PPC(endness_host,
+                                   place_to_unchain,
                                    place_to_jump_to_EXPECTED,
                                    disp_cp_chain_me, True/*mode64*/);
       case VexArchMIPS32:
-         return unchainXDirect_MIPS(place_to_unchain,
+         return unchainXDirect_MIPS(endness_host,
+                                    place_to_unchain,
                                     place_to_jump_to_EXPECTED,
                                     disp_cp_chain_me, False/*!mode64*/);
       case VexArchMIPS64:
-         return unchainXDirect_MIPS(place_to_unchain,
+         return unchainXDirect_MIPS(endness_host,
+                                    place_to_unchain,
                                     place_to_jump_to_EXPECTED,
                                     disp_cp_chain_me, True/*mode64*/);
       default:
@@ -1074,32 +1093,33 @@ VexInvalRange LibVEX_UnChain ( VexArch arch_host,
    }
    vassert(unchainXDirect);
    VexInvalRange vir
-      = unchainXDirect(place_to_unchain, place_to_jump_to_EXPECTED,
-                       disp_cp_chain_me);
+      = unchainXDirect(endness_host, place_to_unchain,
+                       place_to_jump_to_EXPECTED, disp_cp_chain_me);
    return vir;
 }
 
-Int LibVEX_evCheckSzB ( VexArch arch_host )
+Int LibVEX_evCheckSzB ( VexArch    arch_host,
+                        VexEndness endness_host )
 {
    static Int cached = 0; /* DO NOT MAKE NON-STATIC */
    if (UNLIKELY(cached == 0)) {
       switch (arch_host) {
          case VexArchX86:
-            cached = evCheckSzB_X86(); break;
+            cached = evCheckSzB_X86(endness_host); break;
          case VexArchAMD64:
-            cached = evCheckSzB_AMD64(); break;
+            cached = evCheckSzB_AMD64(endness_host); break;
          case VexArchARM:
-            cached = evCheckSzB_ARM(); break;
+            cached = evCheckSzB_ARM(endness_host); break;
          case VexArchARM64:
-            cached = evCheckSzB_ARM64(); break;
+            cached = evCheckSzB_ARM64(endness_host); break;
          case VexArchS390X:
-            cached = evCheckSzB_S390(); break;
+            cached = evCheckSzB_S390(endness_host); break;
          case VexArchPPC32:
          case VexArchPPC64:
-            cached = evCheckSzB_PPC(); break;
+            cached = evCheckSzB_PPC(endness_host); break;
          case VexArchMIPS32:
          case VexArchMIPS64:
-            cached = evCheckSzB_MIPS(); break;
+            cached = evCheckSzB_MIPS(endness_host); break;
          default:
             vassert(0);
       }
@@ -1107,11 +1127,12 @@ Int LibVEX_evCheckSzB ( VexArch arch_host )
    return cached;
 }
 
-VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host,
-                                    void*   place_to_patch,
-                                    ULong*  location_of_counter )
+VexInvalRange LibVEX_PatchProfInc ( VexArch    arch_host,
+                                    VexEndness endness_host,
+                                    void*      place_to_patch,
+                                    ULong*     location_of_counter )
 {
-   VexInvalRange (*patchProfInc)(void*,ULong*) = NULL;
+   VexInvalRange (*patchProfInc)(VexEndness,void*,ULong*) = NULL;
    switch (arch_host) {
       case VexArchX86:
          patchProfInc = patchProfInc_X86; break;
@@ -1122,23 +1143,23 @@ VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host,
       case VexArchS390X:
          patchProfInc = patchProfInc_S390; break;
       case VexArchPPC32:
-         return patchProfInc_PPC(place_to_patch,
+         return patchProfInc_PPC(endness_host, place_to_patch,
                                  location_of_counter, False/*!mode64*/);
       case VexArchPPC64:
-         return patchProfInc_PPC(place_to_patch,
+         return patchProfInc_PPC(endness_host, place_to_patch,
                                  location_of_counter, True/*mode64*/);
       case VexArchMIPS32:
-         return patchProfInc_MIPS(place_to_patch,
+         return patchProfInc_MIPS(endness_host, place_to_patch,
                                   location_of_counter, False/*!mode64*/);
       case VexArchMIPS64:
-         return patchProfInc_MIPS(place_to_patch,
+         return patchProfInc_MIPS(endness_host, place_to_patch,
                                   location_of_counter, True/*mode64*/);
       default:
          vassert(0);
    }
    vassert(patchProfInc);
    VexInvalRange vir
-      = patchProfInc(place_to_patch, location_of_counter);
+      = patchProfInc(endness_host, place_to_patch, location_of_counter);
    return vir;
 }
 
@@ -1216,6 +1237,16 @@ const HChar* LibVEX_ppVexArch ( VexArch arch )
    }
 }
 
+const HChar* LibVEX_ppVexEndness ( VexEndness endness )
+{
+   switch (endness) {
+      case VexEndness_INVALID: return "INVALID";
+      case VexEndnessLE:       return "LittleEndian";
+      case VexEndnessBE:       return "BigEndian";
+      default:                 return "VexEndness???";
+   }
+}
+
 const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
 {
    const HChar* str = show_hwcaps(arch,hwcaps);
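
LibVEX_ppVexEndness simply maps a VexEndness value to a printable name. A minimal, purely illustrative use (assuming a VexTranslateArgs* named vta is in scope, as in LibVEX_Translate above) would be a diagnostic printed before the guest/host endianness vassert fires:

   /* Illustrative only: report a guest/host endianness mismatch. */
   if (vta->archinfo_guest.endness != vta->archinfo_host.endness) {
      vex_printf( "VEX: guest endness %s does not match host endness %s\n",
                  LibVEX_ppVexEndness( vta->archinfo_guest.endness ),
                  LibVEX_ppVexEndness( vta->archinfo_host.endness ) );
   }
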
@@ -1227,10 +1258,11 @@ const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
 void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
 {
    vex_bzero(vai, sizeof(*vai));
-   vai->hwcaps              = 0;
-   vai->ppc_icache_line_szB = 0;
-   vai->ppc_dcbz_szB        = 0;
-   vai->ppc_dcbzl_szB       = 0;
+   vai->hwcaps                  = 0;
+   vai->endness                 = VexEndness_INVALID;
+   vai->ppc_icache_line_szB     = 0;
+   vai->ppc_dcbz_szB            = 0;
+   vai->ppc_dcbzl_szB           = 0;
    vai->arm64_dMinLine_lg2_szB  = 0;
    vai->arm64_iMinLine_lg2_szB  = 0;
    vai->hwcache_info.num_levels = 0;
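
Because LibVEX_default_VexArchInfo now initialises endness to VexEndness_INVALID, embedders must set the field explicitly before calling LibVEX_Translate, otherwise the new vasserts above will fail. A minimal sketch of the caller-side change (illustrative only; the hwcaps value is a placeholder):

   /* Illustrative only: preparing VexArchInfo for a little-endian host. */
   VexArchInfo vai_host;
   LibVEX_default_VexArchInfo( &vai_host ); /* endness == VexEndness_INVALID */
   vai_host.hwcaps  = 0;            /* placeholder: real hwcaps go here */
   vai_host.endness = VexEndnessLE; /* now mandatory */
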
index 2dfb419c41bc2b4e2e3d89a06d2449da2d230652..3019dedc1cb1667a24044cd17309bd7e869592e4 100644 (file)
@@ -51,7 +51,7 @@
 
 typedef 
    enum { 
-      VexArch_INVALID,
+      VexArch_INVALID=0x400,
       VexArchX86, 
       VexArchAMD64, 
       VexArchARM,
@@ -65,6 +65,16 @@ typedef
    VexArch;
 
 
+/* Information about endianness. */
+typedef
+   enum {
+      VexEndness_INVALID=0x600, /* unknown endianness */
+      VexEndnessLE,             /* little endian */
+      VexEndnessBE              /* big endian */
+   }
+   VexEndness;
+
+
 /* For a given architecture, these specify extra capabilities beyond
    the minimum supported (baseline) capabilities.  They may be OR'd
    together, although some combinations don't make sense.  (eg, SSE2
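
How an embedder decides what to store in the new endness field is outside VEX's scope. One possible, purely illustrative probe (not part of VEX or of this patch) inspects the in-memory layout of a word:

   /* Illustrative only: determine the endianness of the machine this
      code is running on, e.g. for filling in VexArchInfo.endness. */
   static VexEndness get_host_endness ( void )
   {
      union { UInt w; UChar b[4]; } u;
      u.w = 0x01020304;
      return u.b[0] == 0x04 ? VexEndnessLE : VexEndnessBE;
   }
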
@@ -220,12 +230,13 @@ typedef
 /* These return statically allocated strings. */
 
 extern const HChar* LibVEX_ppVexArch    ( VexArch );
+extern const HChar* LibVEX_ppVexEndness ( VexEndness endness );
 extern const HChar* LibVEX_ppVexHwCaps  ( VexArch, UInt );
 
 
 /* The various kinds of caches */
 typedef enum {
-   DATA_CACHE,
+   DATA_CACHE=0x500,
    INSN_CACHE,
    UNIFIED_CACHE
 } VexCacheKind;
@@ -270,8 +281,9 @@ typedef struct {
 
 typedef
    struct {
-      /* The following two fields are mandatory. */
-      UInt hwcaps;
+      /* The following three fields are mandatory. */
+      UInt         hwcaps;
+      VexEndness   endness;
       VexCacheInfo hwcache_info;
       /* PPC32/PPC64 only: size of instruction cache line */
       Int ppc_icache_line_szB;
@@ -389,7 +401,7 @@ void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi );
    points.
 
    VexRegUpdAllregsAtEachInsn : all registers up to date at each instruction. */
-typedef enum { VexRegUpdSpAtMemAccess,
+typedef enum { VexRegUpdSpAtMemAccess=0x700,
                VexRegUpdUnwindregsAtMemAccess,
                VexRegUpdAllregsAtMemAccess,
                VexRegUpdAllregsAtEachInsn } VexRegisterUpdates;
@@ -583,7 +595,7 @@ extern void LibVEX_Init (
 typedef
    struct {
       /* overall status */
-      enum { VexTransOK,
+      enum { VexTransOK=0x800,
              VexTransAccessFail, VexTransOutputFull } status;
       /* The number of extents that have a self-check (0 to 3) */
       UInt n_sc_extents;
@@ -778,35 +790,39 @@ typedef
    currently contains a call to the dispatcher specified by
    disp_cp_chain_me_EXPECTED. */
 extern
-VexInvalRange LibVEX_Chain ( VexArch arch_host,
-                             void*   place_to_chain,
-                             void*   disp_cp_chain_me_EXPECTED,
-                             void*   place_to_jump_to );
+VexInvalRange LibVEX_Chain ( VexArch    arch_host,
+                             VexEndness endness_host,
+                             void*      place_to_chain,
+                             void*      disp_cp_chain_me_EXPECTED,
+                             void*      place_to_jump_to );
 
 /* Undo an XDirect jump located at place_to_unchain, so it is
    converted back into a call to disp_cp_chain_me.  It is expected
    (and checked) that this site currently contains a jump directly to
    the address specified by place_to_jump_to_EXPECTED. */
 extern
-VexInvalRange LibVEX_UnChain ( VexArch arch_host,
-                               void*   place_to_unchain,
-                               void*   place_to_jump_to_EXPECTED,
-                               void*   disp_cp_chain_me );
+VexInvalRange LibVEX_UnChain ( VexArch    arch_host,
+                               VexEndness endness_host,
+                               void*      place_to_unchain,
+                               void*      place_to_jump_to_EXPECTED,
+                               void*      disp_cp_chain_me );
 
 /* Returns a constant -- the size of the event check that is put at
    the start of every translation.  This makes it possible to
    calculate the fast entry point address if the slow entry point
    address is known (the usual case), or vice versa. */
 extern
-Int LibVEX_evCheckSzB ( VexArch arch_host );
+Int LibVEX_evCheckSzB ( VexArch    arch_host,
+                        VexEndness endness_host );
 
 
 /* Patch the counter location into an existing ProfInc point.  The
    specified point is checked to make sure it is plausible. */
 extern
-VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host,
-                                    void*   place_to_patch,
-                                    ULong*  location_of_counter );
+VexInvalRange LibVEX_PatchProfInc ( VexArch    arch_host,
+                                    VexEndness endness_host,
+                                    void*      place_to_patch,
+                                    ULong*     location_of_counter );
 
 
 /*-------------------------------------------------------*/
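
Taken together, the client-visible effect of this patch is one extra VexEndness argument at each of these entry points. A hedged sketch of an updated chaining call (all pointer arguments are placeholders supplied by the embedder; an AMD64 little-endian host is assumed):

   /* Illustrative only: a chaining call after this patch. */
   VexInvalRange vir
      = LibVEX_Chain( VexArchAMD64, VexEndnessLE,
                      place_to_chain,
                      disp_cp_chain_me_EXPECTED,
                      place_to_jump_to );
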