DIP("lxvw4x %d,r%u,r%u\n", (UInt)XT, rA_addr, rB_addr);
- t0 = load( Ity_V128, mkexpr( EA ) );
+ /* The load will result in the data being in BE order. */
+ if (host_endness == VexEndnessLE) {
+ IRExpr *t0_BE;
+ IRTemp perm_LE = newTemp(Ity_V128);
+
+ t0_BE = load( Ity_V128, mkexpr( EA ) );
+
+ /* Permute the data to LE format */
+ assign( perm_LE, binop( Iop_64HLtoV128, mkU64(0x0c0d0e0f08090a0b),
+ mkU64(0x0405060700010203)));
+
+ t0 = binop( Iop_Perm8x16, t0_BE, mkexpr(perm_LE) );
+ } else {
+ t0 = load( Ity_V128, mkexpr( EA ) );
+ }
+
putVSReg( XT, t0 );
break;
}
}
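For reference, the effect of the perm_LE mask can be checked with a small stand-alone C model of Iop_Perm8x16. This is only a sketch: it assumes VEX's lane convention that result byte i is taken from source byte (mask[i] & 0xF), with lane 0 being the least significant byte, and the function and variable names are illustrative, not VEX API.

#include <stdio.h>
#include <stdint.h>

/* Model of Iop_Perm8x16 (assumed convention): dst lane i = src lane
 * (mask[i] & 0xF), lane 0 = least significant byte. */
static void perm8x16(uint8_t dst[16], const uint8_t src[16],
                     const uint8_t mask[16])
{
   for (int i = 0; i < 16; i++)
      dst[i] = src[mask[i] & 0xF];
}

int main(void)
{
   /* Lanes 0..7 are the bytes of mkU64(0x0405060700010203), LSB first;
    * lanes 8..15 are the bytes of mkU64(0x0c0d0e0f08090a0b). */
   const uint8_t mask[16] = { 0x03, 0x02, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04,
                              0x0b, 0x0a, 0x09, 0x08, 0x0f, 0x0e, 0x0d, 0x0c };
   uint8_t src[16], dst[16];
   for (int i = 0; i < 16; i++)
      src[i] = (uint8_t)i;
   perm8x16(dst, src, mask);
   for (int i = 0; i < 16; i++)
      printf("%02x ", dst[i]);   /* 03 02 01 00 07 06 05 04 ... */
   printf("\n");
   return 0;
}

The mask lanes decode to the indices 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12, i.e. a byte reversal within each 32-bit word, which is exactly the fixup needed to present the BE-ordered words loaded by lxvw4x in LE order.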
if (e->tag == Iex_Load && e->Iex.Load.end == IEndianess) {
- /* Need to be able to do V128 unaligned loads. The unaligned load can
- * be accomplised using the following code sequece from the ISA. It
- * uses the lvx instruction that does two aligned loads and then
+   /* Need to be able to do V128 unaligned loads. The BE unaligned load
+    * can be accomplished using the following code sequence from the ISA.
+    * It uses the lvx instruction that does two aligned loads and then
    * permutes the data to store the required data as if it had been an
* unaligned load.
    *
    *   lvx  Vhi, 0, Rb        # load MSQ, using the unaligned address in Rb
    *   lvsl Vp, 0, Rb         # set the permute control vector
    *   addi Rb, Rb, 15        # address of LSQ
    *   lvx  Vlo, 0, Rb        # load LSQ
    *   vperm Vt, Vhi, Vlo, Vp # align the data as requested
    */
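      // lvx  Vhi, 0, Rb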
addInstr(env, PPCInstr_AvLdSt( True/*load*/, 16, Vhi,
PPCAMode_IR(0, rB)) );
- // lvsl Vp, 0, Rb
- addInstr(env, PPCInstr_AvSh( True/*left shift*/, Vp,
- PPCAMode_IR(0, rB)) );
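+      /* For LE, using lvsr here and swapping the vperm source operands
+       * below selects the same 16 memory bytes that the BE lvsl/vperm
+       * pairing selects. */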
+ if (IEndianess == Iend_LE)
+ // lvsr Vp, 0, Rb
+ addInstr(env, PPCInstr_AvSh( False/*right shift*/, Vp,
+ PPCAMode_IR(0, rB)) );
+ else
+ // lvsl Vp, 0, Rb
+ addInstr(env, PPCInstr_AvSh( True/*left shift*/, Vp,
+ PPCAMode_IR(0, rB)) );
// addi Rb_plus_15, Rb, 15
      addInstr(env, PPCInstr_Alu( Palu_ADD, rB_plus_15,
                                  rB, PPCRH_Imm(True, toUShort(15))) );
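      // lvx  Vlo, 0, Rb_plus_15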
addInstr(env, PPCInstr_AvLdSt( True/*load*/, 16, Vlo,
PPCAMode_IR(0, rB_plus_15)) );
- // vperm Vt, Vhi, Vlo, Vp
- addInstr(env, PPCInstr_AvPerm( v_dst, Vhi, Vlo, Vp ));
+ if (IEndianess == Iend_LE)
+         // vperm Vt, Vlo, Vhi, Vp
+ addInstr(env, PPCInstr_AvPerm( v_dst, Vlo, Vhi, Vp ));
+ else
+ // vperm Vt, Vhi, Vlo, Vp
+ addInstr(env, PPCInstr_AvPerm( v_dst, Vhi, Vlo, Vp ));
+
return v_dst;
}
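The lvx/lvsl/vperm trick the comment above describes can likewise be sanity-checked with a plain-C model of the BE sequence. This is a sketch only: unaligned_load_v128 and its variables are illustrative names, not part of VEX, lvx is reduced to "read the naturally aligned 16-byte quadword containing the address", and lvsl is assumed to encode the permute control bytes sh..sh+15 with sh = address mod 16.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Model of the BE sequence: two aligned quadword loads plus a permute
 * that selects bytes sh..sh+15 of the Vhi:Vlo concatenation. */
static void unaligned_load_v128(uint8_t out[16], const uint8_t *addr)
{
   uintptr_t a = (uintptr_t)addr;
   const uint8_t *hi = (const uint8_t *)(a & ~(uintptr_t)15);        /* lvx  Vhi, 0, Rb    */
   const uint8_t *lo = (const uint8_t *)((a + 15) & ~(uintptr_t)15); /* lvx  Vlo, 0, Rb+15 */
   unsigned sh = (unsigned)(a & 15);                                 /* lvsl Vp, 0, Rb     */
   for (unsigned i = 0; i < 16; i++) {                               /* vperm Vt,Vhi,Vlo,Vp */
      unsigned idx = sh + i;
      out[i] = idx < 16 ? hi[idx] : lo[idx - 16];
   }
}

int main(void)
{
   uint8_t buf[48], out[16];
   for (int i = 0; i < 48; i++)
      buf[i] = (uint8_t)i;
   unaligned_load_v128(out, buf + 5);   /* deliberately unaligned */
   printf("%s\n", memcmp(out, buf + 5, 16) == 0 ? "match" : "MISMATCH");
   return 0;
}

On LE the selector instead emits lvsr and swaps the vperm source operands, which picks out the same 16 memory bytes under LE register lane numbering.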