From 5735d13007d9b82e56e12686a3267affca32eba6 Mon Sep 17 00:00:00 2001
From: Julian Seward
Date: Thu, 26 Jun 2014 10:49:33 +0000
Subject: [PATCH] Rename the vector subparts-of-lanes-reversal IROps to names
 that are easier to understand. No functional change.

git-svn-id: svn://svn.valgrind.org/vex/trunk@2890
---
 VEX/priv/guest_arm_toIR.c | 12 +++++------
 VEX/priv/host_arm_isel.c  | 44 +++++++++++++++++++--------------------
 VEX/priv/ir_defs.c        | 38 +++++++++++++++++----------------
 VEX/pub/libvex_ir.h       | 32 ++++++++++++++--------------
 4 files changed, 64 insertions(+), 62 deletions(-)

diff --git a/VEX/priv/guest_arm_toIR.c b/VEX/priv/guest_arm_toIR.c
index cb186b0b21..d1763d3220 100644
--- a/VEX/priv/guest_arm_toIR.c
+++ b/VEX/priv/guest_arm_toIR.c
@@ -6646,13 +6646,13 @@ Bool dis_neon_data_2reg_misc ( UInt theInstr, IRTemp condT )
          IROp op;
          switch (size) {
             case 0:
-               op = Q ? Iop_Reverse64_8x16 : Iop_Reverse64_8x8;
+               op = Q ? Iop_Reverse8sIn64_x2 : Iop_Reverse8sIn64_x1;
                break;
             case 1:
-               op = Q ? Iop_Reverse64_16x8 : Iop_Reverse64_16x4;
+               op = Q ? Iop_Reverse16sIn64_x2 : Iop_Reverse16sIn64_x1;
                break;
             case 2:
-               op = Q ? Iop_Reverse64_32x4 : Iop_Reverse64_32x2;
+               op = Q ? Iop_Reverse32sIn64_x2 : Iop_Reverse32sIn64_x1;
                break;
             case 3:
                return False;
@@ -6669,10 +6669,10 @@ Bool dis_neon_data_2reg_misc ( UInt theInstr, IRTemp condT )
          IROp op;
          switch (size) {
             case 0:
-               op = Q ? Iop_Reverse32_8x16 : Iop_Reverse32_8x8;
+               op = Q ? Iop_Reverse8sIn32_x4 : Iop_Reverse8sIn32_x2;
                break;
             case 1:
-               op = Q ? Iop_Reverse32_16x8 : Iop_Reverse32_16x4;
+               op = Q ? Iop_Reverse16sIn32_x4 : Iop_Reverse16sIn32_x2;
                break;
             case 2:
             case 3:
@@ -6690,7 +6690,7 @@ Bool dis_neon_data_2reg_misc ( UInt theInstr, IRTemp condT )
          IROp op;
          switch (size) {
             case 0:
-               op = Q ? Iop_Reverse16_8x16 : Iop_Reverse16_8x8;
+               op = Q ? Iop_Reverse8sIn16_x8 : Iop_Reverse8sIn16_x4;
                break;
             case 1:
             case 2:
diff --git a/VEX/priv/host_arm_isel.c b/VEX/priv/host_arm_isel.c
index 4a19a81259..75723b3a1e 100644
--- a/VEX/priv/host_arm_isel.c
+++ b/VEX/priv/host_arm_isel.c
@@ -3391,37 +3391,37 @@ static HReg iselNeon64Expr_wrk ( ISelEnv* env, IRExpr* e )
          addInstr(env, ARMInstr_NUnary(ARMneon_ABS, res, arg, size, False));
          return res;
       }
-      case Iop_Reverse64_8x8:
-      case Iop_Reverse64_16x4:
-      case Iop_Reverse64_32x2: {
+      case Iop_Reverse8sIn64_x1:
+      case Iop_Reverse16sIn64_x1:
+      case Iop_Reverse32sIn64_x1: {
          HReg res = newVRegD(env);
          HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
          UInt size = 0;
          switch(e->Iex.Binop.op) {
-            case Iop_Reverse64_8x8: size = 0; break;
-            case Iop_Reverse64_16x4: size = 1; break;
-            case Iop_Reverse64_32x2: size = 2; break;
+            case Iop_Reverse8sIn64_x1: size = 0; break;
+            case Iop_Reverse16sIn64_x1: size = 1; break;
+            case Iop_Reverse32sIn64_x1: size = 2; break;
             default: vassert(0);
          }
          addInstr(env, ARMInstr_NUnary(ARMneon_REV64,
                                        res, arg, size, False));
          return res;
       }
-      case Iop_Reverse32_8x8:
-      case Iop_Reverse32_16x4: {
+      case Iop_Reverse8sIn32_x2:
+      case Iop_Reverse16sIn32_x2: {
          HReg res = newVRegD(env);
          HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
          UInt size = 0;
          switch(e->Iex.Binop.op) {
-            case Iop_Reverse32_8x8: size = 0; break;
-            case Iop_Reverse32_16x4: size = 1; break;
+            case Iop_Reverse8sIn32_x2: size = 0; break;
+            case Iop_Reverse16sIn32_x2: size = 1; break;
             default: vassert(0);
          }
          addInstr(env, ARMInstr_NUnary(ARMneon_REV32,
                                        res, arg, size, False));
          return res;
       }
-      case Iop_Reverse16_8x8: {
+      case Iop_Reverse8sIn16_x4: {
          HReg res = newVRegD(env);
          HReg arg = iselNeon64Expr(env, e->Iex.Unop.arg);
          UInt size = 0;
@@ -4021,37 +4021,37 @@ static HReg iselNeonExpr_wrk ( ISelEnv* env, IRExpr* e )
          addInstr(env, ARMInstr_NUnary(ARMneon_ABS, res, arg, size, True));
          return res;
       }
-      case Iop_Reverse64_8x16:
-      case Iop_Reverse64_16x8:
-      case Iop_Reverse64_32x4: {
+      case Iop_Reverse8sIn64_x2:
+      case Iop_Reverse16sIn64_x2:
+      case Iop_Reverse32sIn64_x2: {
         HReg res = newVRegV(env);
         HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
         UInt size = 0;
         switch(e->Iex.Binop.op) {
-            case Iop_Reverse64_8x16: size = 0; break;
-            case Iop_Reverse64_16x8: size = 1; break;
-            case Iop_Reverse64_32x4: size = 2; break;
+            case Iop_Reverse8sIn64_x2: size = 0; break;
+            case Iop_Reverse16sIn64_x2: size = 1; break;
+            case Iop_Reverse32sIn64_x2: size = 2; break;
            default: vassert(0);
         }
         addInstr(env, ARMInstr_NUnary(ARMneon_REV64,
                                       res, arg, size, True));
         return res;
      }
-      case Iop_Reverse32_8x16:
-      case Iop_Reverse32_16x8: {
+      case Iop_Reverse8sIn32_x4:
+      case Iop_Reverse16sIn32_x4: {
         HReg res = newVRegV(env);
         HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
         UInt size = 0;
         switch(e->Iex.Binop.op) {
-            case Iop_Reverse32_8x16: size = 0; break;
-            case Iop_Reverse32_16x8: size = 1; break;
+            case Iop_Reverse8sIn32_x4: size = 0; break;
+            case Iop_Reverse16sIn32_x4: size = 1; break;
            default: vassert(0);
         }
         addInstr(env, ARMInstr_NUnary(ARMneon_REV32,
                                       res, arg, size, True));
         return res;
      }
-      case Iop_Reverse16_8x16: {
+      case Iop_Reverse8sIn16_x8: {
         HReg res = newVRegV(env);
         HReg arg = iselNeonExpr(env, e->Iex.Unop.arg);
         UInt size = 0;
diff --git a/VEX/priv/ir_defs.c b/VEX/priv/ir_defs.c
index 9cdc9f6e4b..df26947b73 100644
--- a/VEX/priv/ir_defs.c
+++ b/VEX/priv/ir_defs.c
@@ -579,12 +579,12 @@ void ppIROp ( IROp op )
       case Iop_Sal32x2: vex_printf("Sal32x2"); return;
       case Iop_Sal64x1: vex_printf("Sal64x1"); return;
       case Iop_Perm8x8: vex_printf("Perm8x8"); return;
-      case Iop_Reverse16_8x8: vex_printf("Reverse16_8x8"); return;
-      case Iop_Reverse32_8x8: vex_printf("Reverse32_8x8"); return;
-      case Iop_Reverse32_16x4: vex_printf("Reverse32_16x4"); return;
-      case Iop_Reverse64_8x8: vex_printf("Reverse64_8x8"); return;
-      case Iop_Reverse64_16x4: vex_printf("Reverse64_16x4"); return;
-      case Iop_Reverse64_32x2: vex_printf("Reverse64_32x2"); return;
+      case Iop_Reverse8sIn16_x4: vex_printf("Reverse8sIn16_x4"); return;
+      case Iop_Reverse8sIn32_x2: vex_printf("Reverse8sIn32_x2"); return;
+      case Iop_Reverse16sIn32_x2: vex_printf("Reverse16sIn32_x2"); return;
+      case Iop_Reverse8sIn64_x1: vex_printf("Reverse8sIn64_x1"); return;
+      case Iop_Reverse16sIn64_x1: vex_printf("Reverse16sIn64_x1"); return;
+      case Iop_Reverse32sIn64_x1: vex_printf("Reverse32sIn64_x1"); return;
       case Iop_Abs32Fx2: vex_printf("Abs32Fx2"); return;
       case Iop_GetMSBs8x8: vex_printf("GetMSBs8x8"); return;
       case Iop_GetMSBs8x16: vex_printf("GetMSBs8x16"); return;
@@ -970,12 +970,12 @@
       case Iop_Perm8x16: vex_printf("Perm8x16"); return;
       case Iop_Perm32x4: vex_printf("Perm32x4"); return;
 
-      case Iop_Reverse16_8x16: vex_printf("Reverse16_8x16"); return;
-      case Iop_Reverse32_8x16: vex_printf("Reverse32_8x16"); return;
-      case Iop_Reverse32_16x8: vex_printf("Reverse32_16x8"); return;
-      case Iop_Reverse64_8x16: vex_printf("Reverse64_8x16"); return;
-      case Iop_Reverse64_16x8: vex_printf("Reverse64_16x8"); return;
-      case Iop_Reverse64_32x4: vex_printf("Reverse64_32x4"); return;
+      case Iop_Reverse8sIn16_x8: vex_printf("Reverse8sIn16_x8"); return;
+      case Iop_Reverse8sIn32_x4: vex_printf("Reverse8sIn32_x4"); return;
+      case Iop_Reverse16sIn32_x4: vex_printf("Reverse16sIn32_x4"); return;
+      case Iop_Reverse8sIn64_x2: vex_printf("Reverse8sIn64_x2"); return;
+      case Iop_Reverse16sIn64_x2: vex_printf("Reverse16sIn64_x2"); return;
+      case Iop_Reverse32sIn64_x2: vex_printf("Reverse32sIn64_x2"); return;
 
       case Iop_F32ToFixed32Ux4_RZ: vex_printf("F32ToFixed32Ux4_RZ"); return;
       case Iop_F32ToFixed32Sx4_RZ: vex_printf("F32ToFixed32Sx4_RZ"); return;
@@ -2522,9 +2522,10 @@ void typeOfPrimop ( IROp op,
       case Iop_Cls8x8: case Iop_Cls16x4: case Iop_Cls32x2:
       case Iop_PwAddL8Ux8: case Iop_PwAddL16Ux4: case Iop_PwAddL32Ux2:
       case Iop_PwAddL8Sx8: case Iop_PwAddL16Sx4: case Iop_PwAddL32Sx2:
-      case Iop_Reverse64_8x8: case Iop_Reverse64_16x4: case Iop_Reverse64_32x2:
-      case Iop_Reverse32_8x8: case Iop_Reverse32_16x4:
-      case Iop_Reverse16_8x8:
+      case Iop_Reverse8sIn64_x1: case Iop_Reverse16sIn64_x1:
+      case Iop_Reverse32sIn64_x1:
+      case Iop_Reverse8sIn32_x2: case Iop_Reverse16sIn32_x2:
+      case Iop_Reverse8sIn16_x4:
       case Iop_FtoI32Sx2_RZ: case Iop_FtoI32Ux2_RZ:
       case Iop_I32StoFx2: case Iop_I32UtoFx2:
       case Iop_Recip32x2: case Iop_Recip32Fx2:
@@ -2907,9 +2908,10 @@
       case Iop_Cls8x16: case Iop_Cls16x8: case Iop_Cls32x4:
       case Iop_PwAddL8Ux16: case Iop_PwAddL16Ux8: case Iop_PwAddL32Ux4:
       case Iop_PwAddL8Sx16: case Iop_PwAddL16Sx8: case Iop_PwAddL32Sx4:
-      case Iop_Reverse64_8x16: case Iop_Reverse64_16x8: case Iop_Reverse64_32x4:
-      case Iop_Reverse32_8x16: case Iop_Reverse32_16x8:
-      case Iop_Reverse16_8x16:
+      case Iop_Reverse8sIn64_x2: case Iop_Reverse16sIn64_x2:
+      case Iop_Reverse32sIn64_x2:
+      case Iop_Reverse8sIn32_x4: case Iop_Reverse16sIn32_x4:
+      case Iop_Reverse8sIn16_x8:
       case Iop_Neg64Fx2: case Iop_Neg32Fx4:
       case Iop_Abs8x16: case Iop_Abs16x8: case Iop_Abs32x4: case Iop_Abs64x2:
       case Iop_CipherSV128:
diff --git a/VEX/pub/libvex_ir.h b/VEX/pub/libvex_ir.h
index aa9defcfda..c9a49bb01e 100644
--- a/VEX/pub/libvex_ir.h
+++ b/VEX/pub/libvex_ir.h
@@ -984,15 +984,16 @@ typedef
       /* Note: the arm back-end handles only constant third argumnet. */
       Iop_Extract64,
 
-      /* REVERSE the order of elements in each Half-words, Words,
-         Double-words */
+      /* REVERSE the order of chunks in vector lanes. Chunks must be
+         smaller than the vector lanes (obviously) and so may be 8-,
+         16- and 32-bit in size. */
       /* Examples:
-            Reverse16_8x8([a,b,c,d,e,f,g,h]) = [b,a,d,c,f,e,h,g]
-            Reverse32_8x8([a,b,c,d,e,f,g,h]) = [d,c,b,a,h,g,f,e]
-            Reverse64_8x8([a,b,c,d,e,f,g,h]) = [h,g,f,e,d,c,b,a] */
-      Iop_Reverse16_8x8,
-      Iop_Reverse32_8x8, Iop_Reverse32_16x4,
-      Iop_Reverse64_8x8, Iop_Reverse64_16x4, Iop_Reverse64_32x2,
+            Reverse8sIn16_x4([a,b,c,d,e,f,g,h]) = [b,a,d,c,f,e,h,g]
+            Reverse8sIn32_x2([a,b,c,d,e,f,g,h]) = [d,c,b,a,h,g,f,e]
+            Reverse8sIn64_x1([a,b,c,d,e,f,g,h]) = [h,g,f,e,d,c,b,a] */
+      Iop_Reverse8sIn16_x4,
+      Iop_Reverse8sIn32_x2, Iop_Reverse16sIn32_x2,
+      Iop_Reverse8sIn64_x1, Iop_Reverse16sIn64_x1, Iop_Reverse32sIn64_x1,
 
       /* PERMUTING -- copy src bytes to dst,
          as indexed by control vector bytes:
@@ -1590,14 +1591,13 @@ typedef
       /* Note: the ARM back end handles only constant arg3 in this operation. */
       Iop_ExtractV128,
 
-      /* REVERSE the order of elements in each Half-words, Words,
-         Double-words */
-      /* Examples:
-            Reverse32_16x8([a,b,c,d,e,f,g,h]) = [b,a,d,c,f,e,h,g]
-            Reverse64_16x8([a,b,c,d,e,f,g,h]) = [d,c,b,a,h,g,f,e] */
-      Iop_Reverse16_8x16,
-      Iop_Reverse32_8x16, Iop_Reverse32_16x8,
-      Iop_Reverse64_8x16, Iop_Reverse64_16x8, Iop_Reverse64_32x4,
+      /* REVERSE the order of chunks in vector lanes. Chunks must be
+         smaller than the vector lanes (obviously) and so may be 8-,
+         16- and 32-bit in size. See definitions of 64-bit SIMD
+         versions above for examples. */
+      Iop_Reverse8sIn16_x8,
+      Iop_Reverse8sIn32_x4, Iop_Reverse16sIn32_x4,
+      Iop_Reverse8sIn64_x2, Iop_Reverse16sIn64_x2, Iop_Reverse32sIn64_x2,
 
       /* PERMUTING -- copy src bytes to dst,
          as indexed by control vector bytes:
-- 
2.47.2
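
For readers skimming the rename, here is a minimal scalar C sketch of what one of the new names denotes; the helper name and the plain-uint64_t model are illustrative only and are not part of VEX or of this patch:

   #include <stdint.h>

   /* Illustrative model of Iop_Reverse8sIn16_x4: reverse (i.e. swap) the
      two 8-bit chunks inside each of the four 16-bit lanes of a 64-bit
      vector value. */
   static uint64_t reverse8sIn16_x4 ( uint64_t x )
   {
      return ((x & 0x00FF00FF00FF00FFULL) << 8)
             | ((x >> 8) & 0x00FF00FF00FF00FFULL);
   }

   /* Example: 0x0102030405060708 -> 0x0201040306050807, consistent with
      the [a,b,c,d,e,f,g,h] -> [b,a,d,c,f,e,h,g] example in libvex_ir.h. */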