From 96ddd48774217a448c16995d5adb4ba402517232 Mon Sep 17 00:00:00 2001
From: Julian Seward
Date: Tue, 26 Mar 2013 10:27:39 +0000
Subject: [PATCH] Implement SSE4 MOVNTDQA insn.  Fixes #316503.  (Patrick J.
 LoPresti, lopresti@gmail.com)

git-svn-id: svn://svn.valgrind.org/vex/trunk@2700
---
 VEX/priv/guest_amd64_toIR.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/VEX/priv/guest_amd64_toIR.c b/VEX/priv/guest_amd64_toIR.c
index 1034971836..7e98e761e6 100644
--- a/VEX/priv/guest_amd64_toIR.c
+++ b/VEX/priv/guest_amd64_toIR.c
@@ -16378,6 +16378,25 @@ Long dis_ESC_0F38__SSE4 ( Bool* decode_OK,
       }
       break;
 
+   case 0x2A:
+      /* 66 0F 38 2A = MOVNTDQA
+         "non-temporal" "streaming" load
+         Handle like MOVDQA but only memory operand is allowed */
+      if (have66noF2noF3(pfx) && sz == 2) {
+         modrm = getUChar(delta);
+         if (!epartIsReg(modrm)) {
+            addr = disAMode ( &alen, vbi, pfx, delta, dis_buf, 0 );
+            gen_SEGV_if_not_16_aligned( addr );
+            putXMMReg( gregOfRexRM(pfx,modrm),
+                       loadLE(Ity_V128, mkexpr(addr)) );
+            DIP("movntdqa %s,%s\n", dis_buf,
+                nameXMMReg(gregOfRexRM(pfx,modrm)));
+            delta += alen;
+            goto decode_success;
+         }
+      }
+      break;
+
    case 0x2B:
       /* 66 0f 38 2B /r = PACKUSDW xmm1, xmm2/m128
          2x 32x4 S->U saturating narrow from xmm2/m128 to xmm1 */
-- 
2.47.2
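
For context, below is a minimal guest-side sketch (not part of the patch) of the kind of code this decoder change lets Valgrind handle. The SSE4.1 intrinsic _mm_stream_load_si128 from <smmintrin.h> compiles to MOVNTDQA; the instruction only accepts a memory source and faults on an unaligned address, which is why the decode path above rejects register operands and calls gen_SEGV_if_not_16_aligned. The buffer names and compiler flags are illustrative assumptions, not taken from the patch.

/* Illustrative use of MOVNTDQA via the SSE4.1 intrinsic; not part of the
   patch.  Build with something like: gcc -msse4.1 -O2 movntdqa_demo.c */
#include <smmintrin.h>   /* SSE4.1 intrinsics, incl. _mm_stream_load_si128 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
   /* MOVNTDQA requires a 16-byte aligned memory operand; an unaligned
      address faults, which the patch models with
      gen_SEGV_if_not_16_aligned().                                     */
   __attribute__((aligned(16))) int32_t src[4] = { 1, 2, 3, 4 };

   /* Non-temporal ("streaming") 128-bit load: emits movntdqa.          */
   __m128i v = _mm_stream_load_si128((__m128i *) src);

   int32_t dst[4];
   _mm_storeu_si128((__m128i *) dst, v);
   printf("%d %d %d %d\n", dst[0], dst[1], dst[2], dst[3]);
   return 0;
}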