From: William Kosasih
Date: Thu, 3 Jul 2025 08:55:58 +0000 (+0930)
Subject: target/arm: Fix VSTR helper store alignment checks
X-Git-Tag: v10.1.0-rc0~29^2~113
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c8bde49179be1196d8ccfcfdc6269ce3861053a5;p=thirdparty%2Fqemu.git

target/arm: Fix VSTR helper store alignment checks

This patch adds alignment checks in the store operations in the VSTR
instruction.

Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1154
Signed-off-by: William Kosasih
Reviewed-by: Richard Henderson
Message-id: 20250703085604.154449-6-kosasihwilliam4@gmail.com
Signed-off-by: Peter Maydell
---

diff --git a/target/arm/tcg/mve_helper.c b/target/arm/tcg/mve_helper.c
index 8834523c23f..9587f2da7da 100644
--- a/target/arm/tcg/mve_helper.c
+++ b/target/arm/tcg/mve_helper.c
@@ -172,15 +172,17 @@ static void mve_advance_vpt(CPUARMState *env)
         mve_advance_vpt(env);                                           \
     }
 
-#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE)                         \
+#define DO_VSTR(OP, MFLAG, MSIZE, STTYPE, ESIZE, TYPE)                  \
     void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr)    \
     {                                                                   \
         TYPE *d = vd;                                                   \
         uint16_t mask = mve_element_mask(env);                          \
         unsigned b, e;                                                  \
+        int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env));            \
+        MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx);        \
         for (b = 0, e = 0; b < 16; b += ESIZE, e++) {                   \
             if (mask & (1 << b)) {                                      \
-                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
+                cpu_##STTYPE##_mmu(env, addr, d[H##ESIZE(e)], oi, GETPC()); \
             }                                                           \
             addr += MSIZE;                                              \
         }                                                               \
@@ -191,9 +193,9 @@ DO_VLDR(vldrb, MO_UB, 1, uint8_t, ldb, 1, uint8_t)
 DO_VLDR(vldrh, MO_TEUW, 2, uint16_t, ldw, 2, uint16_t)
 DO_VLDR(vldrw, MO_TEUL, 4, uint32_t, ldl, 4, uint32_t)
 
-DO_VSTR(vstrb, 1, stb, 1, uint8_t)
-DO_VSTR(vstrh, 2, stw, 2, uint16_t)
-DO_VSTR(vstrw, 4, stl, 4, uint32_t)
+DO_VSTR(vstrb, MO_UB, 1, stb, 1, uint8_t)
+DO_VSTR(vstrh, MO_TEUW, 2, stw, 2, uint16_t)
+DO_VSTR(vstrw, MO_TEUL, 4, stl, 4, uint32_t)
 
 DO_VLDR(vldrb_sh, MO_SB, 1, int8_t, ldb, 2, int16_t)
 DO_VLDR(vldrb_sw, MO_SB, 1, int8_t, ldb, 4, int32_t)
@@ -202,9 +204,9 @@ DO_VLDR(vldrb_uw, MO_UB, 1, uint8_t, ldb, 4, uint32_t)
 DO_VLDR(vldrh_sw, MO_TESW, 2, int16_t, ldw, 4, int32_t)
 DO_VLDR(vldrh_uw, MO_TEUW, 2, uint16_t, ldw, 4, uint32_t)
 
-DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
-DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
-DO_VSTR(vstrh_w, 2, stw, 4, int32_t)
+DO_VSTR(vstrb_h, MO_UB, 1, stb, 2, int16_t)
+DO_VSTR(vstrb_w, MO_UB, 1, stb, 4, int32_t)
+DO_VSTR(vstrh_w, MO_TEUW, 2, stw, 4, int32_t)
 
 #undef DO_VLDR
 #undef DO_VSTR
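
For reference, the following is a rough, hand-expanded sketch (not literal
preprocessor output) of what the updated DO_VSTR macro produces for the
16-bit case, DO_VSTR(vstrh, MO_TEUW, 2, stw, 2, uint16_t). The point of the
change is visible here: the MemOpIdx passed to cpu_stw_mmu() now carries
MO_ALIGN, so an unaligned element address raises an alignment fault, whereas
the old cpu_stw_data_ra() call performed no alignment check.

/*
 * Illustrative expansion only; indentation and exact token spelling are
 * approximate.  HELPER(mve_vstrh) resolves to helper_mve_vstrh, and
 * cpu_##stw##_mmu resolves to cpu_stw_mmu.
 */
void helper_mve_vstrh(CPUARMState *env, void *vd, uint32_t addr)
{
    uint16_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned b, e;
    int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env));
    /* MO_ALIGN in the MemOpIdx is what enforces the alignment check */
    MemOpIdx oi = make_memop_idx(MO_TEUW | MO_ALIGN, mmu_idx);
    for (b = 0, e = 0; b < 16; b += 2, e++) {
        if (mask & (1 << b)) {
            /* Predicated, alignment-checked store of one 16-bit element */
            cpu_stw_mmu(env, addr, d[H2(e)], oi, GETPC());
        }
        addr += 2;
    }
    mve_advance_vpt(env);
}

Folding MO_ALIGN into the MemOpIdx lets the existing cpu_*_mmu slow-path
machinery perform the check, so the helper itself needs no explicit
address test.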