if (GET_ATTRIB(opcode, A_SCALAR_STORE) &&
!GET_ATTRIB(opcode, A_MEMSIZE_0B)) {
if (pkt->insn[i].slot == 0) {
- pkt->pkt_has_store_s0 = true;
+ pkt->pkt_has_scalar_store_s0 = true;
} else {
- pkt->pkt_has_store_s1 = true;
+ pkt->pkt_has_scalar_store_s1 = true;
}
}
}
if hex_common.need_slot(tag):
    if "A_LOAD" in hex_common.attribdict[tag]:
        f.write(hex_common.code_fmt(f"""\
-           bool pkt_has_store_s1 = slotval & 0x1;
+           bool pkt_has_scalar_store_s1 = slotval & 0x1;
        """))
    f.write(hex_common.code_fmt(f"""\
        uint32_t slot = slotval >> 1;
#ifndef CONFIG_HEXAGON_IDEF_PARSER
static TCGv gen_slotval(DisasContext *ctx)
{
- int slotval = (ctx->pkt->pkt_has_store_s1 & 1) | (ctx->insn->slot << 1);
+ int slotval =
+ (ctx->pkt->pkt_has_scalar_store_s1 & 1) | (ctx->insn->slot << 1);
return tcg_constant_tl(slotval);
}
#endif
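The generator change and gen_slotval() above form an encode/decode pair: bit 0 of slotval carries the renamed pkt_has_scalar_store_s1 flag and the remaining bits carry the slot number. A minimal sketch of the round trip, using illustrative local variable names rather than the generated code's own:

    uint32_t slotval = (pkt_has_scalar_store_s1 & 1) | (slot << 1); /* pack, as in gen_slotval() */
    bool store_s1 = slotval & 0x1;   /* unpack, as in the generated helper prologue */
    uint32_t slot_out = slotval >> 1;
    /* e.g. a slot 0 load in a packet with a slot 1 scalar store: slotval == 1,
       so store_s1 == true and slot_out == 0 */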
::
---- 00021094
- mov_i32 pkt_has_store_s1,$0x0
+ mov_i32 pkt_has_scalar_store_s1,$0x0
add_i32 tmp0,r2,r2
mov_i32 loc2,tmp0
mov_i32 new_r1,loc2
void gen_load_cancel(Context *c, YYLTYPE *locp)
{
- OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n");
+ OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_scalar_store_s1) {\n");
OUT(c, locp, "ctx->s1_store_processed = false;\n");
OUT(c, locp, "process_store(ctx, 1);\n");
OUT(c, locp, "}\n");
/* Lookup the effective address EA */
find_variable(c, locp, ea, ea);
- OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n");
+ OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_scalar_store_s1) {\n");
OUT(c, locp, "probe_noshuf_load(", ea, ", ", width, ", ctx->mem_idx);\n");
OUT(c, locp, "process_store(ctx, 1);\n");
OUT(c, locp, "}\n");
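For context, these OUT() calls assemble the C source of the generated front end; after the rename, a load in slot 0 emits code along these lines (the EA variable and the 4-byte width stand in for the parser's ea and width arguments and are only illustrative):

    if (insn->slot == 0 && pkt->pkt_has_scalar_store_s1) {
        probe_noshuf_load(EA, 4, ctx->mem_idx);
        process_store(ctx, 1);
    }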
bool pkt_has_dczeroa;
- bool pkt_has_store_s0;
- bool pkt_has_store_s1;
+ bool pkt_has_scalar_store_s0;
+ bool pkt_has_scalar_store_s1;
bool pkt_has_hvx;
Insn *vhist_insn;
*/
#define CHECK_NOSHUF(VA, SIZE) \
do { \
- if (insn->slot == 0 && ctx->pkt->pkt_has_store_s1) { \
+ if (insn->slot == 0 && ctx->pkt->pkt_has_scalar_store_s1) { \
probe_noshuf_load(VA, SIZE, ctx->mem_idx); \
process_store(ctx, 1); \
} \
TCGLabel *noshuf_label = gen_new_label(); \
tcg_gen_brcondi_tl(TCG_COND_EQ, PRED, 0, noshuf_label); \
GET_EA; \
- if (insn->slot == 0 && ctx->pkt->pkt_has_store_s1) { \
+ if (insn->slot == 0 && ctx->pkt->pkt_has_scalar_store_s1) { \
probe_noshuf_load(EA, SIZE, ctx->mem_idx); \
} \
gen_set_label(noshuf_label); \
- if (insn->slot == 0 && ctx->pkt->pkt_has_store_s1) { \
+ if (insn->slot == 0 && ctx->pkt->pkt_has_scalar_store_s1) { \
process_store(ctx, 1); \
} \
} while (0)
#define fLOAD(NUM, SIZE, SIGN, EA, DST) \
do { \
- check_noshuf(env, pkt_has_store_s1, slot, EA, SIZE, GETPC()); \
+ check_noshuf(env, pkt_has_scalar_store_s1, slot, EA, SIZE, GETPC()); \
DST = (size##SIZE##SIGN##_t)MEM_LOAD##SIZE(env, EA, GETPC()); \
} while (0)
#endif
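As a quick check on the macro change, expanding fLOAD for a hypothetical 4-byte unsigned load (the DST operand name is kept generic) shows the renamed flag being threaded through to check_noshuf():

    do {
        check_noshuf(env, pkt_has_scalar_store_s1, slot, EA, 4, GETPC());
        DST = (size4u_t)MEM_LOAD4(env, EA, GETPC());
    } while (0)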
* If the load is in slot 0 and there is a store in slot1 (that
* wasn't cancelled), we have to do the store first.
*/
-static void check_noshuf(CPUHexagonState *env, bool pkt_has_store_s1,
+static void check_noshuf(CPUHexagonState *env, bool pkt_has_scalar_store_s1,
uint32_t slot, target_ulong vaddr, int size,
uintptr_t ra)
{
- if (slot == 0 && pkt_has_store_s1 &&
+ if (slot == 0 && pkt_has_scalar_store_s1 &&
((env->slot_cancelled & (1 << 1)) == 0)) {
probe_read(env, vaddr, size, MMU_USER_IDX, ra);
commit_store(env, 1, ra);
* the memory accesses overlap.
*/
Packet *pkt = ctx->pkt;
- if (pkt->pkt_has_store_s1) {
+ if (pkt->pkt_has_scalar_store_s1) {
g_assert(!pkt->pkt_has_dczeroa);
process_store(ctx, 1);
}
- if (pkt->pkt_has_store_s0) {
+ if (pkt->pkt_has_scalar_store_s0) {
g_assert(!pkt->pkt_has_dczeroa);
process_store(ctx, 0);
}
* involved in committing the packet.
*/
Packet *pkt = ctx->pkt;
- bool has_store_s0 = pkt->pkt_has_store_s0;
- bool has_store_s1 = (pkt->pkt_has_store_s1 && !ctx->s1_store_processed);
+ bool has_store_s0 = pkt->pkt_has_scalar_store_s0;
+ bool has_store_s1 =
+ (pkt->pkt_has_scalar_store_s1 && !ctx->s1_store_processed);
bool has_hvx_store = pkt_has_hvx_store(pkt);
if (pkt->pkt_has_dczeroa) {
/*