return ret;
}
-#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
DATA_SIZE, retaddr);
DATA_TYPE ret;
+#if DATA_SIZE == 16
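+ /* qatomic_xchg__nocheck only handles operands up to the host register
+ size, so the 16-byte case uses the dedicated atomic16_xchg primitive. */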
+ ret = atomic16_xchg(haddr, val);
+#else
ret = qatomic_xchg__nocheck(haddr, val);
+#endif
+ ATOMIC_MMU_CLEANUP;
+ atomic_trace_rmw_post(env, addr,
+ VALUE_LOW(ret),
+ VALUE_HIGH(ret),
+ VALUE_LOW(val),
+ VALUE_HIGH(val),
+ oi);
+ return ret;
+}
+
+#if DATA_SIZE == 16
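+/*
+ * For 16-byte operands only a reduced set of read-modify-write helpers
+ * (fetch_and, fetch_or) is provided, built on the atomic16_* primitives;
+ * smaller sizes get the full GEN_ATOMIC_HELPER set in the #else branch.
+ */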
+ABI_TYPE ATOMIC_NAME(fetch_and)(CPUArchState *env, vaddr addr, ABI_TYPE val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
+ DATA_TYPE ret = atomic16_fetch_and(haddr, val);
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr,
VALUE_LOW(ret),
VALUE_HIGH(ret),
VALUE_LOW(val),
VALUE_HIGH(val),
oi);
return ret;
}
+ABI_TYPE ATOMIC_NAME(fetch_or)(CPUArchState *env, vaddr addr, ABI_TYPE val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
+ DATA_TYPE ret = atomic16_fetch_or(haddr, val);
+ ATOMIC_MMU_CLEANUP;
+ atomic_trace_rmw_post(env, addr,
+ VALUE_LOW(ret),
+ VALUE_HIGH(ret),
+ VALUE_LOW(val),
+ VALUE_HIGH(val),
+ oi);
+ return ret;
+}
+#else
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
#undef GEN_ATOMIC_HELPER_FN
-#endif /* DATA SIZE < 16 */
+#endif /* DATA_SIZE == 16 */
#undef END
return BSWAP(ret);
}
-#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val,
MemOpIdx oi, uintptr_t retaddr)
{
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
DATA_SIZE, retaddr);
ABI_TYPE ret;
+#if DATA_SIZE == 16
+ ret = atomic16_xchg(haddr, BSWAP(val));
+#else
ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
+#endif
+ ATOMIC_MMU_CLEANUP;
+ atomic_trace_rmw_post(env, addr,
+ VALUE_LOW(ret),
+ VALUE_HIGH(ret),
+ VALUE_LOW(val),
+ VALUE_HIGH(val),
+ oi);
+ return BSWAP(ret);
+}
+
+#if DATA_SIZE == 16
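+/*
+ * Byte-swapped 16-byte variants: the input value is byte-swapped before
+ * the atomic operation and the returned old value is swapped back.
+ */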
+ABI_TYPE ATOMIC_NAME(fetch_and)(CPUArchState *env, vaddr addr, ABI_TYPE val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
+ DATA_TYPE ret = atomic16_fetch_and(haddr, BSWAP(val));
ATOMIC_MMU_CLEANUP;
atomic_trace_rmw_post(env, addr,
VALUE_LOW(ret),
VALUE_HIGH(ret),
VALUE_LOW(val),
VALUE_HIGH(val),
oi);
return BSWAP(ret);
}
+ABI_TYPE ATOMIC_NAME(fetch_or)(CPUArchState *env, vaddr addr, ABI_TYPE val,
+ MemOpIdx oi, uintptr_t retaddr)
+{
+ DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+ DATA_SIZE, retaddr);
+ DATA_TYPE ret = atomic16_fetch_or(haddr, BSWAP(val));
+ ATOMIC_MMU_CLEANUP;
+ atomic_trace_rmw_post(env, addr,
+ VALUE_LOW(ret),
+ VALUE_HIGH(ret),
+ VALUE_LOW(val),
+ VALUE_HIGH(val),
+ oi);
+ return BSWAP(ret);
+}
+#else
#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
#undef ADD
#undef GEN_ATOMIC_HELPER_FN
-#endif /* DATA_SIZE < 16 */
+#endif /* DATA_SIZE == 16 */
#undef END
#endif /* DATA_SIZE > 1 */
GEN_ATOMIC_HELPER_ALL(xchg)
-#undef GEN_ATOMIC_HELPER_ALL
-#undef GEN_ATOMIC_HELPER
-
Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, vaddr addr,
Int128 cmpv, Int128 newv,
MemOpIdx oi, uintptr_t retaddr);
Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, vaddr addr,
Int128 cmpv, Int128 newv,
MemOpIdx oi, uintptr_t retaddr);
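+/*
+ * 16-byte (Int128) variants of xchg, fetch_and and fetch_or, in both
+ * little- and big-endian forms.
+ */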
+GEN_ATOMIC_HELPER(xchg, Int128, o_le)
+GEN_ATOMIC_HELPER(xchg, Int128, o_be)
+GEN_ATOMIC_HELPER(fetch_and, Int128, o_le)
+GEN_ATOMIC_HELPER(fetch_and, Int128, o_be)
+GEN_ATOMIC_HELPER(fetch_or, Int128, o_le)
+GEN_ATOMIC_HELPER(fetch_or, Int128, o_be)
+
+#undef GEN_ATOMIC_HELPER_ALL
+#undef GEN_ATOMIC_HELPER
+
uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr,
MemOpIdx oi, uintptr_t ra);
uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr,