mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
- return expand_atomic_exchange (target, mem, val, MEMMODEL_ACQUIRE, true);
+ return expand_sync_lock_test_and_set (target, mem, val);
}
/* Expand the __sync_lock_release intrinsic. EXP is the CALL_EXPR. */
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
val = expand_expr_force_mode (CALL_EXPR_ARG (exp, 1), mode);
- return expand_atomic_exchange (target, mem, val, model, false);
+ return expand_atomic_exchange (target, mem, val, model);
}
/* Expand the __atomic_compare_exchange intrinsic:
}
+#ifndef HAVE_atomic_clear
+# define HAVE_atomic_clear 0
+# define gen_atomic_clear(x,y) (gcc_unreachable (), NULL_RTX)
+#endif
+
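For readers outside optabs.c: this is the usual insn-flags stub idiom. When a
target's machine description provides an atomic_clear pattern, insn-flags.h
defines HAVE_atomic_clear to a non-zero expression and declares the real
gen_atomic_clear; the stub above takes effect only otherwise, so guarded calls
compile but can never execute. A minimal sketch of the idiom, using a
hypothetical pattern name "foo" that is not part of the patch:

    /* Provided by insn-flags.h when the target defines "foo";
       otherwise stubbed out exactly as the patch does above.  */
    #ifndef HAVE_foo
    # define HAVE_foo 0
    # define gen_foo(x) (gcc_unreachable (), NULL_RTX)
    #endif

    static void
    emit_foo_if_available (rtx op)
    {
      if (HAVE_foo)                  /* Constant 0 if the pattern is absent,  */
        emit_insn (gen_foo (op));    /* so this call is folded away.  */
    }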
/* Expand an atomic clear operation.
void _atomic_clear (BOOL *obj, enum memmodel)
EXP is the call expression. */
return const0_rtx;
}
+ if (HAVE_atomic_clear)
+ {
+ emit_insn (gen_atomic_clear (mem, model));
+ return const0_rtx;
+ }
+
/* Try issuing an __atomic_store, and allow fallback to __sync_lock_release.
Failing that, a store is issued by __atomic_store. The only way this can
fail is if the bool type is larger than a word size. Unlikely, but handle it anyway for completeness.
EXP is the call expression. */
static rtx
-expand_builtin_atomic_test_and_set (tree exp)
+expand_builtin_atomic_test_and_set (tree exp, rtx target)
{
- rtx mem, ret;
+ rtx mem;
enum memmodel model;
enum machine_mode mode;
mem = get_builtin_sync_mem (CALL_EXPR_ARG (exp, 0), mode);
model = get_memmodel (CALL_EXPR_ARG (exp, 1));
- /* Try issuing an exchange. If it is lock free, or if there is a limited
- functionality __sync_lock_test_and_set, this will utilize it. */
- ret = expand_atomic_exchange (NULL_RTX, mem, const1_rtx, model, true);
- if (ret)
- return ret;
-
- /* Otherwise, there is no lock free support for test and set. Simply
- perform a load and a store. Since this presumes a non-atomic architecture,
- also assume single threadedness and don't issue barriers either. */
-
- ret = gen_reg_rtx (mode);
- emit_move_insn (ret, mem);
- emit_move_insn (mem, const1_rtx);
- return ret;
+ return expand_atomic_test_and_set (target, mem, model);
}
break;
case BUILT_IN_ATOMIC_TEST_AND_SET:
- return expand_builtin_atomic_test_and_set (exp);
+ return expand_builtin_atomic_test_and_set (exp, target);
case BUILT_IN_ATOMIC_CLEAR:
return expand_builtin_atomic_clear (exp);
}
-/* This function expands the atomic exchange operation:
- atomically store VAL in MEM and return the previous value in MEM.
-
- MEMMODEL is the memory model variant to use.
- TARGET is an optional place to stick the return value.
- USE_TEST_AND_SET indicates whether __sync_lock_test_and_set should be used
- as a fall back if the atomic_exchange pattern does not exist. */
-
-rtx
-expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model,
- bool use_test_and_set)
+/* This function tries to emit an atomic_exchange instruction. VAL is written
+ to *MEM using memory model MODEL. The previous contents of *MEM are returned,
+ using TARGET if possible. */
+
+static rtx
+maybe_emit_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
{
enum machine_mode mode = GET_MODE (mem);
enum insn_code icode;
return ops[0].value;
}
- /* Legacy sync_lock_test_and_set works the same, but is only defined as an
- acquire barrier. If the pattern exists, and the memory model is stronger
- than acquire, add a release barrier before the instruction.
- The barrier is not needed if sync_lock_test_and_set doesn't exist since
- it will expand into a compare-and-swap loop.
+ return NULL_RTX;
+}
- Some targets have non-compliant test_and_sets, so it would be incorrect
- to emit a test_and_set in place of an __atomic_exchange. The test_and_set
- builtin shares this expander since exchange can always replace the
- test_and_set. */
+/* This function tries to implement an atomic exchange operation using
+ __sync_lock_test_and_set. VAL is written to *MEM using memory model MODEL.
+ The previous contents of *MEM are returned, using TARGET if possible.
+ Since this instruction is an acquire barrier only, stronger memory
+ models may require additional barriers to be emitted. */
- if (use_test_and_set)
+static rtx
+maybe_emit_sync_lock_test_and_set (rtx target, rtx mem, rtx val,
+ enum memmodel model)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ enum insn_code icode;
+ rtx last_insn = get_last_insn ();
+
+ icode = optab_handler (sync_lock_test_and_set_optab, mode);
+
+ /* Legacy sync_lock_test_and_set is an acquire barrier. If the pattern
+ exists, and the memory model is stronger than acquire, add a release
+ barrier before the instruction. */
+
+ if (model == MEMMODEL_SEQ_CST
+ || model == MEMMODEL_RELEASE
+ || model == MEMMODEL_ACQ_REL)
+ expand_mem_thread_fence (model);
+
+ if (icode != CODE_FOR_nothing)
{
- icode = optab_handler (sync_lock_test_and_set_optab, mode);
+ struct expand_operand ops[3];
+ create_output_operand (&ops[0], target, mode);
+ create_fixed_operand (&ops[1], mem);
+ /* VAL may have been promoted to a wider mode. Shrink it if so. */
+ create_convert_operand_to (&ops[2], val, mode, true);
+ if (maybe_expand_insn (icode, 3, ops))
+ return ops[0].value;
+ }
- if (icode != CODE_FOR_nothing)
+ /* If an external test-and-set libcall is provided, use that instead of
+ any external compare-and-swap that we might get from the compare-and-
+ swap-loop expansion later. */
+ if (!can_compare_and_swap_p (mode, false))
+ {
+ rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
+ if (libfunc != NULL)
{
- struct expand_operand ops[3];
- rtx last_insn = get_last_insn ();
-
- if (model == MEMMODEL_SEQ_CST
- || model == MEMMODEL_RELEASE
- || model == MEMMODEL_ACQ_REL)
- expand_mem_thread_fence (model);
-
- create_output_operand (&ops[0], target, mode);
- create_fixed_operand (&ops[1], mem);
- /* VAL may have been promoted to a wider mode. Shrink it if so. */
- create_convert_operand_to (&ops[2], val, mode, true);
- if (maybe_expand_insn (icode, 3, ops))
- return ops[0].value;
-
- delete_insns_since (last_insn);
+ rtx addr;
+
+ addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
+ return emit_library_call_value (libfunc, target, LCT_NORMAL,
+ mode, 2, addr, ptr_mode,
+ val, mode);
}
+ }
- /* If an external test-and-set libcall is provided, use that instead of
- any external compare-and-swap that we might get from the compare-and-
- swap-loop expansion below. */
- if (!can_compare_and_swap_p (mode, false))
- {
- rtx libfunc = optab_libfunc (sync_lock_test_and_set_optab, mode);
- if (libfunc != NULL)
- {
- rtx addr;
+ /* If the test_and_set can't be emitted, eliminate any barrier that might
+ have been emitted. */
+ delete_insns_since (last_insn);
+ return NULL_RTX;
+}
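The get_last_insn/delete_insns_since pairing above is the standard
checkpoint-and-rollback idiom for speculative expansion. It is needed here
because the release fence is emitted before we know whether the exchange
itself can be emitted. A minimal sketch of the pattern in isolation; the
function name is illustrative, the helpers are the real ones used by the
patch:

    static rtx
    maybe_emit_something (rtx target, rtx mem, enum memmodel model)
    {
      rtx last_insn = get_last_insn ();   /* Checkpoint the insn stream.  */

      expand_mem_thread_fence (model);    /* Speculative side effect.  */

      if (optab_handler (sync_lock_test_and_set_optab, GET_MODE (mem))
          == CODE_FOR_nothing)
        {
          /* Failed: delete the stray fence so the caller can try the
             next strategy with a clean insn stream.  */
          delete_insns_since (last_insn);
          return NULL_RTX;
        }
      /* ... emit the real instruction and return its result ...  */
      return target;
    }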
- if (model == MEMMODEL_SEQ_CST
- || model == MEMMODEL_RELEASE
- || model == MEMMODEL_ACQ_REL)
- expand_mem_thread_fence (model);
+/* This function tries to implement an atomic exchange operation using a
+ compare_and_swap loop. VAL is written to *MEM. The previous contents of
+ *MEM are returned, using TARGET if possible. No memory model is required
+ since a compare_and_swap loop is seq-cst. */
- addr = convert_memory_address (ptr_mode, XEXP (mem, 0));
- return emit_library_call_value (libfunc, target, LCT_NORMAL,
- mode, 2, addr, ptr_mode,
- val, mode);
- }
- }
- }
+static rtx
+maybe_emit_compare_and_swap_exchange_loop (rtx target, rtx mem, rtx val)
+{
+ enum machine_mode mode = GET_MODE (mem);
- /* Otherwise, use a compare-and-swap loop for the exchange. */
if (can_compare_and_swap_p (mode, true))
{
if (!target || !register_operand (target, mode))
return NULL_RTX;
}
+#ifndef HAVE_atomic_test_and_set
+#define HAVE_atomic_test_and_set 0
+#define gen_atomic_test_and_set(x,y,z) (gcc_unreachable (), NULL_RTX)
+#endif
+
+/* This function expands the legacy __sync_lock_test_and_set operation, which is
+ generally an atomic exchange. Some limited targets only allow the
+ constant 1 to be stored. This is an ACQUIRE operation.
+
+ TARGET is an optional place to stick the return value.
+ MEM is where VAL is stored. */
+
+rtx
+expand_sync_lock_test_and_set (rtx target, rtx mem, rtx val)
+{
+ rtx ret;
+
+ /* Try an atomic_exchange first. */
+ ret = maybe_emit_atomic_exchange (target, mem, val, MEMMODEL_ACQUIRE);
+
+ if (!ret)
+ ret = maybe_emit_sync_lock_test_and_set (target, mem, val,
+ MEMMODEL_ACQUIRE);
+ if (!ret)
+ ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
+
+ /* If there are no other options, try atomic_test_and_set if the value
+ being stored is 1. */
+ if (!ret && val == const1_rtx && HAVE_atomic_test_and_set)
+ {
+ ret = gen_atomic_test_and_set (target, mem, GEN_INT (MEMMODEL_ACQUIRE));
+ emit_insn (ret);
+ }
+
+ return ret;
+}
+
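For context, the fallback chain above serves source-level uses of the legacy
builtin such as the spinlock below; the expander picks whichever of the four
strategies the target supports. Plain user C, not part of the patch:

    static int lock_var;

    void
    spin_acquire (void)
    {
      /* Acquire semantics: later accesses cannot be hoisted above the
         exchange, which is exactly what taking a lock requires.  */
      while (__sync_lock_test_and_set (&lock_var, 1))
        ;                               /* Spin until we observe 0.  */
    }

    void
    spin_release (void)
    {
      __sync_lock_release (&lock_var);  /* Release barrier, then store 0.  */
    }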
+/* This function expands the atomic test_and_set operation:
+ atomically store a boolean TRUE into MEM and return the previous value.
+
+ MEMMODEL is the memory model variant to use.
+ TARGET is an optional place to stick the return value. */
+
+rtx
+expand_atomic_test_and_set (rtx target, rtx mem, enum memmodel model)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx ret = NULL_RTX;
+
+ if (target == NULL_RTX)
+ target = gen_reg_rtx (mode);
+
+ if (HAVE_atomic_test_and_set)
+ {
+ ret = gen_atomic_test_and_set (target, mem, GEN_INT (MEMMODEL_ACQUIRE));
+ emit_insn (ret);
+ return ret;
+ }
+
+ /* If there is no test and set, try exchange, then a compare_and_swap loop,
+ then __sync_lock_test_and_set. */
+ ret = maybe_emit_atomic_exchange (target, mem, const1_rtx, model);
+
+ if (!ret)
+ ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, const1_rtx);
+
+ if (!ret)
+ ret = maybe_emit_sync_lock_test_and_set (target, mem, const1_rtx, model);
+
+ if (ret)
+ return ret;
+
+ /* Failing all else, assume a single threaded environment and simply perform
+ the operation. */
+ emit_move_insn (target, mem);
+ emit_move_insn (mem, const1_rtx);
+ return target;
+}
+
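The new-style counterpart, for comparison (again user C, not from the patch).
Note that __atomic_test_and_set stores an implementation-defined nonzero "set"
value rather than an arbitrary one, which is why restricting the legacy path
above to the constant 1 is safe:

    #include <stdbool.h>

    static bool flag;

    bool
    try_claim (void)
    {
      /* Returns the previous state: false means we claimed the flag.  */
      return !__atomic_test_and_set (&flag, __ATOMIC_ACQUIRE);
    }

    void
    unclaim (void)
    {
      __atomic_clear (&flag, __ATOMIC_RELEASE);
    }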
+/* This function expands the atomic exchange operation:
+ atomically store VAL in MEM and return the previous value in MEM.
+
+ MEMMODEL is the memory model variant to use.
+ TARGET is an optional place to stick the return value. */
+
+rtx
+expand_atomic_exchange (rtx target, rtx mem, rtx val, enum memmodel model)
+{
+ rtx ret;
+
+ ret = maybe_emit_atomic_exchange (target, mem, val, model);
+
+ /* Next try a compare-and-swap loop for the exchange. */
+ if (!ret)
+ ret = maybe_emit_compare_and_swap_exchange_loop (target, mem, val);
+
+ return ret;
+}
+
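And the general exchange that the maybe_emit_* helpers now jointly implement,
as a caller sees it (user C, illustrative):

    static int current_owner;

    int
    swap_owner (int new_owner)
    {
      /* Atomically store new_owner and return the previous owner.  */
      return __atomic_exchange_n (&current_owner, new_owner,
                                  __ATOMIC_SEQ_CST);
    }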
/* This function expands the atomic compare exchange operation:
*PTARGET_BOOL is an optional place to store the boolean success/failure.
the result. If that doesn't work, don't do anything. */
if (GET_MODE_PRECISION (mode) > BITS_PER_WORD)
{
- rtx target = expand_atomic_exchange (NULL_RTX, mem, val, model, false);
+ rtx target = maybe_emit_atomic_exchange (NULL_RTX, mem, val, model);
+ if (!target)
+ target = maybe_emit_compare_and_swap_exchange_loop (NULL_RTX, mem, val);
if (target)
return const0_rtx;
else