register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
" " #asm_op " %w[i], %[v]\n") \
: [i] "+r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_##op##name), \
/* LSE atomics */ \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(add_return##name) \
__nops(1), \
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;

- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC(and)
__nops(1),
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_and##name) \
__nops(1), \
register int w0 asm ("w0") = i;
register atomic_t *x1 asm ("x1") = v;

- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC(sub)
__nops(1),
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(sub_return##name) \
__nops(2), \
register int w0 asm ("w0") = i; \
register atomic_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC(fetch_sub##name) \
__nops(1), \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
" " #asm_op " %[i], %[v]\n") \
: [i] "+r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_##op##name), \
/* LSE atomics */ \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(add_return##name) \
__nops(1), \
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;

- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(and)
__nops(1),
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_and##name) \
__nops(1), \
register long x0 asm ("x0") = i;
register atomic64_t *x1 asm ("x1") = v;

- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(sub)
__nops(1),
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(sub_return##name) \
__nops(2), \
register long x0 asm ("x0") = i; \
register atomic64_t *x1 asm ("x1") = v; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_ATOMIC64(fetch_sub##name) \
__nops(1), \
{
register long x0 asm ("x0") = (long)v;

- asm volatile(ARM64_LSE_ATOMIC_INSN(
+ asm volatile(
+ __LSE_PREAMBLE
+ ARM64_LSE_ATOMIC_INSN(
/* LL/SC */
__LL_SC_ATOMIC64(dec_if_positive)
__nops(6),
register unsigned long x1 asm ("x1") = old; \
register unsigned long x2 asm ("x2") = new; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_CMPXCHG(name) \
__nops(2), \
register unsigned long x3 asm ("x3") = new2; \
register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
\
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ asm volatile( \
+ __LSE_PREAMBLE \
+ ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
__LL_SC_CMPXCHG_DBL(name) \
__nops(3), \
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)

+#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
+
#include <linux/compiler_types.h>
#include <linux/export.h>
#include <linux/stringify.h>
#else /* __ASSEMBLER__ */
-__asm__(".arch_extension lse");
-
/* Move the ll/sc atomics out-of-line */
#define __LL_SC_INLINE notrace
#define __LL_SC_PREFIX(x) __ll_sc_##x
/* In-line patching at runtime */
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
- ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+ ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
#endif /* __ASSEMBLER__ */
#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
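For context, here is a minimal standalone sketch of the pattern the diff applies; the helper name and operands are hypothetical and not taken from the kernel. Clang's integrated assembler treats each inline asm block as an independent assembly unit, so a single file-scope __asm__(".arch_extension lse") does not enable LSE for later blocks; instead the ".arch armv8-a+lse" directive has to appear at the top of every block that emits an LSE instruction.

/*
 * Hypothetical, self-contained illustration (not from the patch): with
 * clang's integrated assembler each asm block is assembled on its own,
 * so the architecture directive must sit inside every block that uses
 * an LSE instruction such as LDADD.  Runs only on ARMv8.1+ CPUs.
 */
#define __LSE_PREAMBLE	".arch armv8-a+lse\n"

static inline int lse_fetch_add(int i, int *counter)
{
	int old;

	asm volatile(
	__LSE_PREAMBLE		/* enable LSE for this block only */
	"	ldadd	%w[i], %w[old], %[v]\n"
	: [old] "=&r" (old), [v] "+Q" (*counter)
	: [i] "r" (i)
	: "memory");

	return old;		/* value of *counter before the add */
}

In the patch itself the same directive is prepended both to each asm volatile block in atomic_lse.h and to the LSE half of the ALTERNATIVE() used for in-line patching, so both the out-of-line LL/SC path and the runtime-patched LSE path assemble under the integrated assembler.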