#include <asm/processor.h>
#include <asm/alternative.h>
-#define SPINLOCK_LOCKVAL (get_lowcore()->spinlock_lockval)
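+/*
+ * Return this CPU's spinlock lockval (cpu number + 1), read directly
+ * from the lowcore.
+ */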
+static __always_inline unsigned int spinlock_lockval(void)
+{
+	unsigned long lc_lockval;
+	unsigned int lockval;
+
+	BUILD_BUG_ON(sizeof_field(struct lowcore, spinlock_lockval) != sizeof(lockval));
+	lc_lockval = offsetof(struct lowcore, spinlock_lockval);
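+	/*
+	 * Load spinlock_lockval from the lowcore: the alternative is
+	 * patched to address the relocated lowcore when MFEATURE_LOWCORE
+	 * is set.
+	 */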
+	asm_inline(
+		ALTERNATIVE("	ly	%[lockval],%[offzero](%%r0)\n",
+			    "	ly	%[lockval],%[offalt](%%r0)\n",
+			    ALT_FEATURE(MFEATURE_LOWCORE))
+		: [lockval] "=d" (lockval)
+		: [offzero] "i" (lc_lockval),
+		  [offalt] "i" (lc_lockval + LOWCORE_ALT_ADDRESS),
+		  "m" (((struct lowcore *)0)->spinlock_lockval));
+	return lockval;
+}
extern int spin_retry;
int old = 0;
barrier();
- return likely(arch_try_cmpxchg(&lp->lock, &old, SPINLOCK_LOCKVAL));
+ return likely(arch_try_cmpxchg(&lp->lock, &old, spinlock_lockval()));
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
ix = get_lowcore()->spinlock_index++;
barrier();
- lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
+ lockval = spinlock_lockval(); /* cpu + 1 */
node = this_cpu_ptr(&spin_wait[ix]);
node->prev = node->next = NULL;
node_id = node->node_id;
{
int lockval, old, new, owner, count;
- lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */
+ lockval = spinlock_lockval(); /* cpu + 1 */
/* Pass the virtual CPU to the lock holder if it is not running */
owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
- int cpu = SPINLOCK_LOCKVAL;
+ int cpu = spinlock_lockval();
int owner, count;
for (count = spin_retry; count > 0; count--) {