locking/qspinlock: Merge 'struct __qspinlock' into 'struct qspinlock'
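With the 'struct __qspinlock' shadow definition folded into 'struct qspinlock' itself, the byte and halfword views of the lock word become reachable directly through the lock pointer, so the '(void *)lock' casts in this file can go away. A minimal sketch of the merged layout the accesses below rely on (little-endian arrangement shown, _Q_PENDING_BITS == 8; the big-endian variant mirrors the field order):

	struct qspinlock {
		union {
			atomic_t val;			/* whole 32-bit lock word    */
			struct {
				u8	locked;		/* bits  0- 7: lock byte     */
				u8	pending;	/* bits  8-15: pending byte  */
			};
			struct {
				u16	locked_pending;	/* bits  0-15                */
				u16	tail;		/* bits 16-31: queue tail    */
			};
		};
	};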
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index e3b5520005db7fa5521510687b432f727c6715c3..3e33336911ffe0ca0ebb1bbe3c353209bf639f7b 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -69,10 +69,8 @@ struct pv_node {
 #define queued_spin_trylock(l) pv_queued_spin_steal_lock(l)
 static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
 {
-       struct __qspinlock *l = (void *)lock;
-
        if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
-           (cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
+           (cmpxchg(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
                qstat_inc(qstat_pv_lock_stealing, true);
                return true;
        }
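Operating on the one-byte 'locked' field is what keeps the steal cheap: a concurrent change of the queue tail does not make the cmpxchg() fail. For contrast, a word-wide version would look roughly like this (a sketch for comparison only, not code from this file) and would abort whenever the tail moves between the read and the cmpxchg:

	/* hypothetical word-wide steal, for comparison only */
	int val = atomic_read(&lock->val);

	if (!(val & _Q_LOCKED_PENDING_MASK) &&
	    (atomic_cmpxchg(&lock->val, val, val | _Q_LOCKED_VAL) == val)) {
		qstat_inc(qstat_pv_lock_stealing, true);
		return true;
	}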
@@ -87,16 +85,12 @@ static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
 #if _Q_PENDING_BITS == 8
 static __always_inline void set_pending(struct qspinlock *lock)
 {
-       struct __qspinlock *l = (void *)lock;
-
-       WRITE_ONCE(l->pending, 1);
+       WRITE_ONCE(lock->pending, 1);
 }
 
 static __always_inline void clear_pending(struct qspinlock *lock)
 {
-       struct __qspinlock *l = (void *)lock;
-
-       WRITE_ONCE(l->pending, 0);
+       WRITE_ONCE(lock->pending, 0);
 }
 
 /*
@@ -106,10 +100,8 @@ static __always_inline void clear_pending(struct qspinlock *lock)
  */
 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 {
-       struct __qspinlock *l = (void *)lock;
-
-       return !READ_ONCE(l->locked) &&
-              (cmpxchg(&l->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
+       return !READ_ONCE(lock->locked) &&
+              (cmpxchg(&lock->locked_pending, _Q_PENDING_VAL, _Q_LOCKED_VAL)
                        == _Q_PENDING_VAL);
 }
 #else /* _Q_PENDING_BITS == 8 */
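The '#else' branch (not touched by this diff) has no byte-sized pending field to poke, so the same helpers have to operate on the whole lock word; roughly like this (a sketch, not the exact kernel code):

	static __always_inline void set_pending(struct qspinlock *lock)
	{
		atomic_or(_Q_PENDING_VAL, &lock->val);
	}

	static __always_inline void clear_pending(struct qspinlock *lock)
	{
		atomic_andnot(_Q_PENDING_VAL, &lock->val);
	}

	static __always_inline int trylock_clear_pending(struct qspinlock *lock)
	{
		int old, new, val = atomic_read(&lock->val);

		while (!(val & _Q_LOCKED_MASK)) {
			/* try to clear pending and set locked in one step */
			old = val;
			new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
			val = atomic_cmpxchg_acquire(&lock->val, old, new);
			if (val == old)
				return 1;
		}
		return 0;
	}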
@@ -353,7 +345,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
        struct pv_node *pn = (struct pv_node *)node;
-       struct __qspinlock *l = (void *)lock;
 
        /*
         * If the vCPU is indeed halted, advance its state to match that of
@@ -372,7 +363,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
         * the hash table later on at unlock time, no atomic instruction is
         * needed.
         */
-       WRITE_ONCE(l->locked, _Q_SLOW_VAL);
+       WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
        (void)pv_hash(lock, pn);
 }
 
@@ -387,7 +378,6 @@ static u32
 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 {
        struct pv_node *pn = (struct pv_node *)node;
-       struct __qspinlock *l = (void *)lock;
        struct qspinlock **lp = NULL;
        int waitcnt = 0;
        int loop;
@@ -438,13 +428,13 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
                         *
                         * Matches the smp_rmb() in __pv_queued_spin_unlock().
                         */
-                       if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
+                       if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
                                /*
                                 * The lock was free and now we own the lock.
                                 * Change the lock value back to _Q_LOCKED_VAL
                                 * and unhash the table.
                                 */
-                               WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
+                               WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
                                WRITE_ONCE(*lp, NULL);
                                goto gotlock;
                        }
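The xchg() above does two jobs at once on the now directly addressable byte: it marks the lock as being in the slow-path state and, through its return value, reveals whether the lock happened to be free at that instant. Spelled out non-atomically, purely for illustration (the real code must be the single atomic xchg, or an unlocker could slip in between the steps):

	u8 old = READ_ONCE(lock->locked);	/* observe the byte ...            */
	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);	/* ... and flag the slow path      */
	if (old == 0) {				/* lock was free: we now own it    */
		WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
		WRITE_ONCE(*lp, NULL);		/* unhash: nobody needs the entry  */
	}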
@@ -452,7 +442,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
                WRITE_ONCE(pn->state, vcpu_hashed);
                qstat_inc(qstat_pv_wait_head, true);
                qstat_inc(qstat_pv_wait_again, waitcnt);
-               pv_wait(&l->locked, _Q_SLOW_VAL);
+               pv_wait(&lock->locked, _Q_SLOW_VAL);
 
                /*
                 * Because of lock stealing, the queue head vCPU may not be
@@ -477,7 +467,6 @@ gotlock:
 __visible void
 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 {
-       struct __qspinlock *l = (void *)lock;
        struct pv_node *node;
 
        if (unlikely(locked != _Q_SLOW_VAL)) {
@@ -506,7 +495,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
         * Now that we have a reference to the (likely) blocked pv_node,
         * release the lock.
         */
-       smp_store_release(&l->locked, 0);
+       smp_store_release(&lock->locked, 0);
 
        /*
         * At this point the memory pointed at by lock can be freed/reused,
@@ -532,7 +521,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 #ifndef __pv_queued_spin_unlock
 __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
-       struct __qspinlock *l = (void *)lock;
        u8 locked;
 
        /*
@@ -540,7 +528,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
         * unhash. Otherwise it would be possible to have multiple @lock
         * entries, which would be BAD.
         */
-       locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
+       locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
        if (likely(locked == _Q_LOCKED_VAL))
                return;
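For reference, the values this release cmpxchg can find in the directly addressed byte, and what each of them means here (assuming the standard qspinlock encodings):

	/*
	 * _Q_LOCKED_VAL : plain release; the byte is now 0 and we are done.
	 * _Q_SLOW_VAL   : a queued vCPU hashed this lock and may be halted in
	 *                 pv_wait(); fall through to the slowpath above, which
	 *                 unhashes the node, clears the byte and pv_kick()s it.
	 * anything else : unexpected (the lock was not held in a known state);
	 *                 the slowpath flags it and bails out.
	 */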