gfs2: finish_xmote cleanup
author     Andreas Gruenbacher <agruenba@redhat.com>
           Fri, 12 Apr 2024 17:16:58 +0000 (19:16 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 30 May 2024 07:49:14 +0000 (09:49 +0200)
[ Upstream commit 1cd28e15864054f3c48baee9eecda1c0441c48ac ]

Currently, function finish_xmote() takes and releases the glock
spinlock.  However, all of its callers immediately take that spinlock
again, so it makes more sense to take the spinlock in the callers,
before calling finish_xmote().
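As a rough illustration of the calling-convention change (a minimal
user-space analogue, not GFS2 code; the struct, lock, and function names
below are made up):

    #include <pthread.h>

    struct glock_like {
            pthread_mutex_t lock;   /* stands in for gl->gl_lockref.lock */
            int state;
    };

    /* Before: the callee takes and releases the lock itself. */
    static void finish_locked_by_callee(struct glock_like *gl, int new_state)
    {
            pthread_mutex_lock(&gl->lock);
            gl->state = new_state;
            pthread_mutex_unlock(&gl->lock);
            /* ... every caller immediately re-takes gl->lock here ... */
    }

    /* After: the caller holds the lock across the call and any follow-up work. */
    static void finish_locked_by_caller(struct glock_like *gl, int new_state)
    {
            /* caller already holds gl->lock */
            gl->state = new_state;
    }

    static void caller(struct glock_like *gl, int new_state)
    {
            pthread_mutex_lock(&gl->lock);
            finish_locked_by_caller(gl, new_state);
            /* queue follow-up work while still holding the lock */
            pthread_mutex_unlock(&gl->lock);
    }

This avoids one unlock/lock pair per call without changing what the
function does.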

With that, thaw_glock() is the only place that sets the GLF_HAVE_REPLY
flag (still named GLF_REPLY_PENDING in this tree) outside of the glock
spinlock, but it also takes that spinlock immediately thereafter.
Change that to set the bit while the spinlock is already held.  This
allows switching from test_and_clear_bit() to test_bit() and clear_bit()
in glock_work_func().
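A hedged sketch of why that switch is safe (again a user-space analogue
with illustrative names, not the kernel bitops API):

    #include <pthread.h>

    #define REPLY_PENDING 0x1

    struct glock_like {
            pthread_mutex_t lock;   /* stands in for gl->gl_lockref.lock */
            unsigned int flags;
    };

    /* With gl->lock held, a plain test followed by a clear cannot race with
     * a setter that also takes gl->lock, so no atomic read-modify-write
     * (test_and_clear) is needed. */
    static void work_func(struct glock_like *gl)
    {
            pthread_mutex_lock(&gl->lock);
            if (gl->flags & REPLY_PENDING) {        /* test_bit()  */
                    gl->flags &= ~REPLY_PENDING;    /* clear_bit() */
                    /* finish_xmote()-style processing, still under the lock */
            }
            pthread_mutex_unlock(&gl->lock);
    }

The atomic test_and_clear_bit() was only needed while the flag could be
set and consumed without a common lock held.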

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Stable-dep-of: 9947a06d29c0 ("gfs2: do_xmote fixes")
Signed-off-by: Sasha Levin <sashal@kernel.org>
fs/gfs2/glock.c

index 1bf0d751ece0ac051b08c43a84a6d94c16fa90e4..0c719fcd4fbc5875f1d7fe2fffa3cb7da33e4796 100644
@@ -617,7 +617,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
        struct gfs2_holder *gh;
        unsigned state = ret & LM_OUT_ST_MASK;
 
-       spin_lock(&gl->gl_lockref.lock);
        trace_gfs2_glock_state_change(gl, state);
        state_change(gl, state);
        gh = find_first_waiter(gl);
@@ -665,7 +664,6 @@ retry:
                               gl->gl_target, state);
                        GLOCK_BUG_ON(gl, 1);
                }
-               spin_unlock(&gl->gl_lockref.lock);
                return;
        }
 
@@ -688,7 +686,6 @@ retry:
        }
 out:
        clear_bit(GLF_LOCK, &gl->gl_flags);
-       spin_unlock(&gl->gl_lockref.lock);
 }
 
 static bool is_system_glock(struct gfs2_glock *gl)
@@ -835,15 +832,19 @@ skip_inval:
                if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
                    target == LM_ST_UNLOCKED &&
                    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+                       spin_lock(&gl->gl_lockref.lock);
                        finish_xmote(gl, target);
-                       gfs2_glock_queue_work(gl, 0);
+                       __gfs2_glock_queue_work(gl, 0);
+                       spin_unlock(&gl->gl_lockref.lock);
                } else if (ret) {
                        fs_err(sdp, "lm_lock ret %d\n", ret);
                        GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
                }
        } else { /* lock_nolock */
+               spin_lock(&gl->gl_lockref.lock);
                finish_xmote(gl, target);
-               gfs2_glock_queue_work(gl, 0);
+               __gfs2_glock_queue_work(gl, 0);
+               spin_unlock(&gl->gl_lockref.lock);
        }
 out:
        spin_lock(&gl->gl_lockref.lock);
@@ -1099,11 +1100,12 @@ static void glock_work_func(struct work_struct *work)
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
        unsigned int drop_refs = 1;
 
-       if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+       spin_lock(&gl->gl_lockref.lock);
+       if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+               clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
                finish_xmote(gl, gl->gl_reply);
                drop_refs++;
        }
-       spin_lock(&gl->gl_lockref.lock);
        if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
            gl->gl_state != LM_ST_UNLOCKED &&
            gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -2176,8 +2178,11 @@ static void thaw_glock(struct gfs2_glock *gl)
                return;
        if (!lockref_get_not_dead(&gl->gl_lockref))
                return;
+
+       spin_lock(&gl->gl_lockref.lock);
        set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-       gfs2_glock_queue_work(gl, 0);
+       __gfs2_glock_queue_work(gl, 0);
+       spin_unlock(&gl->gl_lockref.lock);
 }
 
 /**