git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
gfs2: Do not use atomic operations unnecessarily
author: Andreas Gruenbacher <agruenba@redhat.com>
Mon, 4 Aug 2025 23:58:47 +0000 (01:58 +0200)
committer: Andreas Gruenbacher <agruenba@redhat.com>
Fri, 12 Sep 2025 10:02:18 +0000 (12:02 +0200)
The GLF_DEMOTE_IN_PROGRESS and GLF_LOCK flags and the glock refcount are
all protected by the glock spin lock, so there is no need for atomic
operations / barriers here.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Reviewed-by: Andrew Price <anprice@redhat.com>
fs/gfs2/glock.c

index 5bdb11de5b132bb19abd7dac6b1f21f8c0c1db88..1ced38b9a5a25dc55b6e3220aba2cd924c5836c0 100644 (file)
@@ -646,8 +646,10 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
        }
 
        /* Fast path - we got what we asked for */
-       if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
+       if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
+               clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
                gfs2_demote_wake(gl);
+       }
        if (gl->gl_state != LM_ST_UNLOCKED) {
                if (glops->go_xmote_bh) {
                        int rv;
@@ -891,14 +893,12 @@ __acquires(&gl->gl_lockref.lock)
 
 out_sched:
        clear_bit(GLF_LOCK, &gl->gl_flags);
-       smp_mb__after_atomic();
        gl->gl_lockref.count++;
        gfs2_glock_queue_work(gl, 0);
        return;
 
 out_unlock:
        clear_bit(GLF_LOCK, &gl->gl_flags);
-       smp_mb__after_atomic();
 }
 
 /**