netfs: Don't use bh spinlock
author    David Howells <dhowells@redhat.com>
          Mon, 16 Dec 2024 20:41:00 +0000 (20:41 +0000)
committer Christian Brauner <brauner@kernel.org>
          Fri, 20 Dec 2024 21:34:04 +0000 (22:34 +0100)
All accesses to the subrequest lists are now done in process context,
possibly in a workqueue, but no longer in BH context, so we don't need to
guard against BH interference when taking the netfs_io_request::lock
spinlock.
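To illustrate the locking rule this change relies on, here is a minimal
sketch (not taken from the patch; struct toy_req, toy_add_bh_safe() and
toy_add() are invented names): spin_lock_bh() masks softirqs so that a BH
handler on the same CPU cannot spin on a lock already held by the task it
interrupted; once every taker of the lock runs in process context, plain
spin_lock() is sufficient and avoids the cost of disabling and re-enabling
BHs.

/* Illustrative sketch only; lock and list assumed already initialised. */
#include <linux/spinlock.h>
#include <linux/list.h>

struct toy_req {			/* hypothetical stand-in for netfs_io_request */
	spinlock_t		lock;
	struct list_head	subrequests;
};

/* Old rule: the lock may also be taken from BH context, so BHs must be
 * masked around the critical section to avoid a same-CPU deadlock.
 */
static void toy_add_bh_safe(struct toy_req *req, struct list_head *link)
{
	spin_lock_bh(&req->lock);
	list_add_tail(link, &req->subrequests);
	spin_unlock_bh(&req->lock);
}

/* New rule: every taker runs in process context (e.g. a workqueue worker),
 * so the plain lock suffices.
 */
static void toy_add(struct toy_req *req, struct list_head *link)
{
	spin_lock(&req->lock);
	list_add_tail(link, &req->subrequests);
	spin_unlock(&req->lock);
}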

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-11-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/netfs/buffered_read.c
fs/netfs/direct_read.c
fs/netfs/read_collect.c
fs/netfs/read_retry.c
fs/netfs/write_collect.c
fs/netfs/write_issue.c

fs/netfs/buffered_read.c
index fa1013020ac91c0d3a8ebe6c138f7d2c3f9def7a..4ff4b587dc4b7d2b5a35d24ccc587fa66e4233a4 100644
@@ -200,12 +200,12 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
                subreq->len     = size;
 
                atomic_inc(&rreq->nr_outstanding);
-               spin_lock_bh(&rreq->lock);
+               spin_lock(&rreq->lock);
                list_add_tail(&subreq->rreq_link, &rreq->subrequests);
                subreq->prev_donated = rreq->prev_donated;
                rreq->prev_donated = 0;
                trace_netfs_sreq(subreq, netfs_sreq_trace_added);
-               spin_unlock_bh(&rreq->lock);
+               spin_unlock(&rreq->lock);
 
                source = netfs_cache_prepare_read(rreq, subreq, rreq->i_size);
                subreq->source = source;
fs/netfs/direct_read.c
index 54027fd149040e33e24ff7988d64f66bce535ba3..1a20cc3979c7f671748146e031fe40e88e40c7b4 100644
@@ -68,12 +68,12 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
                subreq->len     = size;
 
                atomic_inc(&rreq->nr_outstanding);
-               spin_lock_bh(&rreq->lock);
+               spin_lock(&rreq->lock);
                list_add_tail(&subreq->rreq_link, &rreq->subrequests);
                subreq->prev_donated = rreq->prev_donated;
                rreq->prev_donated = 0;
                trace_netfs_sreq(subreq, netfs_sreq_trace_added);
-               spin_unlock_bh(&rreq->lock);
+               spin_unlock(&rreq->lock);
 
                netfs_stat(&netfs_n_rh_download);
                if (rreq->netfs_ops->prepare_read) {
fs/netfs/read_collect.c
index 454a5bbdd6f872dd8ec0018f1062a7ad18a33fae..26e430baeb5ad80a4e638a321b6e7bfd82db6fee 100644
@@ -144,7 +144,7 @@ donation_changed:
        prev_donated = READ_ONCE(subreq->prev_donated);
        next_donated =  READ_ONCE(subreq->next_donated);
        if (prev_donated || next_donated) {
-               spin_lock_bh(&rreq->lock);
+               spin_lock(&rreq->lock);
                prev_donated = subreq->prev_donated;
                next_donated =  subreq->next_donated;
                subreq->start -= prev_donated;
@@ -157,7 +157,7 @@ donation_changed:
                        next_donated = subreq->next_donated = 0;
                }
                trace_netfs_sreq(subreq, netfs_sreq_trace_add_donations);
-               spin_unlock_bh(&rreq->lock);
+               spin_unlock(&rreq->lock);
        }
 
        avail = subreq->transferred;
@@ -186,18 +186,18 @@ donation_changed:
                } else if (fpos < start) {
                        excess = fend - subreq->start;
 
-                       spin_lock_bh(&rreq->lock);
+                       spin_lock(&rreq->lock);
                        /* If we complete first on a folio split with the
                         * preceding subreq, donate to that subreq - otherwise
                         * we get the responsibility.
                         */
                        if (subreq->prev_donated != prev_donated) {
-                               spin_unlock_bh(&rreq->lock);
+                               spin_unlock(&rreq->lock);
                                goto donation_changed;
                        }
 
                        if (list_is_first(&subreq->rreq_link, &rreq->subrequests)) {
-                               spin_unlock_bh(&rreq->lock);
+                               spin_unlock(&rreq->lock);
                                pr_err("Can't donate prior to front\n");
                                goto bad;
                        }
@@ -213,7 +213,7 @@ donation_changed:
 
                        if (subreq->consumed >= subreq->len)
                                goto remove_subreq_locked;
-                       spin_unlock_bh(&rreq->lock);
+                       spin_unlock(&rreq->lock);
                } else {
                        pr_err("fpos > start\n");
                        goto bad;
@@ -241,11 +241,11 @@ donation_changed:
        /* Donate the remaining downloaded data to one of the neighbouring
         * subrequests.  Note that we may race with them doing the same thing.
         */
-       spin_lock_bh(&rreq->lock);
+       spin_lock(&rreq->lock);
 
        if (subreq->prev_donated != prev_donated ||
            subreq->next_donated != next_donated) {
-               spin_unlock_bh(&rreq->lock);
+               spin_unlock(&rreq->lock);
                cond_resched();
                goto donation_changed;
        }
@@ -296,11 +296,11 @@ donation_changed:
        goto remove_subreq_locked;
 
 remove_subreq:
-       spin_lock_bh(&rreq->lock);
+       spin_lock(&rreq->lock);
 remove_subreq_locked:
        subreq->consumed = subreq->len;
        list_del(&subreq->rreq_link);
-       spin_unlock_bh(&rreq->lock);
+       spin_unlock(&rreq->lock);
        netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_consumed);
        return true;
 
fs/netfs/read_retry.c
index a2021efa44c0fe21c6bdb344896f74148e514ae2..a33bd06e80f816d59d75ed6ecb04c2c2dc2e6a05 100644
@@ -142,12 +142,12 @@ static void netfs_retry_read_subrequests(struct netfs_io_request *rreq)
                        __clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
                        subreq->retry_count++;
 
-                       spin_lock_bh(&rreq->lock);
+                       spin_lock(&rreq->lock);
                        list_add_tail(&subreq->rreq_link, &rreq->subrequests);
                        subreq->prev_donated += rreq->prev_donated;
                        rreq->prev_donated = 0;
                        trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
-                       spin_unlock_bh(&rreq->lock);
+                       spin_unlock(&rreq->lock);
 
                        BUG_ON(!len);
 
@@ -217,9 +217,9 @@ abandon:
                __clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
                __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
        }
-       spin_lock_bh(&rreq->lock);
+       spin_lock(&rreq->lock);
        list_splice_tail_init(&queue, &rreq->subrequests);
-       spin_unlock_bh(&rreq->lock);
+       spin_unlock(&rreq->lock);
 }
 
 /*
fs/netfs/write_collect.c
index 237018caba27e8d1f5466224fdc0cede3683a7b9..f026cbc0e2fe1c4e8beb74c822dd0f981722b6e0 100644
@@ -238,14 +238,14 @@ reassess_streams:
 
                cancel:
                        /* Remove if completely consumed. */
-                       spin_lock_bh(&wreq->lock);
+                       spin_lock(&wreq->lock);
 
                        remove = front;
                        list_del_init(&front->rreq_link);
                        front = list_first_entry_or_null(&stream->subrequests,
                                                         struct netfs_io_subrequest, rreq_link);
                        stream->front = front;
-                       spin_unlock_bh(&wreq->lock);
+                       spin_unlock(&wreq->lock);
                        netfs_put_subrequest(remove, false,
                                             notes & SAW_FAILURE ?
                                             netfs_sreq_trace_put_cancel :
fs/netfs/write_issue.c
index 7a14a48e62ee44fc5a885e409af7bdb7a1522400..286bc2aa3ca0bd3b37fb8661da61156a957e5cbd 100644
@@ -203,7 +203,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
         * the list.  The collector only goes nextwards and uses the lock to
         * remove entries off of the front.
         */
-       spin_lock_bh(&wreq->lock);
+       spin_lock(&wreq->lock);
        list_add_tail(&subreq->rreq_link, &stream->subrequests);
        if (list_is_first(&subreq->rreq_link, &stream->subrequests)) {
                stream->front = subreq;
@@ -214,7 +214,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
                }
        }
 
-       spin_unlock_bh(&wreq->lock);
+       spin_unlock(&wreq->lock);
 
        stream->construct = subreq;
 }