smb: client: make consistent use of spin_lock_irq{save,restore}() in smbdirect.c
author    Stefan Metzmacher <metze@samba.org>
          Wed, 27 Aug 2025 13:34:09 +0000 (15:34 +0200)
committer Steve French <stfrench@microsoft.com>
          Sun, 28 Sep 2025 23:29:51 +0000 (18:29 -0500)
There is a mix of spin_lock(), spin_lock_irq() and spin_lock_irqsave()
calls, and it seems at least enqueue_reassembly() was wrong to use
plain spin_lock(): it is called via recv_done() from a softirq,
since we use IB_POLL_SOFTIRQ.
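
The danger with plain spin_lock() on data that is also touched from
interrupt context is self-deadlock. A hypothetical minimal sketch (the
names demo_lock, process_side and softirq_side are illustrative only,
not from smbdirect.c):

    static DEFINE_SPINLOCK(demo_lock);

    static void process_side(void)
    {
            spin_lock(&demo_lock);  /* IRQs remain enabled here... */
            /*
             * ...so a hardirq can fire now; on irq exit the softirq
             * below may run on this CPU and spin on demo_lock forever.
             */
            spin_unlock(&demo_lock);
    }

    static void softirq_side(void) /* e.g. a completion handler */
    {
            spin_lock(&demo_lock);
            /* touch the shared data */
            spin_unlock(&demo_lock);
    }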

And Documentation/kernel-hacking/locking.rst section
"Cheat Sheet For Locking" says:

-  Otherwise (== data can be touched in an interrupt), use
   spin_lock_irqsave() and
   spin_unlock_irqrestore().

So in order to keep it simple and safe we now use that version
everywhere. This will also help with merging functions into common
code, as the locking is then consistent in all cases.
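
As a minimal sketch of the resulting pattern, here is roughly what the
enqueue side below boils down to (simplified; see the actual hunk for
the full context):

    unsigned long flags;

    /*
     * spin_lock_irqsave() saves and restores the caller's interrupt
     * state, so the same code is safe from process, softirq and
     * hardirq context alike.
     */
    spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
    list_add_tail(&response->list, &sc->recv_io.reassembly.list);
    sc->recv_io.reassembly.queue_length++;
    spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);

Because the interrupt state is saved rather than assumed, callers do
not need to know their own context, which is what makes sharing such
helpers via common code straightforward.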

Cc: Steve French <smfrench@gmail.com>
Cc: Tom Talpey <tom@talpey.com>
Cc: Long Li <longli@microsoft.com>
Acked-by: Namjae Jeon <linkinjeon@kernel.org>
Cc: linux-cifs@vger.kernel.org
Cc: samba-technical@lists.samba.org
Signed-off-by: Stefan Metzmacher <metze@samba.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
fs/smb/client/smbdirect.c

index 6d9c9e68c76505c396f4b0e7320663a52e86df66..8e10a43c8cb1359d4472b62536730c44c410201f 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -1397,7 +1397,9 @@ static void enqueue_reassembly(
        struct smbdirect_recv_io *response,
        int data_length)
 {
-       spin_lock(&sc->recv_io.reassembly.lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
        list_add_tail(&response->list, &sc->recv_io.reassembly.list);
        sc->recv_io.reassembly.queue_length++;
        /*
@@ -1408,7 +1410,7 @@ static void enqueue_reassembly(
         */
        virt_wmb();
        sc->recv_io.reassembly.data_length += data_length;
-       spin_unlock(&sc->recv_io.reassembly.lock);
+       spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
        sc->statistics.enqueue_reassembly_queue++;
 }
 
@@ -2076,6 +2078,7 @@ again:
        if (sc->recv_io.reassembly.data_length >= size) {
                int queue_length;
                int queue_removed = 0;
+               unsigned long flags;
 
                /*
                 * Need to make sure reassembly_data_length is read before
@@ -2135,11 +2138,11 @@ again:
                                if (queue_length)
                                        list_del(&response->list);
                                else {
-                                       spin_lock_irq(
-                                               &sc->recv_io.reassembly.lock);
+                                       spin_lock_irqsave(
+                                               &sc->recv_io.reassembly.lock, flags);
                                        list_del(&response->list);
-                                       spin_unlock_irq(
-                                               &sc->recv_io.reassembly.lock);
+                                       spin_unlock_irqrestore(
+                                               &sc->recv_io.reassembly.lock, flags);
                                }
                                queue_removed++;
                                sc->statistics.dequeue_reassembly_queue++;
@@ -2157,10 +2160,10 @@ again:
                                 to_read, data_read, offset);
                }
 
-               spin_lock_irq(&sc->recv_io.reassembly.lock);
+               spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
                sc->recv_io.reassembly.data_length -= data_read;
                sc->recv_io.reassembly.queue_length -= queue_removed;
-               spin_unlock_irq(&sc->recv_io.reassembly.lock);
+               spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags);
 
                sc->recv_io.reassembly.first_entry_offset = offset;
                log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
@@ -2432,6 +2435,7 @@ cleanup_entries:
 static struct smbdirect_mr_io *get_mr(struct smbdirect_socket *sc)
 {
        struct smbdirect_mr_io *ret;
+       unsigned long flags;
        int rc;
 again:
        rc = wait_event_interruptible(sc->mr_io.ready.wait_queue,
@@ -2447,18 +2451,18 @@ again:
                return NULL;
        }
 
-       spin_lock(&sc->mr_io.all.lock);
+       spin_lock_irqsave(&sc->mr_io.all.lock, flags);
        list_for_each_entry(ret, &sc->mr_io.all.list, list) {
                if (ret->state == SMBDIRECT_MR_READY) {
                        ret->state = SMBDIRECT_MR_REGISTERED;
-                       spin_unlock(&sc->mr_io.all.lock);
+                       spin_unlock_irqrestore(&sc->mr_io.all.lock, flags);
                        atomic_dec(&sc->mr_io.ready.count);
                        atomic_inc(&sc->mr_io.used.count);
                        return ret;
                }
        }
 
-       spin_unlock(&sc->mr_io.all.lock);
+       spin_unlock_irqrestore(&sc->mr_io.all.lock, flags);
        /*
         * It is possible that we could fail to get MR because other processes may
         * try to acquire a MR at the same time. If this is the case, retry it.