git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
sunrpc: convert queue_wait from global to per-cache-detail waitqueue
author: Jeff Layton <jlayton@kernel.org>
Mon, 23 Feb 2026 17:10:00 +0000 (12:10 -0500)
committer: Chuck Lever <chuck.lever@oracle.com>
Mon, 30 Mar 2026 01:25:09 +0000 (21:25 -0400)
The queue_wait waitqueue is currently a file-scoped global, so a
wake_up for one cache_detail wakes pollers on all caches. Convert it
to a per-cache-detail field so that only pollers on the relevant cache
are woken.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
include/linux/sunrpc/cache.h
net/sunrpc/cache.c

index 3d32dd1f7b05d35562d2064fed69877b3950fb51..031379efba24d40f64ce346cf1032261d4b98d05 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/atomic.h>
 #include <linux/kstrtox.h>
 #include <linux/proc_fs.h>
+#include <linux/wait.h>
 
 /*
  * Each cache requires:
@@ -114,6 +115,7 @@ struct cache_detail {
        /* fields for communication over channel */
        struct list_head        queue;
        spinlock_t              queue_lock;
+       wait_queue_head_t       queue_wait;
 
        atomic_t                writers;                /* how many time is /channel open */
        time64_t                last_close;             /* if no writers, when did last close */
index 1cfaae488c6c67a9797511804e4bbba16bcc70ae..fd02dca1f07afec2f09c591037bac3ea3e8d7e17 100644 (file)
@@ -401,6 +401,7 @@ void sunrpc_init_cache_detail(struct cache_detail *cd)
        spin_lock_init(&cd->hash_lock);
        INIT_LIST_HEAD(&cd->queue);
        spin_lock_init(&cd->queue_lock);
+       init_waitqueue_head(&cd->queue_wait);
        spin_lock(&cache_list_lock);
        cd->nextcheck = 0;
        cd->entries = 0;
@@ -970,8 +971,6 @@ out:
        return ret;
 }
 
-static DECLARE_WAIT_QUEUE_HEAD(queue_wait);
-
 static __poll_t cache_poll(struct file *filp, poll_table *wait,
                               struct cache_detail *cd)
 {
@@ -979,7 +978,7 @@ static __poll_t cache_poll(struct file *filp, poll_table *wait,
        struct cache_reader *rp = filp->private_data;
        struct cache_queue *cq;
 
-       poll_wait(filp, &queue_wait, wait);
+       poll_wait(filp, &cd->queue_wait, wait);
 
        /* alway allow write */
        mask = EPOLLOUT | EPOLLWRNORM;
@@ -1259,7 +1258,7 @@ static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
                /* Lost a race, no longer PENDING, so don't enqueue */
                ret = -EAGAIN;
        spin_unlock(&detail->queue_lock);
-       wake_up(&queue_wait);
+       wake_up(&detail->queue_wait);
        if (ret == -EAGAIN) {
                kfree(buf);
                kfree(crq);