SUNRPC: integrate back-channel processing with svc_recv()
author     NeilBrown <neilb@suse.de>
           Mon, 11 Sep 2023 14:38:58 +0000 (10:38 -0400)
committer  Chuck Lever <chuck.lever@oracle.com>
           Mon, 16 Oct 2023 16:44:03 +0000 (12:44 -0400)
Using svc_recv() for (NFSv4.1) back-channel handling means we have just
one mechanism for waking threads.
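
In concrete terms, the transport side now queues the completed request on
sv_cb_list and wakes an idle thread through the generic pool wake-up rather
than a private wait queue.  A condensed sketch of the tail of
xprt_complete_bc_request() as modified below (earlier per-transport
bookkeeping elided; bc_serv is the back-channel svc_serv taken from the
transport, as in the existing function):

void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        struct svc_serv *bc_serv = xprt->bc_serv;

        /* ... pre-allocation accounting elided ... */

        xprt_get(xprt);
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);

        /* Wake a thread in the first (usually only) pool; it will pick
         * the request off sv_cb_list from inside svc_recv(). */
        svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
}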

Also change kthread_freezable_should_stop() in nfs4_callback_svc() to
kthread_should_stop() as used elsewhere.
kthread_freezable_should_stop() effectively adds a try_to_freeze() call,
and svc_recv() already contains that at an appropriate place.
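
With that change, the NFSv4.0 and NFSv4.1 callback services share one
minimal thread function; a condensed view of nfs4_callback_svc() after this
patch:

static int
nfs4_callback_svc(void *vrqstp)
{
        struct svc_rqst *rqstp = vrqstp;

        set_freezable();

        /*
         * svc_recv() sleeps until there is work, calls try_to_freeze()
         * itself, and handles both transport requests and queued
         * back-channel (sv_cb_list) requests.
         */
        while (!kthread_should_stop())
                svc_recv(rqstp);

        svc_exit_thread(rqstp);
        return 0;
}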

Signed-off-by: NeilBrown <neilb@suse.de>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Anna Schumaker <Anna.Schumaker@Netapp.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
fs/nfs/callback.c
include/linux/sunrpc/svc.h
net/sunrpc/backchannel_rqst.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/xprtrdma/backchannel.c

fs/nfs/callback.c
index 272e6d2bb47854020a6e2670a5a9d5115f491aa9..42a0c2f1e78565bc31b3faedf96bcaa9b0f7a585 100644
@@ -78,7 +78,7 @@ nfs4_callback_svc(void *vrqstp)
 
        set_freezable();
 
-       while (!kthread_freezable_should_stop(NULL))
+       while (!kthread_should_stop())
                svc_recv(rqstp);
 
        svc_exit_thread(rqstp);
@@ -86,41 +86,6 @@ nfs4_callback_svc(void *vrqstp)
 }
 
 #if defined(CONFIG_NFS_V4_1)
-/*
- * The callback service for NFSv4.1 callbacks
- */
-static int
-nfs41_callback_svc(void *vrqstp)
-{
-       struct svc_rqst *rqstp = vrqstp;
-       struct svc_serv *serv = rqstp->rq_server;
-       struct rpc_rqst *req;
-       DEFINE_WAIT(wq);
-
-       set_freezable();
-
-       while (!kthread_freezable_should_stop(NULL)) {
-               prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_IDLE);
-               spin_lock_bh(&serv->sv_cb_lock);
-               if (!list_empty(&serv->sv_cb_list)) {
-                       req = list_first_entry(&serv->sv_cb_list,
-                                       struct rpc_rqst, rq_bc_list);
-                       list_del(&req->rq_bc_list);
-                       spin_unlock_bh(&serv->sv_cb_lock);
-                       finish_wait(&serv->sv_cb_waitq, &wq);
-                       svc_process_bc(req, rqstp);
-               } else {
-                       spin_unlock_bh(&serv->sv_cb_lock);
-                       if (!kthread_should_stop())
-                               schedule();
-                       finish_wait(&serv->sv_cb_waitq, &wq);
-               }
-       }
-
-       svc_exit_thread(rqstp);
-       return 0;
-}
-
 static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
                struct svc_serv *serv)
 {
@@ -233,10 +198,7 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
                        cb_info->users);
 
        threadfn = nfs4_callback_svc;
-#if defined(CONFIG_NFS_V4_1)
-       if (minorversion)
-               threadfn = nfs41_callback_svc;
-#else
+#if !defined(CONFIG_NFS_V4_1)
        if (minorversion)
                return ERR_PTR(-ENOTSUPP);
 #endif

include/linux/sunrpc/svc.h
index 0cdca5960171c6329b567ead7228c7af270cdf4c..acbe1314febd3221d80c6eb0d1869e2446cf4c19 100644
@@ -92,8 +92,6 @@ struct svc_serv {
                                                 * that arrive over the same
                                                 * connection */
        spinlock_t              sv_cb_lock;     /* protects the svc_cb_list */
-       wait_queue_head_t       sv_cb_waitq;    /* sleep here if there are no
-                                                * entries in the svc_cb_list */
        bool                    sv_bc_enabled;  /* service uses backchannel */
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
 };

net/sunrpc/backchannel_rqst.c
index 65a6c6429a53ed3750007f07205cb772827c74e4..44b7c89a635fa80b01c5df5f758237ff8b9d8c20 100644
@@ -349,10 +349,8 @@ found:
 }
 
 /*
- * Add callback request to callback list.  The callback
- * service sleeps on the sv_cb_waitq waiting for new
- * requests.  Wake it up after adding enqueing the
- * request.
+ * Add callback request to callback list.  Wake a thread
+ * on the first pool (usually the only pool) to handle it.
  */
 void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 {
@@ -371,6 +369,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
        xprt_get(xprt);
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
-       wake_up(&bc_serv->sv_cb_waitq);
        spin_unlock(&bc_serv->sv_cb_lock);
+       svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
 }

net/sunrpc/svc.c
index a3d031deb1ec79ada9ef556354e2c5e2ef70824a..b98a159eb17ffb58abf684f160a3b2dfa2dbf1cc 100644
@@ -440,7 +440,6 @@ __svc_init_bc(struct svc_serv *serv)
 {
        INIT_LIST_HEAD(&serv->sv_cb_list);
        spin_lock_init(&serv->sv_cb_lock);
-       init_waitqueue_head(&serv->sv_cb_waitq);
 }
 #else
 static void
@@ -718,6 +717,7 @@ void svc_pool_wake_idle_thread(struct svc_pool *pool)
 
        set_bit(SP_CONGESTED, &pool->sp_flags);
 }
+EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread);
 
 static struct svc_pool *
 svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)

net/sunrpc/svc_xprt.c
index 835160da3ad452fb3ccf80c8e897ac909a188f8e..b057f1cbe7a1bb5b3e4479001f158a315639147e 100644
@@ -17,6 +17,7 @@
 #include <linux/sunrpc/svc_xprt.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/xprt.h>
+#include <linux/sunrpc/bc_xprt.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <trace/events/sunrpc.h>
@@ -719,6 +720,13 @@ rqst_should_sleep(struct svc_rqst *rqstp)
        if (freezing(current))
                return false;
 
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+       if (svc_is_backchannel(rqstp)) {
+               if (!list_empty(&rqstp->rq_server->sv_cb_list))
+                       return false;
+       }
+#endif
+
        return true;
 }
 
@@ -868,6 +876,25 @@ void svc_recv(struct svc_rqst *rqstp)
                trace_svc_xprt_dequeue(rqstp);
                svc_handle_xprt(rqstp, xprt);
        }
+
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+       if (svc_is_backchannel(rqstp)) {
+               struct svc_serv *serv = rqstp->rq_server;
+               struct rpc_rqst *req;
+
+               spin_lock_bh(&serv->sv_cb_lock);
+               req = list_first_entry_or_null(&serv->sv_cb_list,
+                                              struct rpc_rqst, rq_bc_list);
+               if (req) {
+                       list_del(&req->rq_bc_list);
+                       spin_unlock_bh(&serv->sv_cb_lock);
+
+                       svc_process_bc(req, rqstp);
+                       return;
+               }
+               spin_unlock_bh(&serv->sv_cb_lock);
+       }
+#endif
 }
 EXPORT_SYMBOL_GPL(svc_recv);
 

net/sunrpc/xprtrdma/backchannel.c
index e4d84a13c566e7d90fe15b87fe3148bb7b4a43ed..bfc434ec52a7d14d19a89f7576453c720ddccee6 100644
@@ -267,7 +267,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
        list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);
 
-       wake_up(&bc_serv->sv_cb_waitq);
+       svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
 
        r_xprt->rx_stats.bcall_count++;
        return;