git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
nfsd: adjust number of running nfsd threads based on activity
author: Jeff Layton <jlayton@kernel.org>
Tue, 6 Jan 2026 18:59:49 +0000 (13:59 -0500)
committer: Chuck Lever <chuck.lever@oracle.com>
Wed, 28 Jan 2026 15:15:42 +0000 (10:15 -0500)
nfsd() is changed to pass a timeout to svc_recv() when there is a min
number of threads set, and to handle error returns from it:

In the case of -ETIMEDOUT, if the service mutex can be taken (via
trylock), the thread becomes an RQ_VICTIM so that it will exit,
provided that the actual number of threads is above pool->sp_nrthrmin.

In the case of -EBUSY, if the actual number of threads is below
pool->sp_nrthrmax, it will attempt to start a new thread. This attempt
is gated on a new SP_TASK_STARTING pool flag that serializes thread
creation attempts within a pool, and further by mutex_trylock().

Neil says: "I think we want memory pressure to be able to push a thread
into returning -ETIMEDOUT.  That can come later."

Signed-off-by: NeilBrown <neil@brown.name>
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
fs/nfsd/nfssvc.c
fs/nfsd/trace.h

index e3f647efc4c7b7b329bbd88899090ce070539aa7..1e2570e3c754cfbd3caf0335fb9f46b5e22b5d63 100644 (file)
@@ -882,9 +882,11 @@ static int
 nfsd(void *vrqstp)
 {
        struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
+       struct svc_pool *pool = rqstp->rq_pool;
        struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
        struct net *net = perm_sock->xpt_net;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+       bool have_mutex = false;
 
        /* At this point, the thread shares current->fs
         * with the init process. We need to create files with the
@@ -902,7 +904,44 @@ nfsd(void *vrqstp)
         * The main request loop
         */
        while (!svc_thread_should_stop(rqstp)) {
-               svc_recv(rqstp, 0);
+               switch (svc_recv(rqstp, 5 * HZ)) {
+               case -ETIMEDOUT:
+                       /* No work arrived within the timeout window */
+                       if (mutex_trylock(&nfsd_mutex)) {
+                               if (pool->sp_nrthreads > pool->sp_nrthrmin) {
+                                       trace_nfsd_dynthread_kill(net, pool);
+                                       set_bit(RQ_VICTIM, &rqstp->rq_flags);
+                                       have_mutex = true;
+                               } else {
+                                       mutex_unlock(&nfsd_mutex);
+                               }
+                       } else {
+                               trace_nfsd_dynthread_trylock_fail(net, pool);
+                       }
+                       break;
+               case -EBUSY:
+                       /* No idle threads; consider spawning another */
+                       if (pool->sp_nrthreads < pool->sp_nrthrmax) {
+                               if (mutex_trylock(&nfsd_mutex)) {
+                                       if (pool->sp_nrthreads < pool->sp_nrthrmax) {
+                                               int ret;
+
+                                               trace_nfsd_dynthread_start(net, pool);
+                                               ret = svc_new_thread(rqstp->rq_server, pool);
+                                               if (ret)
+                                                       pr_notice_ratelimited("%s: unable to spawn new thread: %d\n",
+                                                                             __func__, ret);
+                                       }
+                                       mutex_unlock(&nfsd_mutex);
+                               } else {
+                                       trace_nfsd_dynthread_trylock_fail(net, pool);
+                               }
+                       }
+                       clear_bit(SP_TASK_STARTING, &pool->sp_flags);
+                       break;
+               default:
+                       break;
+               }
                nfsd_file_net_dispose(nn);
        }
 
@@ -910,6 +949,8 @@ nfsd(void *vrqstp)
 
        /* Release the thread */
        svc_exit_thread(rqstp);
+       if (have_mutex)
+               mutex_unlock(&nfsd_mutex);
        return 0;
 }
 
index 5ae2a611e57f4b4e51a4d9eb6e0fccb66ad8d288..8885fd9bead98ebf55379d68ab9c3701981a5150 100644 (file)
@@ -91,6 +91,41 @@ DEFINE_EVENT(nfsd_xdr_err_class, nfsd_##name##_err, \
 DEFINE_NFSD_XDR_ERR_EVENT(garbage_args);
 DEFINE_NFSD_XDR_ERR_EVENT(cant_encode);
 
+DECLARE_EVENT_CLASS(nfsd_dynthread_class,
+       TP_PROTO(
+               const struct net *net,
+               const struct svc_pool *pool
+       ),
+       TP_ARGS(net, pool),
+       TP_STRUCT__entry(
+               __field(unsigned int, netns_ino)
+               __field(unsigned int, pool_id)
+               __field(unsigned int, nrthreads)
+               __field(unsigned int, nrthrmin)
+               __field(unsigned int, nrthrmax)
+       ),
+       TP_fast_assign(
+               __entry->netns_ino = net->ns.inum;
+               __entry->pool_id = pool->sp_id;
+               __entry->nrthreads = pool->sp_nrthreads;
+               __entry->nrthrmin = pool->sp_nrthrmin;
+               __entry->nrthrmax = pool->sp_nrthrmax;
+       ),
+       TP_printk("pool=%u nrthreads=%u nrthrmin=%u nrthrmax=%u",
+               __entry->pool_id, __entry->nrthreads,
+               __entry->nrthrmin, __entry->nrthrmax
+       )
+);
+
+#define DEFINE_NFSD_DYNTHREAD_EVENT(name) \
+DEFINE_EVENT(nfsd_dynthread_class, nfsd_dynthread_##name, \
+       TP_PROTO(const struct net *net, const struct svc_pool *pool), \
+       TP_ARGS(net, pool))
+
+DEFINE_NFSD_DYNTHREAD_EVENT(start);
+DEFINE_NFSD_DYNTHREAD_EVENT(kill);
+DEFINE_NFSD_DYNTHREAD_EVENT(trylock_fail);
+
 #define show_nfsd_may_flags(x)                                         \
        __print_flags(x, "|",                                           \
                { NFSD_MAY_EXEC,                "EXEC" },               \