SUNRPC: add list of idle threads
author    NeilBrown <neilb@suse.de>
          Mon, 11 Sep 2023 14:39:11 +0000 (10:39 -0400)
committer Chuck Lever <chuck.lever@oracle.com>
          Mon, 16 Oct 2023 16:44:04 +0000 (12:44 -0400)
Rather than searching the list of all threads to find an idle one, keep
a dedicated list of idle threads from which one can be taken immediately.

This adds some spin_lock calls, which is not ideal, but as the hold
time is tiny it is still faster than searching a list.  A future patch
will remove them using llist.h; that conversion involves some subtlety
and so is left to a separate patch.

This removes the need for the RQ_BUSY flag.  The rqst is "busy"
precisely when it is not on the "idle" list.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
include/linux/sunrpc/svc.h
include/trace/events/sunrpc.h
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
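
To make the shape of the change concrete before the diff: a minimal
user-space sketch of the same pattern, with a pthread mutex standing in
for pool->sp_lock and hypothetical worker/worker_pool types standing in
for svc_rqst/svc_pool.  It illustrates the idea only; it is not kernel
code.

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-ins for svc_rqst and svc_pool. */
struct worker {
	struct worker	*idle_next;	/* link on the pool's idle list */
	bool		idle;		/* set while on the idle list */
	pthread_cond_t	wakeup;
};

struct worker_pool {
	pthread_mutex_t	lock;		/* plays the role of sp_lock */
	struct worker	*idle_head;	/* idle workers, most recently idled first */
};

/* O(1) wake path, analogous to svc_pool_wake_idle_thread(): pop the
 * first idle worker instead of scanning every worker for a busy flag.
 */
struct worker *pool_wake_idle_worker(struct worker_pool *pool)
{
	struct worker *w;

	pthread_mutex_lock(&pool->lock);
	w = pool->idle_head;
	if (w) {
		pool->idle_head = w->idle_next;
		w->idle = false;	/* off the list means busy */
	}
	pthread_mutex_unlock(&pool->lock);
	if (w)
		pthread_cond_signal(&w->wakeup);
	return w;
}

/* Sleep path, analogous to svc_rqst_wait_for_work(): park on the idle
 * list and sleep until a waker has dequeued us.
 */
void worker_wait_for_work(struct worker_pool *pool, struct worker *w)
{
	pthread_mutex_lock(&pool->lock);
	w->idle_next = pool->idle_head;
	pool->idle_head = w;
	w->idle = true;
	while (w->idle)		/* cleared by the waker, under pool->lock */
		pthread_cond_wait(&w->wakeup, &pool->lock);
	pthread_mutex_unlock(&pool->lock);
}

Unlike this sketch, a kernel thread can also be woken without going
through the idle list (signals, pool shutdown), which is why
svc_rqst_wait_for_work() below re-checks svc_thread_busy() and removes
itself when needed.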

diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 0ec691070e276f62ec0a23cb6430c3efcf71c1c6..e9c34e99bc88abe62b84eaa8de5244953a4f0bd7 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -37,6 +37,7 @@ struct svc_pool {
        struct list_head        sp_sockets;     /* pending sockets */
        unsigned int            sp_nrthreads;   /* # of threads in pool */
        struct list_head        sp_all_threads; /* all server threads */
+       struct list_head        sp_idle_threads; /* idle server threads */
 
        /* statistics on pool operation */
        struct percpu_counter   sp_messages_arrived;
@@ -186,6 +187,7 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
  */
 struct svc_rqst {
        struct list_head        rq_all;         /* all threads list */
+       struct list_head        rq_idle;        /* On the idle list */
        struct rcu_head         rq_rcu_head;    /* for RCU deferred kfree */
        struct svc_xprt *       rq_xprt;        /* transport ptr */
 
@@ -262,10 +264,31 @@ enum {
        RQ_SPLICE_OK,           /* turned off in gss privacy to prevent
                                 * encrypting page cache pages */
        RQ_VICTIM,              /* Have agreed to shut down */
-       RQ_BUSY,                /* request is busy */
        RQ_DATA,                /* request has data */
 };
 
+/**
+ * svc_thread_set_busy - mark a thread as busy
+ * @rqstp: the thread which is now busy
+ *
+ * By making rq_idle "empty", the thread is marked busy.
+ */
+static inline void svc_thread_set_busy(struct svc_rqst *rqstp)
+{
+       INIT_LIST_HEAD(&rqstp->rq_idle);
+}
+
+/**
+ * svc_thread_busy - check if a thread is busy
+ * @rqstp: the thread which might be busy
+ *
+ * If rq_idle is "empty", the thread must be busy.
+ */
+static inline bool svc_thread_busy(struct svc_rqst *rqstp)
+{
+       return list_empty(&rqstp->rq_idle);
+}
+
 #define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
 
 /*
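
The two helpers above lean on a standard list.h idiom: INIT_LIST_HEAD()
makes a node point at itself ("empty"), and list_del_init() restores
that state when the node is unlinked, so list_empty(&rqstp->rq_idle) is
a cheap busy test the owning thread can make without taking sp_lock.  A
stripped-down re-implementation of just the three primitives involved
(illustrative only; the kernel's list.h is the real thing):

#include <assert.h>
#include <stdbool.h>

struct list_node {
	struct list_node *next, *prev;
};

/* An "empty" node points at itself; for rq_idle this encodes "busy". */
static void node_init(struct list_node *n)
{
	n->next = n->prev = n;
}

static bool node_empty(const struct list_node *n)	/* svc_thread_busy() analogue */
{
	return n->next == n;
}

static void node_add(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

/* Unlink and re-initialise, so node_empty() is true again: list_del_init(). */
static void node_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	node_init(n);
}

int main(void)
{
	struct list_node sp_idle_threads, rq_idle;

	node_init(&sp_idle_threads);
	node_init(&rq_idle);			/* svc_thread_set_busy() */
	assert(node_empty(&rq_idle));		/* thread starts busy */
	node_add(&rq_idle, &sp_idle_threads);	/* going idle */
	assert(!node_empty(&rq_idle));
	node_del_init(&rq_idle);		/* woken: busy again */
	assert(node_empty(&rq_idle));
	return 0;
}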
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 6beb38c1dcb5eb1836171652ab30a5c71c9f6509..337c90787fb1a960a268f1e08858b49789c7e1bd 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1677,7 +1677,6 @@ DEFINE_SVCXDRBUF_EVENT(sendto);
        svc_rqst_flag(DROPME)                                           \
        svc_rqst_flag(SPLICE_OK)                                        \
        svc_rqst_flag(VICTIM)                                           \
-       svc_rqst_flag(BUSY)                                             \
        svc_rqst_flag_end(DATA)
 
 #undef svc_rqst_flag
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index db579bbc0a0a8df1197f17c62293dfda6dece256..9d080fe2dcdfa528694017a50434896f37d5f470 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -510,6 +510,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
                pool->sp_id = i;
                INIT_LIST_HEAD(&pool->sp_sockets);
                INIT_LIST_HEAD(&pool->sp_all_threads);
+               INIT_LIST_HEAD(&pool->sp_idle_threads);
                spin_lock_init(&pool->sp_lock);
 
                percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
@@ -641,7 +642,7 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
 
        folio_batch_init(&rqstp->rq_fbatch);
 
-       __set_bit(RQ_BUSY, &rqstp->rq_flags);
+       svc_thread_set_busy(rqstp);
        rqstp->rq_server = serv;
        rqstp->rq_pool = pool;
 
@@ -702,10 +703,13 @@ void svc_pool_wake_idle_thread(struct svc_pool *pool)
        struct svc_rqst *rqstp;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
-               if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
-                       continue;
-
+       spin_lock_bh(&pool->sp_lock);
+       rqstp = list_first_entry_or_null(&pool->sp_idle_threads,
+                                        struct svc_rqst, rq_idle);
+       if (rqstp)
+               list_del_init(&rqstp->rq_idle);
+       spin_unlock_bh(&pool->sp_lock);
+       if (rqstp) {
                WRITE_ONCE(rqstp->rq_qtime, ktime_get());
                wake_up_process(rqstp->rq_task);
                rcu_read_unlock();
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b8539545fefdb1f203d87ce54cd49507bf1cb6a5..ebfeeb504a7955d4a31672a69ee9b6988dd7e1cb 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -737,8 +737,9 @@ static void svc_rqst_wait_for_work(struct svc_rqst *rqstp)
                set_current_state(TASK_IDLE);
                smp_mb__before_atomic();
                clear_bit(SP_CONGESTED, &pool->sp_flags);
-               clear_bit(RQ_BUSY, &rqstp->rq_flags);
-               smp_mb__after_atomic();
+               spin_lock_bh(&pool->sp_lock);
+               list_add(&rqstp->rq_idle, &pool->sp_idle_threads);
+               spin_unlock_bh(&pool->sp_lock);
 
                /* Need to check should_sleep() again after
                 * setting task state in case a wakeup happened
@@ -751,8 +752,14 @@ static void svc_rqst_wait_for_work(struct svc_rqst *rqstp)
                        cond_resched();
                }
 
-               set_bit(RQ_BUSY, &rqstp->rq_flags);
-               smp_mb__after_atomic();
+               /* We *must* be off the idle list before continuing; if we
+                * were woken via svc_pool_wake_idle_thread(), the waker has
+                * already removed us. */
+               if (!svc_thread_busy(rqstp)) {
+                       spin_lock_bh(&pool->sp_lock);
+                       list_del_init(&rqstp->rq_idle);
+                       spin_unlock_bh(&pool->sp_lock);
+               }
        } else {
                cond_resched();
        }
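
A closing note on the follow-up the commit message promises: with
llist.h, sp_idle_threads would presumably become a struct llist_head
and rq_idle a struct llist_node, so the wake path could pop without
taking sp_lock.  A rough sketch of that direction (an assumption about
the future patch, not its actual contents):

/* Editor's sketch, not the real follow-up patch: assumes the llist
 * conversion of sp_idle_threads and rq_idle described above.
 */
void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
	struct llist_node *ln;

	/* llist_add() may run concurrently with this, but concurrent
	 * llist_del_first() callers must be serialized against each
	 * other; that is the subtlety the message defers to a later
	 * patch.
	 */
	ln = llist_del_first(&pool->sp_idle_threads);
	if (ln) {
		struct svc_rqst *rqstp = llist_entry(ln, struct svc_rqst,
						     rq_idle);

		WRITE_ONCE(rqstp->rq_qtime, ktime_get());
		wake_up_process(rqstp->rq_task);
	}
}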