]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
ksmbd: use wait_event instead of schedule_timeout()
authorNamjae Jeon <linkinjeon@kernel.org>
Mon, 18 Dec 2023 15:33:05 +0000 (00:33 +0900)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 23 Dec 2023 09:41:52 +0000 (10:41 +0100)
[ Upstream commit a14c573870a664386adc10526a6c2648ea56dae1 ]

ksmbd threads eat masses of CPU time when a connection is disconnected.
If the connection is disconnected, the ksmbd thread waits for pending
requests to be processed using schedule_timeout(). schedule_timeout() is
used incorrectly here, and it is more efficient to use wait_event()/wake_up()
than to repeatedly poll r_count with a timeout.

Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
Reviewed-by: Hyunchul Lee <hyc.lee@gmail.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
fs/ksmbd/connection.c
fs/ksmbd/connection.h
fs/ksmbd/oplock.c
fs/ksmbd/server.c

index 192646b8920ece4cda106f8426f5f7db2c8da0af..be1f8ffa4a784a61b148a2fcbaf39e1951b9817e 100644 (file)
@@ -66,6 +66,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
        conn->outstanding_credits = 0;
 
        init_waitqueue_head(&conn->req_running_q);
+       init_waitqueue_head(&conn->r_count_q);
        INIT_LIST_HEAD(&conn->conns_list);
        INIT_LIST_HEAD(&conn->requests);
        INIT_LIST_HEAD(&conn->async_requests);
@@ -165,7 +166,6 @@ int ksmbd_conn_write(struct ksmbd_work *work)
        struct kvec iov[3];
        int iov_idx = 0;
 
-       ksmbd_conn_try_dequeue_request(work);
        if (!work->response_buf) {
                pr_err("NULL response header\n");
                return -EINVAL;
@@ -358,8 +358,8 @@ int ksmbd_conn_handler_loop(void *p)
 
 out:
        /* Wait till all reference dropped to the Server object*/
-       while (atomic_read(&conn->r_count) > 0)
-               schedule_timeout(HZ);
+       wait_event(conn->r_count_q, atomic_read(&conn->r_count) == 0);
+
 
        unload_nls(conn->local_nls);
        if (default_conn_ops.terminate_fn)
index e0a25cddbb29cf52bc84720471be631fe1dc72fb..a8c367c481e848de996500766568d78206f45ea4 100644 (file)
@@ -58,6 +58,7 @@ struct ksmbd_conn {
        unsigned int                    outstanding_credits;
        spinlock_t                      credits_lock;
        wait_queue_head_t               req_running_q;
+       wait_queue_head_t               r_count_q;
        /* Lock to protect requests list*/
        spinlock_t                      request_lock;
        struct list_head                requests;
index ae5fc4b2c133c6a6aa4433549ffec9a15c86c844..b527f451d7a4644fe437aa71a346dd7fe4511cc7 100644 (file)
@@ -616,18 +616,13 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
        struct ksmbd_file *fp;
 
        fp = ksmbd_lookup_durable_fd(br_info->fid);
-       if (!fp) {
-               atomic_dec(&conn->r_count);
-               ksmbd_free_work_struct(work);
-               return;
-       }
+       if (!fp)
+               goto out;
 
        if (allocate_oplock_break_buf(work)) {
                pr_err("smb2_allocate_rsp_buf failed! ");
-               atomic_dec(&conn->r_count);
                ksmbd_fd_put(work, fp);
-               ksmbd_free_work_struct(work);
-               return;
+               goto out;
        }
 
        rsp_hdr = smb2_get_msg(work->response_buf);
@@ -668,8 +663,16 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
 
        ksmbd_fd_put(work, fp);
        ksmbd_conn_write(work);
+
+out:
        ksmbd_free_work_struct(work);
-       atomic_dec(&conn->r_count);
+       /*
+        * Check the waitqueue to drop pending requests on
+        * disconnection. waitqueue_active() is safe because the
+        * r_count condition is checked with an atomic operation.
+        */
+       if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+               wake_up(&conn->r_count_q);
 }
 
 /**
@@ -732,9 +735,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
 
        if (allocate_oplock_break_buf(work)) {
                ksmbd_debug(OPLOCK, "smb2_allocate_rsp_buf failed! ");
-               ksmbd_free_work_struct(work);
-               atomic_dec(&conn->r_count);
-               return;
+               goto out;
        }
 
        rsp_hdr = smb2_get_msg(work->response_buf);
@@ -772,8 +773,16 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
        inc_rfc1001_len(work->response_buf, 44);
 
        ksmbd_conn_write(work);
+
+out:
        ksmbd_free_work_struct(work);
-       atomic_dec(&conn->r_count);
+       /*
+        * Check the waitqueue to drop pending requests on
+        * disconnection. waitqueue_active() is safe because the
+        * r_count condition is checked with an atomic operation.
+        */
+       if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+               wake_up(&conn->r_count_q);
 }
 
 /**
index 049d68831ccd46276b435962d72f13011587b4a4..1d5e46d710704b7044284267a0523ebcc49b2d6f 100644 (file)
@@ -266,7 +266,13 @@ static void handle_ksmbd_work(struct work_struct *wk)
 
        ksmbd_conn_try_dequeue_request(work);
        ksmbd_free_work_struct(work);
-       atomic_dec(&conn->r_count);
+       /*
+        * Check the waitqueue to drop pending requests on
+        * disconnection. waitqueue_active() is safe because the
+        * r_count condition is checked with an atomic operation.
+        */
+       if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+               wake_up(&conn->r_count_q);
 }
 
 /**