cachefiles: make on-demand read killable
author Baokun Li <libaokun1@huawei.com>
Wed, 22 May 2024 11:43:08 +0000 (19:43 +0800)
committer Christian Brauner <brauner@kernel.org>
Wed, 29 May 2024 11:03:31 +0000 (13:03 +0200)
Replacing wait_for_completion() with wait_for_completion_killable() in
cachefiles_ondemand_send_req() allows us to kill processes that might
trigger a hung task if the daemon is abnormal.

But for now only CACHEFILES_OP_READ is killable, because OP_CLOSE and OP_OPEN
are initiated from kworker context and signals are prohibited in those
kworkers.

Note that when the req in the xarray has changed, i.e. xas_load(&xas) != req,
it means that another process will complete the current request soon, so wait
again for the request to be completed.

In addition, add the cachefiles_ondemand_finish_req() helper function to
simplify the code.

Suggested-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Baokun Li <libaokun1@huawei.com>
Link: https://lore.kernel.org/r/20240522114308.2402121-13-libaokun@huaweicloud.com
Acked-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Jia Zhu <zhujia.zj@bytedance.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/cachefiles/ondemand.c

index 922cab1a314b250f3dbddd4da9f721f56cec8314..58bd80956c5a64682bf6e4a70d47b6d1cfd469f1 100644 (file)
@@ -380,6 +380,20 @@ static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xa
        return NULL;
 }
 
+static inline bool cachefiles_ondemand_finish_req(struct cachefiles_req *req,
+                                                 struct xa_state *xas, int err)
+{
+       if (unlikely(!xas || !req))
+               return false;
+
+       if (xa_cmpxchg(xas->xa, xas->xa_index, req, NULL, 0) != req)
+               return false;
+
+       req->error = err;
+       complete(&req->done);
+       return true;
+}
+
 ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
                                        char __user *_buffer, size_t buflen)
 {
@@ -443,16 +457,8 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
 out:
        cachefiles_put_object(req->object, cachefiles_obj_put_read_req);
        /* Remove error request and CLOSE request has no reply */
-       if (ret || msg->opcode == CACHEFILES_OP_CLOSE) {
-               xas_reset(&xas);
-               xas_lock(&xas);
-               if (xas_load(&xas) == req) {
-                       req->error = ret;
-                       complete(&req->done);
-                       xas_store(&xas, NULL);
-               }
-               xas_unlock(&xas);
-       }
+       if (ret || msg->opcode == CACHEFILES_OP_CLOSE)
+               cachefiles_ondemand_finish_req(req, &xas, ret);
        cachefiles_req_put(req);
        return ret ? ret : n;
 }
@@ -544,8 +550,18 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
                goto out;
 
        wake_up_all(&cache->daemon_pollwq);
-       wait_for_completion(&req->done);
-       ret = req->error;
+wait:
+       ret = wait_for_completion_killable(&req->done);
+       if (!ret) {
+               ret = req->error;
+       } else {
+               ret = -EINTR;
+               if (!cachefiles_ondemand_finish_req(req, &xas, ret)) {
+                       /* Someone will complete it soon. */
+                       cpu_relax();
+                       goto wait;
+               }
+       }
        cachefiles_req_put(req);
        return ret;
 out:
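
For illustration only (not part of the commit), below is a minimal userspace
analogue of the handoff the new wait loop implements, written with pthreads
and C11 atomics. All names here (finish_req, daemon_thread, slot) are made up
for the sketch, and a timed wait merely stands in for the wait being killed by
a fatal signal; the point is the same claim-or-wait-again logic: an
interrupted waiter backs out only if it wins an atomic claim on the request
slot, and if it loses, the other side is about to complete the request, so it
simply waits again.

/*
 * Userspace sketch of "killable wait vs. concurrent completer".
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct req {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	bool            completed;
	int             error;
};

/* Slot the daemon looks requests up in; stands in for the xarray entry. */
static _Atomic(struct req *) slot;

/* Claim the request; only the side that wins the exchange may complete it. */
static bool finish_req(struct req *req, int err)
{
	struct req *expected = req;

	if (!atomic_compare_exchange_strong(&slot, &expected, NULL))
		return false;		/* the other side already claimed it */

	pthread_mutex_lock(&req->lock);
	req->error = err;
	req->completed = true;
	pthread_cond_signal(&req->cond);
	pthread_mutex_unlock(&req->lock);
	return true;
}

/* Daemon side: services the request, then tries to complete it. */
static void *daemon_thread(void *arg)
{
	sleep(2);			/* pretend the read takes a while */
	finish_req(arg, 0);		/* may lose the race to the waiter */
	return NULL;
}

int main(void)
{
	struct req r = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t tid;

	atomic_store(&slot, &r);
	pthread_create(&tid, NULL, daemon_thread, &r);

	pthread_mutex_lock(&r.lock);
	while (!r.completed) {
		struct timespec ts;

		/* A 1s timeout stands in for the wait being killed. */
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 1;
		if (pthread_cond_timedwait(&r.cond, &r.lock, &ts)) {
			pthread_mutex_unlock(&r.lock);
			/*
			 * "Killed": back out only if we win the claim.
			 * Losing means the daemon will complete the
			 * request soon, so go around and wait again.
			 */
			finish_req(&r, -1);
			pthread_mutex_lock(&r.lock);
		}
	}
	printf("request finished, error=%d\n", r.error);
	pthread_mutex_unlock(&r.lock);

	pthread_join(tid, NULL);
	return 0;
}

With the daemon deliberately slower than the 1-second timeout, the waiter wins
the claim and the request finishes with error -1; shorten the daemon's sleep
and the daemon wins instead, with the waiter's late claim attempt failing the
compare-and-swap, mirroring how only one side ever completes the request in
the patch above.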