io_uring: cancelable uring_cmd
author		Ming Lei <ming.lei@redhat.com>
		Thu, 28 Sep 2023 12:43:25 +0000 (20:43 +0800)
committer	Jens Axboe <axboe@kernel.dk>
		Thu, 28 Sep 2023 13:36:00 +0000 (07:36 -0600)
A uring_cmd may never complete on its own. ublk is one example: its
uring_cmds aren't completed until a new block request arrives from the
ublk block device.

Add cancelable uring_cmd to give drivers a mechanism for cancelling
pending commands in their own way.

Add the io_uring_cmd_mark_cancelable() API for drivers to mark a command
as cancelable; io_uring will then cancel such commands in
io_uring_cancel_generic(). The ->uring_cmd() callback is reused for
cancelling a command in the driver's own way, which is how the driver
gets notified of the cancellation by io_uring.

Add the io_uring_cmd_get_task() API to help a driver's cancel handler
deal with the cancellation.
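
To make the flow concrete, here is a hedged driver-side sketch, not code
from this commit: the mydrv_* names stand in for driver-specific
bookkeeping, and completing with -ECANCELED is one possible policy, not a
requirement.

#include <linux/io_uring.h>

/*
 * Hypothetical ->uring_cmd() handler: mark a long-lived command
 * cancelable on the normal issue path, and complete it when io_uring
 * invokes the handler again with IO_URING_F_CANCEL.
 */
static int mydrv_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_CANCEL) {
		/* io_uring is cancelling; the issuing task is still
		 * reachable if driver teardown needs it */
		struct task_struct *task = io_uring_cmd_get_task(cmd);

		mydrv_forget_cmd(cmd, task);	/* driver-specific teardown */
		io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
		return 0;
	}

	/* normal issue: the command may stay pending indefinitely,
	 * so register it for cancellation on ring teardown */
	io_uring_cmd_mark_cancelable(cmd, issue_flags);
	mydrv_queue_cmd(cmd);			/* driver-specific queueing */
	return -EIOCBQUEUED;
}

Completing through io_uring_cmd_done() also removes the command from the
cancelable list (see io_uring_cmd_del_cancelable() in the uring_cmd.c hunk
below), so neither path needs to unmark explicitly.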

Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
Suggested-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
include/linux/io_uring.h
include/linux/io_uring_types.h
io_uring/io_uring.c
io_uring/uring_cmd.c

diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index ae08d6f66e627d57d530ef20337c3311bef5b156..b4391e0a9bc8363200f9b4013193fc4e75ce1d6e 100644
@@ -20,9 +20,13 @@ enum io_uring_cmd_flags {
        IO_URING_F_SQE128               = (1 << 8),
        IO_URING_F_CQE32                = (1 << 9),
        IO_URING_F_IOPOLL               = (1 << 10),
+
+       /* set when uring wants to cancel a previously issued command */
+       IO_URING_F_CANCEL               = (1 << 11),
 };
 
 /* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
+#define IORING_URING_CMD_CANCELABLE    (1U << 30)
 #define IORING_URING_CMD_POLLED                (1U << 31)
 
 struct io_uring_cmd {
@@ -85,6 +89,9 @@ static inline void io_uring_free(struct task_struct *tsk)
                __io_uring_free(tsk);
 }
 int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
+void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+               unsigned int issue_flags);
+struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd);
 #else
 static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
                              struct iov_iter *iter, void *ioucmd)
@@ -125,6 +132,14 @@ static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
 {
        return -EOPNOTSUPP;
 }
+static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+               unsigned int issue_flags)
+{
+}
+static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
+{
+       return NULL;
+}
 #endif
 
 #endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index fe1c5d4ec56cfa688fa3f745d88760d95d0b0e4f..e178461fa51342c634521537c8af21dcc2a68b7e 100644
@@ -265,6 +265,12 @@ struct io_ring_ctx {
                 */
                struct io_wq_work_list  iopoll_list;
                bool                    poll_multi_queue;
+
+               /*
+                * Any cancelable uring_cmd is added to this list in
+                * ->uring_cmd() by io_uring_cmd_mark_cancelable()
+                */
+               struct hlist_head       cancelable_uring_cmd;
        } ____cacheline_aligned_in_smp;
 
        struct {
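
For orientation, a simplified sketch of the linkage (trimmed down; the
real structures carry many more fields): the new per-ring list head pairs
with the hlist_node already embedded in each request, which this patch
reuses for list membership, so tracking a cancelable command costs no
extra allocation.

struct io_ring_ctx {
	/* ... */
	struct hlist_head	cancelable_uring_cmd;	/* per-ring list head */
};

struct io_kiocb {
	/* ... */
	struct hlist_node	hash_node;	/* reused to chain onto the list */
};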
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cb6bd990704594211d848cfa262633c865a4af3d..08c9ea46bb95af6aa1616ad1f3ac3f94612d8af2 100644
@@ -352,6 +352,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
        INIT_HLIST_HEAD(&ctx->waitid_list);
        INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
        INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
+       INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
        return ctx;
 err:
        kfree(ctx->cancel_table.hbs);
@@ -3258,6 +3259,37 @@ static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
        return ret;
 }
 
+static bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
+               struct task_struct *task, bool cancel_all)
+{
+       struct hlist_node *tmp;
+       struct io_kiocb *req;
+       bool ret = false;
+
+       lockdep_assert_held(&ctx->uring_lock);
+
+       hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
+                       hash_node) {
+               struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
+                               struct io_uring_cmd);
+               struct file *file = req->file;
+
+               if (!cancel_all && req->task != task)
+                       continue;
+
+               if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
+                       /* ->sqe isn't available if no async data */
+                       if (!req_has_async_data(req))
+                               cmd->sqe = NULL;
+                       file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL);
+                       ret = true;
+               }
+       }
+       io_submit_flush_completions(ctx);
+
+       return ret;
+}
+
 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
                                                struct task_struct *task,
                                                bool cancel_all)
@@ -3306,6 +3338,7 @@ static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
        mutex_lock(&ctx->uring_lock);
        ret |= io_poll_remove_all(ctx, task, cancel_all);
        ret |= io_waitid_remove_all(ctx, task, cancel_all);
+       ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
        mutex_unlock(&ctx->uring_lock);
        ret |= io_kill_timeouts(ctx, task, cancel_all);
        if (task)
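
One locking detail worth noting (my reading of the surrounding io_uring
code, not something this diff states): the cancel pass above holds
ctx->uring_lock, and IO_URING_F_CANCEL does not include
IO_URING_F_UNLOCKED, so if the driver's cancel handler completes the
command with the issue_flags it was given, the io_ring_submit_lock() call
inside io_uring_cmd_del_cancelable() is a no-op rather than a
self-deadlock, and hlist_for_each_entry_safe() tolerates the resulting
hlist_del(). The conditional-lock idiom it relies on looks roughly like:

/* Sketch of the io_ring_submit_lock() idiom from io_uring/io_uring.h:
 * the mutex is taken only when the caller signals that it does not
 * already hold it. */
static inline void submit_lock_sketch(struct io_ring_ctx *ctx,
				      unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}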
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index a0b0ec5473bfe257a3911ac9bb639c7b86adc120..00a5e5621a288adb9e0ed6bf6f8a9de4538cb987 100644
 #include "rsrc.h"
 #include "uring_cmd.h"
 
+static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
+               unsigned int issue_flags)
+{
+       struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+       struct io_ring_ctx *ctx = req->ctx;
+
+       if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
+               return;
+
+       cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
+       io_ring_submit_lock(ctx, issue_flags);
+       hlist_del(&req->hash_node);
+       io_ring_submit_unlock(ctx, issue_flags);
+}
+
+/*
+ * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
+ * will try to cancel this issued command by sending ->uring_cmd() with
+ * issue_flags of IO_URING_F_CANCEL.
+ *
+ * The command is guaranteed not to be done when ->uring_cmd() is called
+ * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
+ * with the race between io_uring cancellation and normal completion.
+ */
+void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+               unsigned int issue_flags)
+{
+       struct io_kiocb *req = cmd_to_io_kiocb(cmd);
+       struct io_ring_ctx *ctx = req->ctx;
+
+       if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
+               cmd->flags |= IORING_URING_CMD_CANCELABLE;
+               io_ring_submit_lock(ctx, issue_flags);
+               hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
+               io_ring_submit_unlock(ctx, issue_flags);
+       }
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
+
+struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
+{
+       return cmd_to_io_kiocb(cmd)->task;
+}
+EXPORT_SYMBOL_GPL(io_uring_cmd_get_task);
+
 static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
 {
        struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
@@ -56,6 +101,8 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
 {
        struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
 
+       io_uring_cmd_del_cancelable(ioucmd, issue_flags);
+
        if (ret < 0)
                req_set_fail(req);
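
To round out the lifecycle, a hedged sketch of the normal completion side
(the mydrv_cmd structure, its lock, and the claim helper are hypothetical):
because io_uring_cmd_done() now unmarks the command first, a regular
completion needs no explicit cleanup; per the comment above, the driver's
only obligation is to serialize this path against its own
IO_URING_F_CANCEL handling, for example with a claim flag.

struct mydrv_cmd {
	struct io_uring_cmd	*ioucmd;
	spinlock_t		lock;
	bool			claimed;	/* set by whichever path wins */
};

/* Whoever flips "claimed" first owns completing the command; the loser
 * (normal completion or the IO_URING_F_CANCEL handler) does nothing. */
static bool mydrv_claim_cmd(struct mydrv_cmd *mc)
{
	bool won;

	spin_lock(&mc->lock);
	won = !mc->claimed;
	mc->claimed = true;
	spin_unlock(&mc->lock);
	return won;
}

static void mydrv_complete_cmd(struct mydrv_cmd *mc, int result,
			       unsigned int issue_flags)
{
	if (mydrv_claim_cmd(mc))
		/* drops the cancelable marking internally, then completes */
		io_uring_cmd_done(mc->ioucmd, result, 0, issue_flags);
}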