git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
io_uring/zctx: unify zerocopy issue variants
author: Pavel Begunkov <asml.silence@gmail.com>
Mon, 16 Feb 2026 11:45:55 +0000 (11:45 +0000)
committer: Jens Axboe <axboe@kernel.dk>
Mon, 9 Mar 2026 13:21:54 +0000 (07:21 -0600)
io_send_zc and io_sendmsg_zc started out different, but now the only real
difference between them is how registered buffers are imported and
which net helper we use. Avoid duplication and combine them into a
single function.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
io_uring/net.c
io_uring/net.h
io_uring/opdef.c

index 9452793c21f11f47d46ddee248180c98fef25cb1..3e6112beea88dcc948daa915ac2c9fb1e1c0e953 100644 (file)
@@ -1471,9 +1471,9 @@ static int io_send_zc_import(struct io_kiocb *req,
        return 0;
 }
 
-int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
+int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 {
-       struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
+       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *kmsg = req->async_data;
        struct socket *sock;
        unsigned msg_flags;
@@ -1484,9 +1484,8 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
                return -ENOTSOCK;
        if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
                return -EOPNOTSUPP;
-
        if (!(req->flags & REQ_F_POLLED) &&
-           (zc->flags & IORING_RECVSEND_POLL_FIRST))
+           (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return -EAGAIN;
 
        if (req->flags & REQ_F_IMPORT_BUFFER) {
@@ -1495,87 +1494,28 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
                        return ret;
        }
 
-       msg_flags = zc->msg_flags;
-       if (issue_flags & IO_URING_F_NONBLOCK)
-               msg_flags |= MSG_DONTWAIT;
-       if (msg_flags & MSG_WAITALL)
-               min_ret = iov_iter_count(&kmsg->msg.msg_iter);
-       msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
-
-       kmsg->msg.msg_flags = msg_flags;
-       kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
-       ret = sock_sendmsg(sock, &kmsg->msg);
-
-       if (unlikely(ret < min_ret)) {
-               if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
-                       return -EAGAIN;
-
-               if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
-                       zc->done_io += ret;
-                       return -EAGAIN;
-               }
-               if (ret == -ERESTARTSYS)
-                       ret = -EINTR;
-               req_set_fail(req);
-       }
-
-       if (ret >= 0)
-               ret += zc->done_io;
-       else if (zc->done_io)
-               ret = zc->done_io;
-
-       /*
-        * If we're in io-wq we can't rely on tw ordering guarantees, defer
-        * flushing notif to io_send_zc_cleanup()
-        */
-       if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-               io_notif_flush(zc->notif);
-               zc->notif = NULL;
-               io_req_msg_cleanup(req, 0);
-       }
-       io_req_set_res(req, ret, IORING_CQE_F_MORE);
-       return IOU_COMPLETE;
-}
-
-int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
-{
-       struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-       struct io_async_msghdr *kmsg = req->async_data;
-       struct socket *sock;
-       unsigned msg_flags;
-       int ret, min_ret = 0;
-
-       if (req->flags & REQ_F_IMPORT_BUFFER) {
-               ret = io_send_zc_import(req, kmsg, issue_flags);
-               if (unlikely(ret))
-                       return ret;
-       }
-
-       sock = sock_from_file(req->file);
-       if (unlikely(!sock))
-               return -ENOTSOCK;
-       if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
-               return -EOPNOTSUPP;
-
-       if (!(req->flags & REQ_F_POLLED) &&
-           (sr->flags & IORING_RECVSEND_POLL_FIRST))
-               return -EAGAIN;
-
        msg_flags = sr->msg_flags;
        if (issue_flags & IO_URING_F_NONBLOCK)
                msg_flags |= MSG_DONTWAIT;
        if (msg_flags & MSG_WAITALL)
                min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 
-       kmsg->msg.msg_control_user = sr->msg_control;
        kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
-       ret = __sys_sendmsg_sock(sock, &kmsg->msg, msg_flags);
+
+       if (req->opcode == IORING_OP_SEND_ZC) {
+               msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
+               kmsg->msg.msg_flags = msg_flags;
+               ret = sock_sendmsg(sock, &kmsg->msg);
+       } else {
+               kmsg->msg.msg_control_user = sr->msg_control;
+               ret = __sys_sendmsg_sock(sock, &kmsg->msg, msg_flags);
+       }
 
        if (unlikely(ret < min_ret)) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return -EAGAIN;
 
-               if (ret > 0 && io_net_retry(sock, msg_flags)) {
+               if (ret > 0 && io_net_retry(sock, sr->msg_flags)) {
                        sr->done_io += ret;
                        return -EAGAIN;
                }
index a862960a3bb993b722415928e8614342739392c3..d4d1ddce50e3a4f6d02c43960d1db4107a38ae32 100644 (file)
@@ -50,7 +50,6 @@ void io_socket_bpf_populate(struct io_uring_bpf_ctx *bctx, struct io_kiocb *req)
 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_connect(struct io_kiocb *req, unsigned int issue_flags);
 
-int io_send_zc(struct io_kiocb *req, unsigned int issue_flags);
 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags);
 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 void io_send_zc_cleanup(struct io_kiocb *req);
index 91a23baf415e89cb7fd8f9ae797f02e22d5bfc26..645980fa465166651c9fbfc337200d1dac8174f8 100644 (file)
@@ -437,7 +437,7 @@ const struct io_issue_def io_issue_defs[] = {
 #if defined(CONFIG_NET)
                .async_size             = sizeof(struct io_async_msghdr),
                .prep                   = io_send_zc_prep,
-               .issue                  = io_send_zc,
+               .issue                  = io_sendmsg_zc,
 #else
                .prep                   = io_eopnotsupp_prep,
 #endif