]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
io_uring/net: Allow to do vectorized send
author: Norman Maurer <norman_maurer@apple.com>
Tue, 29 Jul 2025 06:59:53 +0000 (20:59 -1000)
committer: Jens Axboe <axboe@kernel.dk>
Wed, 30 Jul 2025 14:23:04 +0000 (08:23 -0600)
At the moment you have to use sendmsg for vectorized send.
While this works it's suboptimal as it also means you need to
allocate a struct msghdr that needs to be kept alive until a
submission happens. We can remove this limitation by simply
allowing send to be used directly.

Signed-off-by: Norman Maurer <norman_maurer@apple.com>
Link: https://lore.kernel.org/r/20250729065952.26646-1-norman_maurer@apple.com
[axboe: remove -EINVAL return for SENDMSG and SEND_VECTORIZED]
[axboe: allow send_zc to set SEND_VECTORIZED too]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
include/uapi/linux/io_uring.h
io_uring/net.c

index b8a0e70ee2fd750f824e835c3420141478f50315..6957dc539d83b43bdfd860b7ec17e64cdd91616f 100644 (file)
@@ -392,12 +392,16 @@ enum io_uring_op {
  *                             the starting buffer ID in cqe->flags as per
  *                             usual for provided buffer usage. The buffers
  *                             will be contiguous from the starting buffer ID.
+ *
+ * IORING_SEND_VECTORIZED      If set, SEND[_ZC] will take a pointer to an io_vec
+ *                             to allow vectorized send operations.
  */
 #define IORING_RECVSEND_POLL_FIRST     (1U << 0)
 #define IORING_RECV_MULTISHOT          (1U << 1)
 #define IORING_RECVSEND_FIXED_BUF      (1U << 2)
 #define IORING_SEND_ZC_REPORT_USAGE    (1U << 3)
 #define IORING_RECVSEND_BUNDLE         (1U << 4)
+#define IORING_SEND_VECTORIZED         (1U << 5)
 
 /*
  * cqe.res for IORING_CQE_F_NOTIF if
index 35585bdc59f309770433474e4ccd9f6cd029255e..dd96e355982fdfb1338c3807d9f1555605d64749 100644 (file)
@@ -382,6 +382,10 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        }
        if (req->flags & REQ_F_BUFFER_SELECT)
                return 0;
+
+       if (sr->flags & IORING_SEND_VECTORIZED)
+               return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
+
        return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
 }
 
@@ -409,7 +413,7 @@ static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe
        return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);
 }
 
-#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
+#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE | IORING_SEND_VECTORIZED)
 
 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
@@ -1318,7 +1322,8 @@ void io_send_zc_cleanup(struct io_kiocb *req)
 }
 
 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
-#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
+#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE | \
+                               IORING_SEND_VECTORIZED)
 
 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {