io_uring: add ->splice_fd_in checks
author    Pavel Begunkov <asml.silence@gmail.com>
          Mon, 13 Sep 2021 15:17:19 +0000 (09:17 -0600)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 18 Sep 2021 11:41:48 +0000 (13:41 +0200)
commit 26578cda3db983b17cabe4e577af26306beb9987 upstream.

->splice_fd_in is used only by splice/tee, but no other request checks
it for validity. Add the check for most request types, excluding
reads/writes/sends/recvs; we don't want the overhead there and can
leave them as is until the field is actually used.

Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/f44bc2acd6777d932de3d71a5692235b5b2b7397.1629451684.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
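
Illustrative userspace sketch, not part of the patch: assuming liburing is
available, it shows what the added checks mean in practice. An opcode that
does not consume ->splice_fd_in (fsync here) now has its request rejected
with -EINVAL when that field is set, instead of the value being silently
ignored; the file path below is hypothetical.

/*
 * Illustrative sketch only. Assumes liburing; the test file path is
 * hypothetical. With this patch applied, the fsync CQE is expected to
 * carry -EINVAL; on older kernels the stray field was ignored.
 */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, ret;

	fd = open("/tmp/splice_fd_in_test", O_CREAT | O_WRONLY, 0600);
	if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_fsync(sqe, fd, 0);
	sqe->splice_fd_in = 1;		/* field is meaningless for fsync */

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		/* expected: -EINVAL with this patch, 0 on older kernels */
		printf("fsync cqe res: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}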
fs/io_uring.c

diff --git a/fs/io_uring.c b/fs/io_uring.c
index a1b6d338e3a0b77121f6d69866b8afffdf5ca1d7..065ceeaf4463c572bb890820b5b7272739ae103f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3474,7 +3474,7 @@ static int io_renameat_prep(struct io_kiocb *req,
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index)
+       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
@@ -3525,7 +3525,8 @@ static int io_unlinkat_prep(struct io_kiocb *req,
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
+       if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
+           sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
@@ -3571,8 +3572,8 @@ static int io_shutdown_prep(struct io_kiocb *req,
 #if defined(CONFIG_NET)
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
-           sqe->buf_index)
+       if (unlikely(sqe->ioprio || sqe->off || sqe->addr || sqe->rw_flags ||
+                    sqe->buf_index || sqe->splice_fd_in))
                return -EINVAL;
 
        req->shutdown.how = READ_ONCE(sqe->len);
@@ -3720,7 +3721,8 @@ static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
+       if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
+                    sqe->splice_fd_in))
                return -EINVAL;
 
        req->sync.flags = READ_ONCE(sqe->fsync_flags);
@@ -3753,7 +3755,8 @@ static int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
 static int io_fallocate_prep(struct io_kiocb *req,
                             const struct io_uring_sqe *sqe)
 {
-       if (sqe->ioprio || sqe->buf_index || sqe->rw_flags)
+       if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
+           sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -3784,7 +3787,7 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
        const char __user *fname;
        int ret;
 
-       if (unlikely(sqe->ioprio || sqe->buf_index))
+       if (unlikely(sqe->ioprio || sqe->buf_index || sqe->splice_fd_in))
                return -EINVAL;
        if (unlikely(req->flags & REQ_F_FIXED_FILE))
                return -EBADF;
@@ -3909,7 +3912,8 @@ static int io_remove_buffers_prep(struct io_kiocb *req,
        struct io_provide_buf *p = &req->pbuf;
        u64 tmp;
 
-       if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off)
+       if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
+           sqe->splice_fd_in)
                return -EINVAL;
 
        tmp = READ_ONCE(sqe->fd);
@@ -3980,7 +3984,7 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
        struct io_provide_buf *p = &req->pbuf;
        u64 tmp;
 
-       if (sqe->ioprio || sqe->rw_flags)
+       if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;
 
        tmp = READ_ONCE(sqe->fd);
@@ -4067,7 +4071,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req,
                             const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_EPOLL)
-       if (sqe->ioprio || sqe->buf_index)
+       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -4113,7 +4117,7 @@ static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags)
 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
-       if (sqe->ioprio || sqe->buf_index || sqe->off)
+       if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -4148,7 +4152,7 @@ static int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
 
 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       if (sqe->ioprio || sqe->buf_index || sqe->addr)
+       if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
                return -EINVAL;
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -4186,7 +4190,7 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index)
+       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (req->flags & REQ_F_FIXED_FILE)
                return -EBADF;
@@ -4222,7 +4226,7 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
-           sqe->rw_flags || sqe->buf_index)
+           sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        if (req->flags & REQ_F_FIXED_FILE)
                return -EBADF;
@@ -4283,7 +4287,8 @@ static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
+       if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
+                    sqe->splice_fd_in))
                return -EINVAL;
 
        req->sync.off = READ_ONCE(sqe->off);
@@ -4710,7 +4715,7 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->len || sqe->buf_index)
+       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
 
        accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -4758,7 +4763,8 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags)
+       if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
+           sqe->splice_fd_in)
                return -EINVAL;
 
        conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -5368,7 +5374,7 @@ static int io_poll_update_prep(struct io_kiocb *req,
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index)
+       if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
@@ -5603,7 +5609,7 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
                return -EINVAL;
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index || sqe->len)
+       if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->splice_fd_in)
                return -EINVAL;
 
        tr->addr = READ_ONCE(sqe->addr);
@@ -5662,7 +5668,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
-       if (sqe->ioprio || sqe->buf_index || sqe->len != 1)
+       if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
+           sqe->splice_fd_in)
                return -EINVAL;
        if (off && is_timeout_link)
                return -EINVAL;
@@ -5811,7 +5818,8 @@ static int io_async_cancel_prep(struct io_kiocb *req,
                return -EINVAL;
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
-       if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags)
+       if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
+           sqe->splice_fd_in)
                return -EINVAL;
 
        req->cancel.addr = READ_ONCE(sqe->addr);
@@ -5868,7 +5876,7 @@ static int io_rsrc_update_prep(struct io_kiocb *req,
 {
        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
-       if (sqe->ioprio || sqe->rw_flags)
+       if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;
 
        req->rsrc_update.offset = READ_ONCE(sqe->off);