/* io_uring/msg_ring.c — IORING_OP_MSG_RING: send data/fds between io_uring rings */
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "msg_ring.h"
26 static int io_msg_ring_data(struct io_kiocb
*req
)
28 struct io_ring_ctx
*target_ctx
= req
->file
->private_data
;
29 struct io_msg
*msg
= io_kiocb_to_cmd(req
, struct io_msg
);
31 if (msg
->src_fd
|| msg
->dst_fd
|| msg
->flags
)
34 if (io_post_aux_cqe(target_ctx
, msg
->user_data
, msg
->len
, 0, true))
40 static void io_double_unlock_ctx(struct io_ring_ctx
*ctx
,
41 struct io_ring_ctx
*octx
,
42 unsigned int issue_flags
)
44 if (issue_flags
& IO_URING_F_UNLOCKED
)
45 mutex_unlock(&ctx
->uring_lock
);
46 mutex_unlock(&octx
->uring_lock
);
49 static int io_double_lock_ctx(struct io_ring_ctx
*ctx
,
50 struct io_ring_ctx
*octx
,
51 unsigned int issue_flags
)
54 * To ensure proper ordering between the two ctxs, we can only
55 * attempt a trylock on the target. If that fails and we already have
56 * the source ctx lock, punt to io-wq.
58 if (!(issue_flags
& IO_URING_F_UNLOCKED
)) {
59 if (!mutex_trylock(&octx
->uring_lock
))
64 /* Always grab smallest value ctx first. We know ctx != octx. */
66 mutex_lock(&ctx
->uring_lock
);
67 mutex_lock(&octx
->uring_lock
);
69 mutex_lock(&octx
->uring_lock
);
70 mutex_lock(&ctx
->uring_lock
);
76 static int io_msg_send_fd(struct io_kiocb
*req
, unsigned int issue_flags
)
78 struct io_ring_ctx
*target_ctx
= req
->file
->private_data
;
79 struct io_msg
*msg
= io_kiocb_to_cmd(req
, struct io_msg
);
80 struct io_ring_ctx
*ctx
= req
->ctx
;
81 unsigned long file_ptr
;
82 struct file
*src_file
;
85 if (target_ctx
== ctx
)
88 ret
= io_double_lock_ctx(ctx
, target_ctx
, issue_flags
);
93 if (unlikely(msg
->src_fd
>= ctx
->nr_user_files
))
96 msg
->src_fd
= array_index_nospec(msg
->src_fd
, ctx
->nr_user_files
);
97 file_ptr
= io_fixed_file_slot(&ctx
->file_table
, msg
->src_fd
)->file_ptr
;
98 src_file
= (struct file
*) (file_ptr
& FFS_MASK
);
101 ret
= __io_fixed_fd_install(target_ctx
, src_file
, msg
->dst_fd
);
107 if (msg
->flags
& IORING_MSG_RING_CQE_SKIP
)
111 * If this fails, the target still received the file descriptor but
112 * wasn't notified of the fact. This means that if this request
113 * completes with -EOVERFLOW, then the sender must ensure that a
114 * later IORING_OP_MSG_RING delivers the message.
116 if (!io_post_aux_cqe(target_ctx
, msg
->user_data
, msg
->len
, 0, true))
119 io_double_unlock_ctx(ctx
, target_ctx
, issue_flags
);
123 int io_msg_ring_prep(struct io_kiocb
*req
, const struct io_uring_sqe
*sqe
)
125 struct io_msg
*msg
= io_kiocb_to_cmd(req
, struct io_msg
);
127 if (unlikely(sqe
->buf_index
|| sqe
->personality
))
130 msg
->user_data
= READ_ONCE(sqe
->off
);
131 msg
->len
= READ_ONCE(sqe
->len
);
132 msg
->cmd
= READ_ONCE(sqe
->addr
);
133 msg
->src_fd
= READ_ONCE(sqe
->addr3
);
134 msg
->dst_fd
= READ_ONCE(sqe
->file_index
);
135 msg
->flags
= READ_ONCE(sqe
->msg_ring_flags
);
136 if (msg
->flags
& ~IORING_MSG_RING_CQE_SKIP
)
142 int io_msg_ring(struct io_kiocb
*req
, unsigned int issue_flags
)
144 struct io_msg
*msg
= io_kiocb_to_cmd(req
, struct io_msg
);
148 if (!io_is_uring_fops(req
->file
))
152 case IORING_MSG_DATA
:
153 ret
= io_msg_ring_data(req
);
155 case IORING_MSG_SEND_FD
:
156 ret
= io_msg_send_fd(req
, issue_flags
);
166 io_req_set_res(req
, ret
, 0);
167 /* put file to avoid an attempt to IOPOLL the req */
168 if (!(req
->flags
& REQ_F_FIXED_FILE
))
169 io_put_file(req
->file
);