// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"
#if defined(CONFIG_NET)

struct io_accept {
        struct file                     *file;
        struct sockaddr __user          *addr;
        /* remaining fields not shown in this excerpt */
};

struct io_connect {
        struct file                     *file;
        struct sockaddr __user          *addr;
        int                             addr_len;
        bool                            in_progress;
        bool                            seen_econnaborted;
};

struct io_sr_msg {
        struct file                     *file;
        union {
                struct compat_msghdr __user     *umsg_compat;
                struct user_msghdr __user       *umsg;
                void __user                     *buf;
        };
        /* len/done_io/msg_flags/flags fields not shown in this excerpt */
        /* initialised and used only by !msg send variants */
        u16                             addr_len;
        u16                             buf_group;
        void __user                     *addr;
        void __user                     *msg_control;
        /* used only for send zerocopy */
        struct io_kiocb                 *notif;
};
static inline bool io_check_multishot(struct io_kiocb *req,
                                      unsigned int issue_flags)
{
        /*
         * When ->locked_cq is set we only allow to post CQEs from the
         * original task context. Usual request completions will be handled
         * in other generic paths but multipoll may decide to post extra
         * cqes.
         */
        return !(issue_flags & IO_URING_F_IOWQ) ||
               !(issue_flags & IO_URING_F_MULTISHOT) ||
               !req->ctx->task_complete;
}
int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

        if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
                     sqe->buf_index || sqe->splice_fd_in))
                return -EINVAL;

        shutdown->how = READ_ONCE(sqe->len);
        req->flags |= REQ_F_FORCE_ASYNC;
        return 0;
}
int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
        struct socket *sock;
        int ret;

        WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;

        ret = __sys_shutdown_sock(sock, shutdown->how);
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}
static bool io_net_retry(struct socket *sock, int flags)
{
        if (!(flags & MSG_WAITALL))
                return false;
        return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_async_msghdr *hdr = req->async_data;

        if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
                return;

        /* Let normal cleanup path reap it if we fail adding to the cache */
        if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
                req->async_data = NULL;
                req->flags &= ~REQ_F_ASYNC_DATA;
        }
}
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
                                                  unsigned int issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_cache_entry *entry;
        struct io_async_msghdr *hdr;

        if (!(issue_flags & IO_URING_F_UNLOCKED)) {
                entry = io_alloc_cache_get(&ctx->netmsg_cache);
                if (entry) {
                        hdr = container_of(entry, struct io_async_msghdr, cache);
                        hdr->free_iov = NULL;
                        req->flags |= REQ_F_ASYNC_DATA;
                        req->async_data = hdr;
                        return hdr;
                }
        }

        if (!io_alloc_async_data(req)) {
                hdr = req->async_data;
                hdr->free_iov = NULL;
                return hdr;
        }
        return NULL;
}
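
/*
 * The cache lookup above is the counterpart of io_netmsg_recycle(): entries
 * put into ->netmsg_cache on completion are handed back out here on the next
 * locked (non-IO_URING_F_UNLOCKED) issue, avoiding a fresh allocation.
 */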
static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
        /* ->prep_async is always called from the submission context */
        return io_msg_alloc_async(req, 0);
}
static int io_setup_async_msg(struct io_kiocb *req,
                              struct io_async_msghdr *kmsg,
                              unsigned int issue_flags)
{
        struct io_async_msghdr *async_msg;

        if (req_has_async_data(req))
                return -EAGAIN;
        async_msg = io_msg_alloc_async(req, issue_flags);
        if (!async_msg) {
                kfree(kmsg->free_iov);
                return -ENOMEM;
        }
        req->flags |= REQ_F_NEED_CLEANUP;
        memcpy(async_msg, kmsg, sizeof(*kmsg));
        if (async_msg->msg.msg_name)
                async_msg->msg.msg_name = &async_msg->addr;

        /* if we're using fast_iov, set it to the new one */
        if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
                size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;

                async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
        }

        return -EAGAIN;
}
static int io_sendmsg_copy_hdr(struct io_kiocb *req,
                               struct io_async_msghdr *iomsg)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        int ret;

        iomsg->msg.msg_name = &iomsg->addr;
        iomsg->free_iov = iomsg->fast_iov;
        ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
                                  &iomsg->free_iov);
        /* save msg_control as sys_sendmsg() overwrites it */
        sr->msg_control = iomsg->msg.msg_control_user;
        return ret;
}
int io_send_prep_async(struct io_kiocb *req)
{
        struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *io;
        int ret;

        if (!zc->addr || req_has_async_data(req))
                return 0;
        io = io_msg_alloc_async_prep(req);
        if (!io)
                return -ENOMEM;
        ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
        return ret;
}
static int io_setup_async_addr(struct io_kiocb *req,
                               struct sockaddr_storage *addr_storage,
                               unsigned int issue_flags)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *io;

        if (!sr->addr || req_has_async_data(req))
                return -EAGAIN;
        io = io_msg_alloc_async(req, issue_flags);
        if (!io)
                return -ENOMEM;
        memcpy(&io->addr, addr_storage, sizeof(io->addr));
        return -EAGAIN;
}
int io_sendmsg_prep_async(struct io_kiocb *req)
{
        int ret;

        if (!io_msg_alloc_async_prep(req))
                return -ENOMEM;
        ret = io_sendmsg_copy_hdr(req, req->async_data);
        if (!ret)
                req->flags |= REQ_F_NEED_CLEANUP;
        return ret;
}
void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
        struct io_async_msghdr *io = req->async_data;

        kfree(io->free_iov);
}
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

        if (req->opcode == IORING_OP_SEND) {
                if (READ_ONCE(sqe->__pad3[0]))
                        return -EINVAL;
                sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
                sr->addr_len = READ_ONCE(sqe->addr_len);
        } else if (sqe->addr2 || sqe->file_index) {
                return -EINVAL;
        }

        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
        sr->flags = READ_ONCE(sqe->ioprio);
        if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
                return -EINVAL;
        sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
        if (sr->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
        sr->done_io = 0;
        return 0;
}
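
/*
 * Illustrative userspace sketch for the two opcodes prepared above (assumes
 * liburing; not part of this file's build):
 *
 *      io_uring_prep_send(sqe, sockfd, buf, len, 0);
 *      io_uring_prep_sendmsg(sqe2, sockfd, &msghdr, 0);
 *
 * For IORING_OP_SEND, sqe->addr2 and sqe->addr_len may optionally carry a
 * destination address, matching the sr->addr handling above.
 */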
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr iomsg, *kmsg;
        struct socket *sock;
        unsigned flags;
        int min_ret = 0;
        int ret;

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;

        if (req_has_async_data(req)) {
                kmsg = req->async_data;
                kmsg->msg.msg_control_user = sr->msg_control;
        } else {
                ret = io_sendmsg_copy_hdr(req, &iomsg);
                if (ret)
                        return ret;
                kmsg = &iomsg;
        }

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_msg(req, kmsg, issue_flags);

        flags = sr->msg_flags;
        if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&kmsg->msg.msg_iter);

        ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

        if (ret < min_ret) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return io_setup_async_msg(req, kmsg, issue_flags);
                if (ret > 0 && io_net_retry(sock, flags)) {
                        kmsg->msg.msg_controllen = 0;
                        kmsg->msg.msg_control = NULL;
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return io_setup_async_msg(req, kmsg, issue_flags);
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
        }
        /* fast path, check for non-NULL to avoid function call */
        if (kmsg->free_iov)
                kfree(kmsg->free_iov);
        req->flags &= ~REQ_F_NEED_CLEANUP;
        io_netmsg_recycle(req, issue_flags);
        if (ret >= 0)
                ret += sr->done_io;
        else if (sr->done_io)
                ret = sr->done_io;
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}
int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
        struct sockaddr_storage __address;
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct msghdr msg;
        struct socket *sock;
        unsigned flags;
        int min_ret = 0;
        int ret;

        msg.msg_name = NULL;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_namelen = 0;
        msg.msg_ubuf = NULL;

        if (sr->addr) {
                if (req_has_async_data(req)) {
                        struct io_async_msghdr *io = req->async_data;

                        msg.msg_name = &io->addr;
                } else {
                        ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
                        if (unlikely(ret < 0))
                                return ret;
                        msg.msg_name = (struct sockaddr *)&__address;
                }
                msg.msg_namelen = sr->addr_len;
        }

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_addr(req, &__address, issue_flags);

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;

        ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
        if (unlikely(ret))
                return ret;

        flags = sr->msg_flags;
        if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&msg.msg_iter);

        flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
        msg.msg_flags = flags;
        ret = sock_sendmsg(sock, &msg);
        if (ret < min_ret) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return io_setup_async_addr(req, &__address, issue_flags);

                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->len -= ret;
                        sr->buf += ret;
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return io_setup_async_addr(req, &__address, issue_flags);
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
        }
        if (ret >= 0)
                ret += sr->done_io;
        else if (sr->done_io)
                ret = sr->done_io;
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
        int hdr;

        if (iomsg->namelen < 0)
                return true;
        if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
                               iomsg->namelen, &hdr))
                return true;
        if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
                return true;

        return false;
}
static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
                                 struct io_async_msghdr *iomsg)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct user_msghdr msg;
        int ret;

        if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
                return -EFAULT;

        ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
        if (ret)
                return ret;

        if (req->flags & REQ_F_BUFFER_SELECT) {
                if (msg.msg_iovlen == 0) {
                        sr->len = iomsg->fast_iov[0].iov_len = 0;
                        iomsg->fast_iov[0].iov_base = NULL;
                        iomsg->free_iov = NULL;
                } else if (msg.msg_iovlen > 1) {
                        return -EINVAL;
                } else {
                        if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
                                return -EFAULT;
                        sr->len = iomsg->fast_iov[0].iov_len;
                        iomsg->free_iov = NULL;
                }

                if (req->flags & REQ_F_APOLL_MULTISHOT) {
                        iomsg->namelen = msg.msg_namelen;
                        iomsg->controllen = msg.msg_controllen;
                        if (io_recvmsg_multishot_overflow(iomsg))
                                return -EOVERFLOW;
                }
        } else {
                iomsg->free_iov = iomsg->fast_iov;
                ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
                                     &iomsg->free_iov, &iomsg->msg.msg_iter,
                                     false);
                if (ret > 0)
                        ret = 0;
        }

        return ret;
}
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
                                        struct io_async_msghdr *iomsg)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct compat_msghdr msg;
        struct compat_iovec __user *uiov;
        int ret;

        if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
                return -EFAULT;

        ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
        if (ret)
                return ret;

        uiov = compat_ptr(msg.msg_iov);
        if (req->flags & REQ_F_BUFFER_SELECT) {
                compat_ssize_t clen;

                iomsg->free_iov = NULL;
                if (msg.msg_iovlen == 0) {
                        sr->len = 0;
                } else if (msg.msg_iovlen > 1) {
                        return -EINVAL;
                } else {
                        if (!access_ok(uiov, sizeof(*uiov)))
                                return -EFAULT;
                        if (__get_user(clen, &uiov->iov_len))
                                return -EFAULT;
                        if (clen < 0)
                                return -EINVAL;
                        sr->len = clen;
                }

                if (req->flags & REQ_F_APOLL_MULTISHOT) {
                        iomsg->namelen = msg.msg_namelen;
                        iomsg->controllen = msg.msg_controllen;
                        if (io_recvmsg_multishot_overflow(iomsg))
                                return -EOVERFLOW;
                }
        } else {
                iomsg->free_iov = iomsg->fast_iov;
                ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
                                     UIO_FASTIOV, &iomsg->free_iov,
                                     &iomsg->msg.msg_iter, true);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
static int io_recvmsg_copy_hdr(struct io_kiocb *req,
                               struct io_async_msghdr *iomsg)
{
        iomsg->msg.msg_name = &iomsg->addr;
        iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

        return __io_recvmsg_copy_hdr(req, iomsg);
}
int io_recvmsg_prep_async(struct io_kiocb *req)
{
        int ret;

        if (!io_msg_alloc_async_prep(req))
                return -ENOMEM;
        ret = io_recvmsg_copy_hdr(req, req->async_data);
        if (!ret)
                req->flags |= REQ_F_NEED_CLEANUP;
        return ret;
}
#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

        if (unlikely(sqe->file_index || sqe->addr2))
                return -EINVAL;

        sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
        sr->len = READ_ONCE(sqe->len);
        sr->flags = READ_ONCE(sqe->ioprio);
        if (sr->flags & ~(RECVMSG_FLAGS))
                return -EINVAL;
        sr->msg_flags = READ_ONCE(sqe->msg_flags);
        if (sr->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;
        if (sr->msg_flags & MSG_ERRQUEUE)
                req->flags |= REQ_F_CLEAR_POLLIN;
        if (sr->flags & IORING_RECV_MULTISHOT) {
                if (!(req->flags & REQ_F_BUFFER_SELECT))
                        return -EINVAL;
                if (sr->msg_flags & MSG_WAITALL)
                        return -EINVAL;
                if (req->opcode == IORING_OP_RECV && sr->len)
                        return -EINVAL;
                req->flags |= REQ_F_APOLL_MULTISHOT;
                /*
                 * Store the buffer group for this multishot receive separately,
                 * as if we end up doing an io-wq based issue that selects a
                 * buffer, it has to be committed immediately and that will
                 * clear ->buf_list. This means we lose the link to the buffer
                 * list, and the eventual buffer put on completion then cannot
                 * restore it.
                 */
                sr->buf_group = req->buf_index;
        }

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
        sr->done_io = 0;
        return 0;
}
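
/*
 * Illustrative userspace sketch for multishot receive (assumes liburing and
 * a registered provided-buffer group; not part of this file's build):
 *
 *      io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
 *      sqe->buf_group = BGID;
 *      sqe->flags |= IOSQE_BUFFER_SELECT;
 *
 * As enforced above, multishot requires buffer selection, forbids
 * MSG_WAITALL, and for IORING_OP_RECV requires a zero length.
 */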
static inline void io_recv_prep_retry(struct io_kiocb *req)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

        sr->done_io = 0;
        sr->len = 0; /* get from the provided buffer */
        req->buf_index = sr->buf_group;
}
/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
                                  struct msghdr *msg, bool mshot_finished,
                                  unsigned issue_flags)
{
        unsigned int cflags;

        cflags = io_put_kbuf(req, issue_flags);
        if (msg->msg_inq && msg->msg_inq != -1)
                cflags |= IORING_CQE_F_SOCK_NONEMPTY;

        if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                io_req_set_res(req, *ret, cflags);
                *ret = IOU_OK;
                return true;
        }

        if (!mshot_finished) {
                if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
                                        *ret, cflags | IORING_CQE_F_MORE)) {
                        io_recv_prep_retry(req);
                        /* Known not-empty or unknown state, retry */
                        if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
                            msg->msg_inq == -1)
                                return false;
                        if (issue_flags & IO_URING_F_MULTISHOT)
                                *ret = IOU_ISSUE_SKIP_COMPLETE;
                        else
                                *ret = -EAGAIN;
                        return true;
                }
                /* Otherwise stop multishot but use the current result. */
        }

        io_req_set_res(req, *ret, cflags);

        if (issue_flags & IO_URING_F_MULTISHOT)
                *ret = IOU_STOP_MULTISHOT;
        else
                *ret = IOU_OK;
        return true;
}
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
                                     struct io_sr_msg *sr, void __user **buf,
                                     size_t *len)
{
        unsigned long ubuf = (unsigned long) *buf;
        unsigned long hdr;

        hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
                kmsg->controllen;
        if (*len < hdr)
                return -EFAULT;

        if (kmsg->controllen) {
                unsigned long control = ubuf + hdr - kmsg->controllen;

                kmsg->msg.msg_control_user = (void __user *) control;
                kmsg->msg.msg_controllen = kmsg->controllen;
        }

        sr->buf = *buf; /* stash for later copy */
        *buf = (void __user *) (ubuf + hdr);
        kmsg->payloadlen = *len = *len - hdr;
        return 0;
}
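
/*
 * Resulting layout of the selected buffer for a multishot recvmsg, as set up
 * above and filled in by io_recvmsg_multishot() below:
 *
 *      [struct io_uring_recvmsg_out][name (namelen)][control (controllen)][payload]
 *
 * The payload area starts at *buf after this call, while sr->buf keeps
 * pointing at the start of the whole region for the final copy_to_user().
 */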
struct io_recvmsg_multishot_hdr {
        struct io_uring_recvmsg_out msg;
        struct sockaddr_storage addr;
};
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
                                struct io_async_msghdr *kmsg,
                                unsigned int flags, bool *finished)
{
        int err;
        int copy_len;
        struct io_recvmsg_multishot_hdr hdr;

        if (kmsg->namelen)
                kmsg->msg.msg_name = &hdr.addr;
        kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
        kmsg->msg.msg_namelen = 0;

        if (sock->file->f_flags & O_NONBLOCK)
                flags |= MSG_DONTWAIT;

        err = sock_recvmsg(sock, &kmsg->msg, flags);
        *finished = err <= 0;
        if (err < 0)
                return err;

        hdr.msg = (struct io_uring_recvmsg_out) {
                .controllen = kmsg->controllen - kmsg->msg.msg_controllen,
                .flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
        };

        hdr.msg.payloadlen = err;
        if (err > kmsg->payloadlen)
                err = kmsg->payloadlen;

        copy_len = sizeof(struct io_uring_recvmsg_out);
        if (kmsg->msg.msg_namelen > kmsg->namelen)
                copy_len += kmsg->namelen;
        else
                copy_len += kmsg->msg.msg_namelen;

        /*
         *      "fromlen shall refer to the value before truncation.."
         *                      1003.1g
         */
        hdr.msg.namelen = kmsg->msg.msg_namelen;

        /* ensure that there is no gap between hdr and sockaddr_storage */
        BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
                     sizeof(struct io_uring_recvmsg_out));
        if (copy_to_user(io->buf, &hdr, copy_len)) {
                *finished = true;
                return -EFAULT;
        }

        return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
                        kmsg->controllen + err;
}
int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr iomsg, *kmsg;
        struct socket *sock;
        unsigned flags;
        int ret, min_ret = 0;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        bool mshot_finished = true;

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;

        if (req_has_async_data(req)) {
                kmsg = req->async_data;
        } else {
                ret = io_recvmsg_copy_hdr(req, &iomsg);
                if (ret)
                        return ret;
                kmsg = &iomsg;
        }

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_msg(req, kmsg, issue_flags);

        if (!io_check_multishot(req, issue_flags))
                return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
        if (io_do_buffer_select(req)) {
                void __user *buf;
                size_t len = sr->len;

                buf = io_buffer_select(req, &len, issue_flags);
                if (!buf)
                        return -ENOBUFS;

                if (req->flags & REQ_F_APOLL_MULTISHOT) {
                        ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
                        if (ret) {
                                io_kbuf_recycle(req, issue_flags);
                                return ret;
                        }
                }

                iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
        }

        flags = sr->msg_flags;
        if (force_nonblock)
                flags |= MSG_DONTWAIT;

        kmsg->msg.msg_get_inq = 1;
        kmsg->msg.msg_inq = -1;
        if (req->flags & REQ_F_APOLL_MULTISHOT) {
                ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
                                           &mshot_finished);
        } else {
                /* disable partial retry for recvmsg with cmsg attached */
                if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
                        min_ret = iov_iter_count(&kmsg->msg.msg_iter);

                ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
                                         kmsg->uaddr, flags);
        }

        if (ret < min_ret) {
                if (ret == -EAGAIN && force_nonblock) {
                        ret = io_setup_async_msg(req, kmsg, issue_flags);
                        if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
                                io_kbuf_recycle(req, issue_flags);
                                return IOU_ISSUE_SKIP_COMPLETE;
                        }
                        return ret;
                }
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return io_setup_async_msg(req, kmsg, issue_flags);
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
                req_set_fail(req);
        }

        if (ret > 0)
                ret += sr->done_io;
        else if (sr->done_io)
                ret = sr->done_io;
        else
                io_kbuf_recycle(req, issue_flags);

        if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
                goto retry_multishot;

        if (mshot_finished) {
                /* fast path, check for non-NULL to avoid function call */
                if (kmsg->free_iov)
                        kfree(kmsg->free_iov);
                io_netmsg_recycle(req, issue_flags);
                req->flags &= ~REQ_F_NEED_CLEANUP;
        }

        return ret;
}
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct msghdr msg;
        struct socket *sock;
        unsigned flags;
        int ret, min_ret = 0;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        size_t len = sr->len;

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return -EAGAIN;

        if (!io_check_multishot(req, issue_flags))
                return -EAGAIN;

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;

        msg.msg_name = NULL;
        msg.msg_namelen = 0;
        msg.msg_control = NULL;
        msg.msg_get_inq = 1;
        msg.msg_controllen = 0;
        msg.msg_iocb = NULL;
        msg.msg_ubuf = NULL;

retry_multishot:
        if (io_do_buffer_select(req)) {
                void __user *buf;

                buf = io_buffer_select(req, &len, issue_flags);
                if (!buf)
                        return -ENOBUFS;
                sr->buf = buf;
        }

        ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
        if (unlikely(ret))
                goto out_free;

        msg.msg_inq = -1;
        msg.msg_flags = 0;

        flags = sr->msg_flags;
        if (force_nonblock)
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&msg.msg_iter);

        ret = sock_recvmsg(sock, &msg, flags);
        if (ret < min_ret) {
                if (ret == -EAGAIN && force_nonblock) {
                        if (issue_flags & IO_URING_F_MULTISHOT) {
                                io_kbuf_recycle(req, issue_flags);
                                return IOU_ISSUE_SKIP_COMPLETE;
                        }

                        return -EAGAIN;
                }
                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->len -= ret;
                        sr->buf += ret;
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return -EAGAIN;
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
        } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
                req_set_fail(req);
        }

        if (ret > 0)
                ret += sr->done_io;
        else if (sr->done_io)
                ret = sr->done_io;
        else
                io_kbuf_recycle(req, issue_flags);

        if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
                goto retry_multishot;

        return ret;
}
void io_send_zc_cleanup(struct io_kiocb *req)
{
        struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr *io;

        if (req_has_async_data(req)) {
                io = req->async_data;
                /* might be ->fast_iov if *msg_copy_hdr failed */
                if (io->free_iov != io->fast_iov)
                        kfree(io->free_iov);
        }
        if (zc->notif) {
                io_notif_flush(zc->notif);
                zc->notif = NULL;
        }
}
#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_kiocb *notif;

        if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
                return -EINVAL;
        /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
        if (req->flags & REQ_F_CQE_SKIP)
                return -EINVAL;

        notif = zc->notif = io_alloc_notif(ctx);
        if (!notif)
                return -ENOMEM;
        notif->cqe.user_data = req->cqe.user_data;
        notif->cqe.res = 0;
        notif->cqe.flags = IORING_CQE_F_NOTIF;
        req->flags |= REQ_F_NEED_CLEANUP;

        zc->flags = READ_ONCE(sqe->ioprio);
        if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
                if (zc->flags & ~IO_ZC_FLAGS_VALID)
                        return -EINVAL;
                if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
                        io_notif_set_extended(notif);
                        io_notif_to_data(notif)->zc_report = true;
                }
        }

        if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
                unsigned idx = READ_ONCE(sqe->buf_index);

                if (unlikely(idx >= ctx->nr_user_bufs))
                        return -EFAULT;
                idx = array_index_nospec(idx, ctx->nr_user_bufs);
                req->imu = READ_ONCE(ctx->user_bufs[idx]);
                io_req_set_rsrc_node(notif, ctx, 0);
        }

        if (req->opcode == IORING_OP_SEND_ZC) {
                if (READ_ONCE(sqe->__pad3[0]))
                        return -EINVAL;
                zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
                zc->addr_len = READ_ONCE(sqe->addr_len);
        } else {
                if (unlikely(sqe->addr2 || sqe->file_index))
                        return -EINVAL;
                if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
                        return -EINVAL;
        }

        zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
        zc->len = READ_ONCE(sqe->len);
        zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
        if (zc->msg_flags & MSG_DONTWAIT)
                req->flags |= REQ_F_NOWAIT;

        zc->done_io = 0;

#ifdef CONFIG_COMPAT
        if (req->ctx->compat)
                zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
        return 0;
}
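
/*
 * Illustrative userspace sketch for zerocopy send (assumes liburing; not
 * part of this file's build):
 *
 *      io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
 *
 * A zerocopy send posts two CQEs: the usual completion carrying
 * IORING_CQE_F_MORE (set via io_req_set_res() in io_send_zc()), followed by
 * a notification CQE flagged IORING_CQE_F_NOTIF once the kernel is done with
 * the buffer, matching the notif set up above.
 */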
static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
                                 struct iov_iter *from, size_t length)
{
        skb_zcopy_downgrade_managed(skb);
        return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
                           struct iov_iter *from, size_t length)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int frag = shinfo->nr_frags;
        int ret = 0;
        struct bvec_iter bi;
        ssize_t copied = 0;
        unsigned long truesize = 0;

        if (!frag)
                shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
        else if (unlikely(!skb_zcopy_managed(skb)))
                return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

        bi.bi_size = min(from->count, length);
        bi.bi_bvec_done = from->iov_offset;
        bi.bi_idx = 0;

        while (bi.bi_size && frag < MAX_SKB_FRAGS) {
                struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

                copied += v.bv_len;
                truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
                __skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
                                           v.bv_offset, v.bv_len);
                bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
        }
        if (bi.bi_size)
                ret = -EMSGSIZE;

        shinfo->nr_frags = frag;
        from->bvec += bi.bi_idx;
        from->nr_segs -= bi.bi_idx;
        from->count -= copied;
        from->iov_offset = bi.bi_bvec_done;

        skb->data_len += copied;
        skb->len += copied;
        skb->truesize += truesize;

        if (sk && sk->sk_type == SOCK_STREAM) {
                sk_wmem_queued_add(sk, truesize);
                if (!skb_zcopy_pure(skb))
                        sk_mem_charge(sk, truesize);
        } else {
                refcount_add(truesize, &skb->sk->sk_wmem_alloc);
        }

        return ret;
}
int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
        struct sockaddr_storage __address;
        struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct msghdr msg;
        struct socket *sock;
        unsigned msg_flags;
        int ret, min_ret = 0;

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;
        if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
                return -EOPNOTSUPP;

        msg.msg_name = NULL;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_namelen = 0;

        if (zc->addr) {
                if (req_has_async_data(req)) {
                        struct io_async_msghdr *io = req->async_data;

                        msg.msg_name = &io->addr;
                } else {
                        ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
                        if (unlikely(ret < 0))
                                return ret;
                        msg.msg_name = (struct sockaddr *)&__address;
                }
                msg.msg_namelen = zc->addr_len;
        }

        if (!(req->flags & REQ_F_POLLED) &&
            (zc->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_addr(req, &__address, issue_flags);

        if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
                ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
                                      (u64)(uintptr_t)zc->buf, zc->len);
                if (unlikely(ret))
                        return ret;
                msg.sg_from_iter = io_sg_from_iter;
        } else {
                io_notif_set_extended(zc->notif);
                ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
                if (unlikely(ret))
                        return ret;
                ret = io_notif_account_mem(zc->notif, zc->len);
                if (ret)
                        return ret;
                msg.sg_from_iter = io_sg_from_iter_iovec;
        }

        msg_flags = zc->msg_flags | MSG_ZEROCOPY;
        if (issue_flags & IO_URING_F_NONBLOCK)
                msg_flags |= MSG_DONTWAIT;
        if (msg_flags & MSG_WAITALL)
                min_ret = iov_iter_count(&msg.msg_iter);
        msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

        msg.msg_flags = msg_flags;
        msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
        ret = sock_sendmsg(sock, &msg);

        if (unlikely(ret < min_ret)) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return io_setup_async_addr(req, &__address, issue_flags);

                if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
                        zc->len -= ret;
                        zc->buf += ret;
                        zc->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return io_setup_async_addr(req, &__address, issue_flags);
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
        }

        if (ret >= 0)
                ret += zc->done_io;
        else if (zc->done_io)
                ret = zc->done_io;

        /*
         * If we're in io-wq we can't rely on tw ordering guarantees, defer
         * flushing notif to io_send_zc_cleanup()
         */
        if (!(issue_flags & IO_URING_F_UNLOCKED)) {
                io_notif_flush(zc->notif);
                req->flags &= ~REQ_F_NEED_CLEANUP;
        }
        io_req_set_res(req, ret, IORING_CQE_F_MORE);
        return IOU_OK;
}
int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
        struct io_async_msghdr iomsg, *kmsg;
        struct socket *sock;
        unsigned flags;
        int ret, min_ret = 0;

        io_notif_set_extended(sr->notif);

        sock = sock_from_file(req->file);
        if (unlikely(!sock))
                return -ENOTSOCK;
        if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
                return -EOPNOTSUPP;

        if (req_has_async_data(req)) {
                kmsg = req->async_data;
        } else {
                ret = io_sendmsg_copy_hdr(req, &iomsg);
                if (ret)
                        return ret;
                kmsg = &iomsg;
        }

        if (!(req->flags & REQ_F_POLLED) &&
            (sr->flags & IORING_RECVSEND_POLL_FIRST))
                return io_setup_async_msg(req, kmsg, issue_flags);

        flags = sr->msg_flags | MSG_ZEROCOPY;
        if (issue_flags & IO_URING_F_NONBLOCK)
                flags |= MSG_DONTWAIT;
        if (flags & MSG_WAITALL)
                min_ret = iov_iter_count(&kmsg->msg.msg_iter);

        kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
        kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
        ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

        if (unlikely(ret < min_ret)) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return io_setup_async_msg(req, kmsg, issue_flags);

                if (ret > 0 && io_net_retry(sock, flags)) {
                        sr->done_io += ret;
                        req->flags |= REQ_F_PARTIAL_IO;
                        return io_setup_async_msg(req, kmsg, issue_flags);
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
        }
        /* fast path, check for non-NULL to avoid function call */
        if (kmsg->free_iov) {
                kfree(kmsg->free_iov);
                kmsg->free_iov = NULL;
        }

        io_netmsg_recycle(req, issue_flags);
        if (ret >= 0)
                ret += sr->done_io;
        else if (sr->done_io)
                ret = sr->done_io;

        /*
         * If we're in io-wq we can't rely on tw ordering guarantees, defer
         * flushing notif to io_send_zc_cleanup()
         */
        if (!(issue_flags & IO_URING_F_UNLOCKED)) {
                io_notif_flush(sr->notif);
                req->flags &= ~REQ_F_NEED_CLEANUP;
        }
        io_req_set_res(req, ret, IORING_CQE_F_MORE);
        return IOU_OK;
}
void io_sendrecv_fail(struct io_kiocb *req)
{
        struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

        if (req->flags & REQ_F_PARTIAL_IO)
                req->cqe.res = sr->done_io;

        if ((req->flags & REQ_F_NEED_CLEANUP) &&
            (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
                req->cqe.flags |= IORING_CQE_F_MORE;
}
int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
        unsigned flags;

        if (sqe->len || sqe->buf_index)
                return -EINVAL;

        accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        accept->flags = READ_ONCE(sqe->accept_flags);
        accept->nofile = rlimit(RLIMIT_NOFILE);
        flags = READ_ONCE(sqe->ioprio);
        if (flags & ~IORING_ACCEPT_MULTISHOT)
                return -EINVAL;

        accept->file_slot = READ_ONCE(sqe->file_index);
        if (accept->file_slot) {
                if (accept->flags & SOCK_CLOEXEC)
                        return -EINVAL;
                if (flags & IORING_ACCEPT_MULTISHOT &&
                    accept->file_slot != IORING_FILE_INDEX_ALLOC)
                        return -EINVAL;
        }
        if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
                return -EINVAL;
        if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
                accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
        if (flags & IORING_ACCEPT_MULTISHOT)
                req->flags |= REQ_F_APOLL_MULTISHOT;
        return 0;
}
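
/*
 * Illustrative userspace sketch for multishot accept (assumes liburing; not
 * part of this file's build):
 *
 *      io_uring_prep_multishot_accept(sqe, listen_fd, NULL, NULL, 0);
 *
 * The multishot flag travels in sqe->ioprio, as read above, and cannot be
 * combined with a fixed file slot other than IORING_FILE_INDEX_ALLOC.
 */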
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
        unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
        bool fixed = !!accept->file_slot;
        struct file *file;
        int ret, fd;

        if (!io_check_multishot(req, issue_flags))
                return -EAGAIN;
retry:
        if (!fixed) {
                fd = __get_unused_fd_flags(accept->flags, accept->nofile);
                if (unlikely(fd < 0))
                        return fd;
        }
        file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
                         accept->flags);
        if (IS_ERR(file)) {
                if (!fixed)
                        put_unused_fd(fd);
                ret = PTR_ERR(file);
                if (ret == -EAGAIN && force_nonblock) {
                        /*
                         * if it's multishot and polled, we don't need to
                         * return EAGAIN to arm the poll infra since it
                         * has already been done
                         */
                        if (issue_flags & IO_URING_F_MULTISHOT)
                                ret = IOU_ISSUE_SKIP_COMPLETE;
                        return ret;
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
        } else if (!fixed) {
                fd_install(fd, file);
                ret = fd;
        } else {
                ret = io_fixed_fd_install(req, issue_flags, file,
                                          accept->file_slot);
        }

        if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                io_req_set_res(req, ret, 0);
                return IOU_OK;
        }

        if (ret < 0)
                return ret;
        if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
                                ret, IORING_CQE_F_MORE))
                goto retry;

        io_req_set_res(req, ret, 0);
        return IOU_STOP_MULTISHOT;
}
int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

        if (sqe->addr || sqe->rw_flags || sqe->buf_index)
                return -EINVAL;

        sock->domain = READ_ONCE(sqe->fd);
        sock->type = READ_ONCE(sqe->off);
        sock->protocol = READ_ONCE(sqe->len);
        sock->file_slot = READ_ONCE(sqe->file_index);
        sock->nofile = rlimit(RLIMIT_NOFILE);

        sock->flags = sock->type & ~SOCK_TYPE_MASK;
        if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
                return -EINVAL;
        if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
                return -EINVAL;
        return 0;
}
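
/*
 * Illustrative userspace sketch (assumes liburing; not part of this file's
 * build):
 *
 *      io_uring_prep_socket(sqe, AF_INET, SOCK_STREAM, 0, 0);
 *
 * Note the SQE field mapping used above: fd carries the domain, off the
 * type, and len the protocol.
 */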
int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
        bool fixed = !!sock->file_slot;
        struct file *file;
        int ret, fd;

        if (!fixed) {
                fd = __get_unused_fd_flags(sock->flags, sock->nofile);
                if (unlikely(fd < 0))
                        return fd;
        }
        file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
        if (IS_ERR(file)) {
                if (!fixed)
                        put_unused_fd(fd);
                ret = PTR_ERR(file);
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return -EAGAIN;
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
        } else if (!fixed) {
                fd_install(fd, file);
                ret = fd;
        } else {
                ret = io_fixed_fd_install(req, issue_flags, file,
                                          sock->file_slot);
        }
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}
int io_connect_prep_async(struct io_kiocb *req)
{
        struct io_async_connect *io = req->async_data;
        struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

        return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}
int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

        if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
                return -EINVAL;

        conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        conn->addr_len = READ_ONCE(sqe->addr2);
        conn->in_progress = conn->seen_econnaborted = false;
        return 0;
}
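
/*
 * Illustrative userspace sketch (assumes liburing; not part of this file's
 * build):
 *
 *      struct sockaddr_in dst = { ... };
 *      io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * The address length travels in sqe->addr2, as read above.
 */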
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
        struct io_async_connect __io, *io;
        unsigned file_flags;
        int ret;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

        if (connect->in_progress) {
                struct socket *socket;

                ret = -ENOTSOCK;
                socket = sock_from_file(req->file);
                if (socket)
                        ret = sock_error(socket->sk);
                goto out;
        }

        if (req_has_async_data(req)) {
                io = req->async_data;
        } else {
                ret = move_addr_to_kernel(connect->addr,
                                          connect->addr_len,
                                          &__io.address);
                if (ret)
                        goto out;
                io = &__io;
        }

        file_flags = force_nonblock ? O_NONBLOCK : 0;

        ret = __sys_connect_file(req->file, &io->address,
                                 connect->addr_len, file_flags);
        if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
            && force_nonblock) {
                if (ret == -EINPROGRESS) {
                        connect->in_progress = true;
                        return -EAGAIN;
                }
                if (ret == -ECONNABORTED) {
                        if (connect->seen_econnaborted)
                                goto out;
                        connect->seen_econnaborted = true;
                }
                if (req_has_async_data(req))
                        return -EAGAIN;
                if (io_alloc_async_data(req)) {
                        ret = -ENOMEM;
                        goto out;
                }
                memcpy(req->async_data, &__io, sizeof(__io));
                return -EAGAIN;
        }
        if (ret == -ERESTARTSYS)
                ret = -EINTR;
out:
        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}
void io_netmsg_cache_free(struct io_cache_entry *entry)
{
        kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif