// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)

struct io_shutdown {
	struct file			*file;
	int				how;
};

struct io_accept {
	struct file			*file;
	struct sockaddr __user		*addr;
	int __user			*addr_len;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_socket {
	struct file			*file;
	int				domain;
	int				type;
	int				protocol;
	int				flags;
	u32				file_slot;
	unsigned long			nofile;
};

struct io_connect {
	struct file			*file;
	struct sockaddr __user		*addr;
	int				addr_len;
	bool				in_progress;
	bool				seen_econnaborted;
};

struct io_sr_msg {
	struct file			*file;
	union {
		struct compat_msghdr __user	*umsg_compat;
		struct user_msghdr __user	*umsg;
		void __user			*buf;
	};
	unsigned			len;
	unsigned			done_io;
	unsigned			msg_flags;
	u16				flags;
	/* initialised and used only by !msg send variants */
	u16				addr_len;
	u16				buf_group;
	void __user			*addr;
	void __user			*msg_control;
	/* used only for send zerocopy */
	struct io_kiocb			*notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow to post CQEs from the original
	 * task context. Usual request completions will be handled in other
	 * generic paths but multipoll may decide to post extra cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

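/*
 * A short transfer on a stream-oriented socket with MSG_WAITALL set can be
 * retried for the remainder; datagram sockets cannot, as a retry would
 * consume a different datagram.
 */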
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

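/*
 * Return the async msghdr to the per-ring cache rather than freeing it, so
 * the next network request can reuse it. Only done when the ring is locked,
 * i.e. not IO_URING_F_UNLOCKED.
 */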
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

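/*
 * Allocate async msghdr state, preferably from the ring's netmsg_cache.
 * Falls back to a regular async data allocation if the cache is empty or
 * the ring isn't locked.
 */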
static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;

			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

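/*
 * Stash the on-stack msghdr into freshly allocated async data so the request
 * can be retried later. Returns -EAGAIN so the core reissues the request, or
 * -ENOMEM if the allocation failed.
 */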
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;

	if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
		return -EAGAIN;

	/* if we were using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov;
		async_msg->msg.msg_iter.__iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	int ret;

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
	/* save msg_control as sys_sendmsg() overwrites it */
	sr->msg_control = iomsg->msg.msg_control_user;
	return ret;
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

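/*
 * As io_setup_async_msg(), but for the plain send/send-zc variants that only
 * carry a destination address rather than a full msghdr.
 */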
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

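/*
 * Shared prep for IORING_OP_SEND and IORING_OP_SENDMSG. For the former, an
 * optional destination address may be passed in addr2/addr_len.
 */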
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
		kmsg->msg.msg_control_user = sr->msg_control;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			kmsg->msg.msg_controllen = 0;
			kmsg->msg.msg_control = NULL;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				   UIO_FASTIOV, &iomsg->free_iov,
				   &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->msg.msg_iter.nr_segs = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  struct msghdr *msg, bool mshot_finished,
				  unsigned issue_flags)
{
	unsigned int cflags;

	cflags = io_put_kbuf(req, issue_flags);
	if (msg->msg_inq && msg->msg_inq != -1)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
					*ret, cflags | IORING_CQE_F_MORE)) {
			io_recv_prep_retry(req);
			/* Known not-empty or unknown state, retry */
			if (cflags & IORING_CQE_F_SOCK_NONEMPTY ||
			    msg->msg_inq == -1)
				return false;
			if (issue_flags & IO_URING_F_MULTISHOT)
				*ret = IOU_ISSUE_SKIP_COMPLETE;
			else
				*ret = -EAGAIN;
			return true;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

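/*
 * Carve up the selected buffer for a multishot recvmsg: the front of the
 * buffer holds struct io_uring_recvmsg_out plus room for the name and
 * control data, and the remainder receives the payload.
 */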
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

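/*
 * Receive directly into the multishot-provided buffer: the header described
 * above is copied out first, followed by any address data, then the payload.
 * Returns the total number of bytes written to the buffer.
 */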
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 *	"fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;

	kmsg->msg.msg_get_inq = 1;
	kmsg->msg.msg_inq = -1;
	if (req->flags & REQ_F_APOLL_MULTISHOT) {
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	} else {
		/* disable partial retry for recvmsg with cmsg attached */
		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
			min_ret = iov_iter_count(&kmsg->msg.msg_iter);

		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
	}

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &kmsg->msg, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_inq = -1;
	msg.msg_flags = 0;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	if (!io_recv_finish(req, &ret, &msg, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

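/*
 * Map the bvec-backed iterator straight into skb frags without copying or
 * taking page references; the zerocopy notification keeps the pages pinned
 * instead. Falls back to __zerocopy_sg_from_iter() if the skb can't manage
 * frag references itself.
 */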
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
					(u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);
	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

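/*
 * Accept a connection, optionally installing the resulting file into a
 * fixed file slot. In multishot mode, keep posting one CQE per accepted
 * connection until an error or a failed CQE post terminates the request.
 */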
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
						accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
				ret, IORING_CQE_F_MORE))
		goto retry;

	io_req_set_res(req, ret, 0);
	return IOU_STOP_MULTISHOT;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = conn->seen_econnaborted = false;
	return 0;
}

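/*
 * Connect may take several attempts: a nonblocking -EINPROGRESS, and a
 * first -ECONNABORTED, are treated as retryable, with the address stashed
 * in async data for the next issue.
 */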
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
						connect->addr_len,
						&__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
					connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
	    && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else if (ret == -ECONNABORTED) {
			if (connect->seen_econnaborted)
				goto out;
			connect->seen_econnaborted = true;
		}
		if (req_has_async_data(req))
			return -EAGAIN;
		if (io_alloc_async_data(req)) {
			ret = -ENOMEM;
			goto out;
		}
		memcpy(req->async_data, &__io, sizeof(__io));
		return -EAGAIN;
	}
	if (connect->in_progress) {
		/*
		 * At least bluetooth will return -EBADFD on a re-connect
		 * attempt, and it's (supposedly) also valid to get -EISCONN
		 * which means the previous result is good. For both of these,
		 * grab the sock_error() and use that for the completion.
		 */
		if (ret == -EBADFD || ret == -EISCONN)
			ret = sock_error(sock_from_file(req->file)->sk);
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif