// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

#define MPTCP_SAME_STATE TCP_MAX_STATES

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

struct mptcp_skb_cb {
	u32 offset;
};

#define MPTCP_SKB_CB(__skb) ((struct mptcp_skb_cb *)&((__skb)->cb[0]))

static struct percpu_counter mptcp_sockets_allocated;

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}

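/* The connection falls back to plain TCP when the initial subflow completed
 * the handshake without the peer agreeing to MPTCP.
 */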
static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
{
	return msk->first && !sk_is_mptcp(msk->first);
}

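/* Returns the msk socket, re-targeted at the plain TCP stream ops, when the
 * underlying protocol turned out to be tcp_prot/tcpv6_prot; NULL otherwise.
 */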
static struct socket *mptcp_is_tcpsk(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (sock->sk != sk)
		return NULL;

	if (unlikely(sk->sk_prot == &tcp_prot)) {
		/* we are being invoked after mptcp_accept() has
		 * accepted a non-mp-capable flow: sk is a tcp_sk,
		 * not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
		return sock;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
		sock->ops = &inet6_stream_ops;
		return sock;
#endif
	}

	return NULL;
}

static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
	struct socket *sock;

	sock_owned_by_me((const struct sock *)msk);

	sock = mptcp_is_tcpsk((struct sock *)msk);
	if (unlikely(sock))
		return sock;

	if (likely(!__mptcp_needs_tcp_fallback(msk)))
		return NULL;

	if (msk->subflow) {
		release_sock((struct sock *)msk);
		return msk->subflow;
	}

	return NULL;
}

static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
{
	return ((struct sock *)msk)->sk_state == TCP_CLOSE;
}

static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock))
		return ssock;

	ssock = __mptcp_nmpc_socket(msk);
	if (ssock)
		goto set_state;

	if (!__mptcp_can_create_subflow(msk))
		return ERR_PTR(-EINVAL);

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return ERR_PTR(err);

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	subflow->request_mptcp = 1;

set_state:
	if (state != MPTCP_SAME_STATE)
		inet_sk_state_store(sk, state);
	return ssock;
}

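/* Move a single skb from the subflow receive queue to the MPTCP-level receive
 * queue: ownership (and memory accounting) is transferred to the msk, and the
 * MPTCP-level ack_seq advances by the amount of newly received data.
 */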
static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			     struct sk_buff *skb,
			     unsigned int offset, size_t copy_len)
{
	struct sock *sk = (struct sock *)msk;

	__skb_unlink(skb, &ssk->sk_receive_queue);
	skb_set_owner_r(skb, sk);
	__skb_queue_tail(&sk->sk_receive_queue, skb);

	msk->ack_seq += copy_len;
	MPTCP_SKB_CB(skb)->offset = offset;
}

/* both sockets must be locked */
static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
				    struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);

	/* revalidate data sequence number.
	 *
	 * mptcp_subflow_data_available() is usually called
	 * without msk lock. It's unlikely (but possible)
	 * that msk->ack_seq has been advanced since the last
	 * call found in-sequence data.
	 */
	if (likely(dsn == msk->ack_seq))
		return true;

	subflow->data_avail = 0;
	return mptcp_subflow_data_available(ssk);
}

static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;
	bool more_data_avail;
	struct tcp_sock *tp;
	bool done = false;

	if (!mptcp_subflow_dsn_valid(msk, ssk)) {
		*bytes = 0;
		return false;
	}

	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvbuf = max(ssk->sk_rcvbuf, sk->sk_rcvbuf);

		if (rcvbuf > sk->sk_rcvbuf)
			sk->sk_rcvbuf = rcvbuf;
	}

	tp = tcp_sk(ssk);
	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
		if (!skb)
			break;

		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin) {
			done = true;
			seq++;
		}

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			if (tp->urg_data)
				done = true;

			__mptcp_move_skb(msk, ssk, skb, offset, len);
			seq += len;
			moved += len;

			if (WARN_ON_ONCE(map_remaining < len))
				break;
		} else {
			WARN_ON_ONCE(!fin);
			sk_eat_skb(ssk, skb);
			done = true;
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);

		if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
			done = true;
			break;
		}
	} while (more_data_avail);

	*bytes = moved;

	return done;
}

/* In most cases we will be able to lock the mptcp socket. If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;

	if (READ_ONCE(sk->sk_lock.owned))
		return false;

	if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
		return false;

	/* must re-check after taking the lock */
	if (!READ_ONCE(sk->sk_lock.owned))
		__mptcp_move_skbs_from_subflow(msk, ssk, &moved);

	spin_unlock_bh(&sk->sk_lock.slock);

	return moved > 0;
}

void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	set_bit(MPTCP_DATA_READY, &msk->flags);

	if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
	    move_skbs_to_msk(msk, ssk))
		goto wake;

	/* don't schedule if mptcp sk is (still) over limit */
	if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
		goto wake;

	/* mptcp socket is owned, release_cb should retry */
	if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
			      &sk->sk_tsq_flags)) {
		sock_hold(sk);

		/* need to try again, it's possible release_cb() has already
		 * been called after the test_and_set_bit() above.
		 */
		move_skbs_to_msk(msk, ssk);
	}

wake:
	sk->sk_data_ready(sk);
}

static void __mptcp_flush_join_list(struct mptcp_sock *msk)
{
	if (likely(list_empty(&msk->join_list)))
		return;

	spin_lock_bh(&msk->join_list_lock);
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
}

static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
{
	long tout = ssk && inet_csk(ssk)->icsk_pending ?
				      inet_csk(ssk)->icsk_timeout - jiffies : 0;

	if (tout <= 0)
		tout = mptcp_sk(sk)->timer_ival;
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}

static bool mptcp_timer_pending(struct sock *sk)
{
	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
}

static void mptcp_reset_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long tout;

	/* should never be called with mptcp level timer cleared */
	tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
	if (WARN_ON_ONCE(!tout))
		tout = TCP_RTO_MIN;
	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
}

void mptcp_data_acked(struct sock *sk)
{
	mptcp_reset_timer(sk);

	if (!sk_stream_is_writeable(sk) &&
	    schedule_work(&mptcp_sk(sk)->work))
		sock_hold(sk);
}

void mptcp_subflow_eof(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
	    schedule_work(&msk->work))
		sock_hold(sk);
}

static void mptcp_stop_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}

static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
	if (!msk->cached_ext)
		msk->cached_ext = __skb_ext_alloc();

	return !!msk->cached_ext;
}

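/* Returns the first subflow with in-sequence data pending in its receive
 * queue, or NULL. Must be called with the msk socket lock held.
 */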
static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
		if (subflow->data_avail)
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order */
	return mpext && mpext->data_seq + mpext->data_len == write_seq;
}

static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&
		df->data_seq + df->data_len == msk->write_seq;
}

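/* Data queued for MPTCP-level retransmission is charged to the msk write
 * memory; the two helpers below drop that accounting when a dfrag is trimmed
 * or freed.
 */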
static void dfrag_uncharge(struct sock *sk, int len)
{
	sk_mem_uncharge(sk, len);
	sk_wmem_queued_add(sk, -len);
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{
	int len = dfrag->data_len + dfrag->overhead;

	list_del(&dfrag->list);
	dfrag_uncharge(sk, len);
	put_page(dfrag->page);
}

static void mptcp_clean_una(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;
	u64 snd_una = atomic64_read(&msk->snd_una);
	bool cleaned = false;

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

		dfrag_clear(sk, dfrag);
		cleaned = true;
	}

	dfrag = mptcp_rtx_head(sk);
	if (dfrag && after64(snd_una, dfrag->data_seq)) {
		u64 delta = dfrag->data_seq + dfrag->data_len - snd_una;

		dfrag->data_seq += delta;
		dfrag->data_len -= delta;

		dfrag_uncharge(sk, delta);
		cleaned = true;
	}

	if (cleaned) {
		sk_mem_reclaim_partial(sk);

		/* Only wake up writers if a subflow is ready */
		if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
			sk_stream_write_space(sk);
	}
}

/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
					pfrag, sk->sk_allocation)))
		return true;

	sk->sk_prot->enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}

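/* Carve the mptcp_data_frag descriptor out of the page frag itself: the
 * descriptor lives in the page at a long-aligned offset and the user data is
 * copied right after it; 'overhead' accounts for the descriptor plus the
 * alignment padding.
 */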
static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{
	int offset = ALIGN(orig_offset, sizeof(long));
	struct mptcp_data_frag *dfrag;

	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
	dfrag->data_len = 0;
	dfrag->data_seq = msk->write_seq;
	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
	dfrag->page = pfrag->page;

	return dfrag;
}

static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct msghdr *msg, struct mptcp_data_frag *dfrag,
			      long *timeo, int *pmss_now,
			      int *ps_goal)
{
	int mss_now, avail_size, size_goal, offset, ret, frag_truesize = 0;
	bool dfrag_collapsed, can_collapse = false;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_ext *mpext = NULL;
	bool retransmission = !!dfrag;
	struct sk_buff *skb, *tail;
	struct page_frag *pfrag;
	struct page *page;
	u64 *write_seq;
	size_t psize;

	/* use the mptcp page cache so that we can easily move the data
	 * from one substream to another, but do per subflow memory accounting.
	 * Note: pfrag is only used when !retransmission, but the compiler is
	 * fooled into a warning if we don't init here
	 */
	pfrag = sk_page_frag(sk);
	while ((!retransmission && !mptcp_page_frag_refill(ssk, pfrag)) ||
	       !mptcp_ext_cache_refill(msk)) {
		ret = sk_stream_wait_memory(ssk, timeo);
		if (ret)
			return ret;

		/* if sk_stream_wait_memory() sleeps snd_una can change
		 * significantly, refresh the rtx queue
		 */
		mptcp_clean_una(sk);

		if (unlikely(__mptcp_needs_tcp_fallback(msk)))
			return 0;
	}
	if (!retransmission) {
		write_seq = &msk->write_seq;
		page = pfrag->page;
	} else {
		write_seq = &dfrag->data_seq;
		page = dfrag->page;
	}

	/* compute copy limit */
	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
	*pmss_now = mss_now;
	*ps_goal = size_goal;
	avail_size = size_goal;
	skb = tcp_write_queue_tail(ssk);
	if (skb) {
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);

		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most a new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
		can_collapse = (size_goal - skb->len > 0) &&
			       mptcp_skb_can_collapse_to(*write_seq, skb, mpext);
		if (!can_collapse)
			TCP_SKB_CB(skb)->eor = 1;
		else
			avail_size = size_goal - skb->len;
	}

	if (!retransmission) {
		/* reuse tail pfrag, if possible, or carve a new one from the
		 * page allocator
		 */
		dfrag = mptcp_rtx_tail(sk);
		offset = pfrag->offset;
		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
		if (!dfrag_collapsed) {
			dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
			offset = dfrag->offset;
			frag_truesize = dfrag->overhead;
		}
		psize = min_t(size_t, pfrag->size - offset, avail_size);

		/* Copy to page */
		pr_debug("left=%zu", msg_data_left(msg));
		psize = copy_page_from_iter(pfrag->page, offset,
					    min_t(size_t, msg_data_left(msg),
						  psize),
					    &msg->msg_iter);
		pr_debug("left=%zu", msg_data_left(msg));
		if (!psize)
			return -EINVAL;

		if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
			return -ENOMEM;
	} else {
		offset = dfrag->offset;
		psize = min_t(size_t, dfrag->data_len, avail_size);
	}

	/* tell the TCP stack to delay the push so that we can safely
	 * access the skb after the sendpages call
	 */
	ret = do_tcp_sendpages(ssk, page, offset, psize,
			       msg->msg_flags | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;

	frag_truesize += ret;
	if (!retransmission) {
		if (unlikely(ret < psize))
			iov_iter_revert(&msg->msg_iter, psize - ret);

		/* send successful, keep track of sent data for mptcp-level
		 * retransmission
		 */
		dfrag->data_len += ret;
		if (!dfrag_collapsed) {
			get_page(dfrag->page);
			list_add_tail(&dfrag->list, &msk->rtx_queue);
			sk_wmem_queued_add(sk, frag_truesize);
		} else {
			sk_wmem_queued_add(sk, ret);
		}

		/* charge data on mptcp rtx queue to the master socket
		 * Note: we charge such data both to sk and ssk
		 */
		sk->sk_forward_alloc -= frag_truesize;
	}

	/* if the tail skb extension is still the cached one, collapsing
	 * really happened. Note: we can't check for 'same skb' as the sk_buff
	 * hdr on tail can be transmitted, freed and re-allocated by the
	 * do_tcp_sendpages() call
	 */
	tail = tcp_write_queue_tail(ssk);
	if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
		WARN_ON_ONCE(!can_collapse);
		mpext->data_len += ret;
		goto out;
	}

	skb = tcp_write_queue_tail(ssk);
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
	msk->cached_ext = NULL;

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = *write_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

out:
	if (!retransmission)
		pfrag->offset += frag_truesize;
	*write_seq += ret;
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

	return ret;
}

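/* Pick a subflow for transmission: the first non-backup subflow with free
 * write memory wins, and a backup subflow is used only when no other kind is
 * available. Bails out with NULL as soon as a subflow runs out of memory,
 * after arming SOCK_NOSPACE so the tcp-level write_space callback can wake
 * the MPTCP sender later.
 */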
static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *backup = NULL;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (!sk_stream_memory_free(ssk)) {
			struct socket *sock = ssk->sk_socket;

			if (sock) {
				clear_bit(MPTCP_SEND_SPACE, &msk->flags);
				smp_mb__after_atomic();

				/* enables sk->write_space() callbacks */
				set_bit(SOCK_NOSPACE, &sock->flags);
			}

			return NULL;
		}

		if (subflow->backup) {
			if (!backup)
				backup = ssk;

			continue;
		}

		return ssk;
	}

	return backup;
}

static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
{
	struct socket *sock;

	if (likely(sk_stream_is_writeable(ssk)))
		return;

	sock = READ_ONCE(ssk->sk_socket);

	if (sock) {
		clear_bit(MPTCP_SEND_SPACE, &msk->flags);
		smp_mb__after_atomic();
		/* set NOSPACE only after clearing SEND_SPACE flag */
		set_bit(SOCK_NOSPACE, &sock->flags);
	}
}

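/* MPTCP-level sendmsg: data is copied into page frags owned by the msk and
 * pushed to a single subflow at a time via do_tcp_sendpages(), so that the
 * same pages can later be retransmitted on a different subflow if needed.
 */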
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	int mss_now = 0, size_goal = 0, ret = 0;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	size_t copied = 0;
	struct sock *ssk;
	long timeo;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	lock_sock(sk);

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		ret = sk_stream_wait_connect(sk, &timeo);
		if (ret)
			goto out;
	}

	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock)) {
fallback:
		pr_debug("fallback passthrough");
		ret = sock_sendmsg(ssock, msg);
		return ret >= 0 ? ret + copied : (copied ? copied : ret);
	}

	mptcp_clean_una(sk);

	__mptcp_flush_join_list(msk);
	ssk = mptcp_subflow_get_send(msk);
	while (!sk_stream_memory_free(sk) || !ssk) {
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret)
			goto out;

		mptcp_clean_una(sk);

		ssk = mptcp_subflow_get_send(msk);
		if (list_empty(&msk->conn_list)) {
			ret = -ENOTCONN;
			goto out;
		}
	}

	pr_debug("conn_list->subflow=%p", ssk);

	lock_sock(ssk);
	while (msg_data_left(msg)) {
		ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
					 &size_goal);
		if (ret < 0)
			break;
		if (ret == 0 && unlikely(__mptcp_needs_tcp_fallback(msk))) {
			release_sock(ssk);
			ssock = __mptcp_tcp_fallback(msk);
			goto fallback;
		}

		copied += ret;
	}

	mptcp_set_timeout(sk, ssk);
	if (copied) {
		ret = copied;
		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

		/* start the timer, if it's not pending */
		if (!mptcp_timer_pending(sk))
			mptcp_reset_timer(sk);
	}

	ssk_check_wmem(msk, ssk);
	release_sock(ssk);
out:
	release_sock(sk);
	return ret;
}

static void mptcp_wait_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct mptcp_sock *msk = mptcp_sk(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	sk_wait_event(sk, timeo,
		      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
}

static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
				struct msghdr *msg,
				size_t len)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb;
	int copied = 0;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		u32 offset = MPTCP_SKB_CB(skb)->offset;
		u32 data_len = skb->len - offset;
		u32 count = min_t(size_t, len - copied, data_len);
		int err;

		err = skb_copy_datagram_msg(skb, offset, msg, count);
		if (unlikely(err < 0)) {
			if (!copied)
				return err;
			break;
		}

		copied += count;

		if (count < data_len) {
			MPTCP_SKB_CB(skb)->offset += count;
			break;
		}

		__skb_unlink(skb, &sk->sk_receive_queue);
		__kfree_skb(skb);

		if (copied >= len)
			break;
	}

	return copied;
}

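/* Drain every subflow with in-sequence data pending into the msk receive
 * queue; returns true when at least one byte was moved.
 */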
static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{
	unsigned int moved = 0;
	bool done;

	do {
		struct sock *ssk = mptcp_subflow_recv_lookup(msk);

		if (!ssk)
			break;

		lock_sock(ssk);
		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
		release_sock(ssk);
	} while (!done);

	return moved > 0;
}

static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;
	int copied = 0;
	int target;
	long timeo;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (unlikely(ssock)) {
fallback:
		pr_debug("fallback-read subflow=%p",
			 mptcp_subflow_ctx(ssock->sk));
		copied = sock_recvmsg(ssock, msg, flags);
		return copied;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	len = min_t(size_t, len, INT_MAX);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	__mptcp_flush_join_list(msk);

	while (len > (size_t)copied) {
		int bytes_read;

		bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
		if (unlikely(bytes_read < 0)) {
			if (!copied)
				copied = bytes_read;
			goto out_err;
		}

		copied += bytes_read;

		if (skb_queue_empty(&sk->sk_receive_queue) &&
		    __mptcp_move_skbs(msk))
			continue;

		/* only the master socket status is relevant here. The exit
		 * conditions mirror closely tcp_recvmsg()
		 */
		if (copied >= target)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		pr_debug("block timeout %ld", timeo);
		mptcp_wait_data(sk, &timeo);
		if (unlikely(__mptcp_tcp_fallback(msk)))
			goto fallback;
	}

	if (skb_queue_empty(&sk->sk_receive_queue)) {
		/* entire backlog drained, clear DATA_READY. */
		clear_bit(MPTCP_DATA_READY, &msk->flags);

		/* .. race-breaker: ssk might have gotten new data
		 * after last __mptcp_move_skbs() returned false.
		 */
		if (unlikely(__mptcp_move_skbs(msk)))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	} else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
		/* data to read but mptcp_wait_data() cleared DATA_READY */
		set_bit(MPTCP_DATA_READY, &msk->flags);
	}

out_err:
	release_sock(sk);
	return copied;
}

static void mptcp_retransmit_handler(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (atomic64_read(&msk->snd_una) == msk->write_seq) {
		mptcp_stop_timer(sk);
	} else {
		set_bit(MPTCP_WORK_RTX, &msk->flags);
		if (schedule_work(&msk->work))
			sock_hold(sk);
	}
}

static void mptcp_retransmit_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk = from_timer(icsk, t,
						       icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		mptcp_retransmit_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* Find an idle subflow. Return NULL if there is unacked data at tcp
 * level.
 *
 * A backup subflow is returned only if that is the only kind available.
 */
static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *backup = NULL;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		/* still data outstanding at TCP level? Don't retransmit. */
		if (!tcp_write_queue_empty(ssk))
			return NULL;

		if (subflow->backup) {
			if (!backup)
				backup = ssk;
			continue;
		}

		return ssk;
	}

	return backup;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      long timeout)
{
	struct socket *sock = READ_ONCE(ssk->sk_socket);

	list_del(&subflow->node);

	if (sock && sock != sk->sk_socket) {
		/* outgoing subflow */
		sock_release(sock);
	} else {
		/* incoming subflow */
		tcp_close(ssk, timeout);
	}
}

static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{
	return 0;
}

static void mptcp_check_for_eof(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int receivers = 0;

	mptcp_for_each_subflow(msk, subflow)
		receivers += !subflow->rx_eof;

	if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* hopefully temporary hack: propagate shutdown status
		 * to msk, when all subflows agree on it
		 */
		sk->sk_shutdown |= RCV_SHUTDOWN;

		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		set_bit(MPTCP_DATA_READY, &msk->flags);
		sk->sk_data_ready(sk);
	}
}

static void mptcp_worker(struct work_struct *work)
{
	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
	struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
	int orig_len, orig_offset, ret, mss_now = 0, size_goal = 0;
	struct mptcp_data_frag *dfrag;
	u64 orig_write_seq;
	size_t copied = 0;
	struct msghdr msg;
	long timeo = 0;

	lock_sock(sk);
	mptcp_clean_una(sk);
	__mptcp_flush_join_list(msk);
	__mptcp_move_skbs(msk);

	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
		mptcp_check_for_eof(msk);

	if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
		goto unlock;

	dfrag = mptcp_rtx_head(sk);
	if (!dfrag)
		goto unlock;

	ssk = mptcp_subflow_get_retrans(msk);
	if (!ssk)
		goto reset_unlock;

	lock_sock(ssk);

	msg.msg_flags = MSG_DONTWAIT;
	orig_len = dfrag->data_len;
	orig_offset = dfrag->offset;
	orig_write_seq = dfrag->data_seq;
	while (dfrag->data_len > 0) {
		ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo, &mss_now,
					 &size_goal);
		if (ret < 0)
			break;

		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
		copied += ret;
		dfrag->data_len -= ret;
		dfrag->offset += ret;
	}
	if (copied)
		tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

	dfrag->data_seq = orig_write_seq;
	dfrag->offset = orig_offset;
	dfrag->data_len = orig_len;

	mptcp_set_timeout(sk, ssk);
	release_sock(ssk);

reset_unlock:
	if (!mptcp_timer_pending(sk))
		mptcp_reset_timer(sk);

unlock:
	release_sock(sk);
	sock_put(sk);
}

static int __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	spin_lock_init(&msk->join_list_lock);

	INIT_LIST_HEAD(&msk->conn_list);
	INIT_LIST_HEAD(&msk->join_list);
	INIT_LIST_HEAD(&msk->rtx_queue);
	__set_bit(MPTCP_SEND_SPACE, &msk->flags);
	INIT_WORK(&msk->work, mptcp_worker);

	msk->first = NULL;
	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;

	mptcp_pm_data_init(msk);

	/* re-use the csk retrans timer for MPTCP-level retrans */
	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);

	return 0;
}

static int mptcp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	int ret;

	if (!mptcp_is_enabled(net))
		return -ENOPROTOOPT;

	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
		return -ENOMEM;

	ret = __mptcp_init_sock(sk);
	if (ret)
		return ret;

	sk_sockets_allocated_inc(sk);
	sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];

	return 0;
}

static void __mptcp_clear_xmit(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;

	sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
		dfrag_clear(sk, dfrag);
}

static void mptcp_cancel_work(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (cancel_work_sync(&msk->work))
		sock_put(sk);
}

static void mptcp_subflow_shutdown(struct sock *ssk, int how,
				   bool data_fin_tx_enable, u64 data_fin_tx_seq)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		/* fall through */
	case TCP_SYN_SENT:
		tcp_disconnect(ssk, O_NONBLOCK);
		break;
	default:
		if (data_fin_tx_enable) {
			struct mptcp_subflow_context *subflow;

			subflow = mptcp_subflow_ctx(ssk);
			subflow->data_fin_tx_seq = data_fin_tx_seq;
			subflow->data_fin_tx_enable = 1;
		}

		ssk->sk_shutdown |= how;
		tcp_shutdown(ssk, how);
		break;
	}

	/* Wake up anyone sleeping in poll. */
	ssk->sk_state_change(ssk);
	release_sock(ssk);
}

/* Called with msk lock held, releases such lock before returning */
static void mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);
	LIST_HEAD(conn_list);
	u64 data_fin_tx_seq;

	lock_sock(sk);

	mptcp_token_destroy(msk->token);
	inet_sk_state_store(sk, TCP_CLOSE);

	__mptcp_flush_join_list(msk);

	list_splice_init(&msk->conn_list, &conn_list);

	data_fin_tx_seq = msk->write_seq;

	__mptcp_clear_xmit(sk);

	release_sock(sk);

	list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		subflow->data_fin_tx_seq = data_fin_tx_seq;
		subflow->data_fin_tx_enable = 1;
		__mptcp_close_ssk(sk, ssk, subflow, timeout);
	}

	mptcp_cancel_work(sk);
	mptcp_pm_close(msk);

	__skb_queue_purge(&sk->sk_receive_queue);

	sk_common_release(sk);
}

static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

static int mptcp_disconnect(struct sock *sk, int flags)
{
	lock_sock(sk);
	__mptcp_clear_xmit(sk);
	release_sock(sk);
	mptcp_cancel_work(sk);
	return tcp_disconnect(sk, flags);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
#endif

struct sock *mptcp_sk_clone(const struct sock *sk, struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
	struct mptcp_sock *msk;
	u64 ack_seq;

	if (!nsk)
		return NULL;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (nsk->sk_family == AF_INET6)
		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif

	__mptcp_init_sock(nsk);

	msk = mptcp_sk(nsk);
	msk->local_key = subflow_req->local_key;
	msk->token = subflow_req->token;
	msk->subflow = NULL;

	if (unlikely(mptcp_token_new_accept(subflow_req->token, nsk))) {
		bh_unlock_sock(nsk);

		/* we can't call into mptcp_close() here - possible BH context
		 * free the sock directly
		 */
		nsk->sk_prot->destroy(nsk);
		sk_free(nsk);
		return NULL;
	}

	msk->write_seq = subflow_req->idsn + 1;
	atomic64_set(&msk->snd_una, msk->write_seq);
	if (subflow_req->remote_key_valid) {
		msk->can_ack = true;
		msk->remote_key = subflow_req->remote_key;
		mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
		ack_seq++;
		msk->ack_seq = ack_seq;
	}

	/* will be fully established after successful MPC subflow creation */
	inet_sk_state_store(nsk, TCP_SYN_RECV);
	bh_unlock_sock(nsk);

	/* keep a single reference */
	__sock_put(nsk);
	return nsk;
}

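/* The accept() backlog lives on the first subflow (listener) socket: pull a
 * newly established flow from there and, when it negotiated MPTCP, return the
 * owning MPTCP socket instead of the plain TCP subflow.
 */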
static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
				 bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *listener;
	struct sock *newsk;

	listener = __mptcp_nmpc_socket(msk);
	if (WARN_ON_ONCE(!listener)) {
		*err = -EINVAL;
		return NULL;
	}

	pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
	newsk = inet_csk_accept(listener->sk, flags, err, kern);
	if (!newsk)
		return NULL;

	pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));

	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;
		struct sock *ssk = newsk;

		subflow = mptcp_subflow_ctx(newsk);
		new_mptcp_sock = subflow->conn;

		/* is_mptcp should be false if subflow->conn is missing, see
		 * subflow_syn_recv_sock()
		 */
		if (WARN_ON_ONCE(!new_mptcp_sock)) {
			tcp_sk(newsk)->is_mptcp = 0;
			return newsk;
		}

		/* acquire the 2nd reference for the owning socket */
		sock_hold(new_mptcp_sock);

		local_bh_disable();
		bh_lock_sock(new_mptcp_sock);
		msk = mptcp_sk(new_mptcp_sock);
		msk->first = newsk;

		newsk = new_mptcp_sock;
		mptcp_copy_inaddrs(newsk, ssk);
		list_add(&subflow->node, &msk->conn_list);

		bh_unlock_sock(new_mptcp_sock);

		__MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
		local_bh_enable();
	} else {
		MPTCP_INC_STATS(sock_net(sk),
				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
	}

	return newsk;
}

static void mptcp_destroy(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (msk->cached_ext)
		__skb_ext_put(msk->cached_ext);

	sk_sockets_allocated_dec(sk);
}

static int mptcp_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of setsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when TCP socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (ssock)
		return tcp_setsockopt(ssock->sk, level, optname, optval,
				      optlen);

	release_sock(sk);

	return -EOPNOTSUPP;
}

static int mptcp_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *option)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	pr_debug("msk=%p", msk);

	/* @@ the meaning of getsockopt() when the socket is connected and
	 * there are multiple subflows is not yet defined. It is up to the
	 * MPTCP-level socket to configure the subflows until the subflow
	 * is in TCP fallback, when socket options are passed through
	 * to the one remaining subflow.
	 */
	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (ssock)
		return tcp_getsockopt(ssock->sk, level, optname, optval,
				      option);

	release_sock(sk);

	return -EOPNOTSUPP;
}

#define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \
			    TCPF_WRITE_TIMER_DEFERRED)

/* this is very similar to tcp_release_cb(), but we must handle a
 * different set of events
 */
static void mptcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & MPTCP_DEFERRED_ALL))
			return;
		nflags = flags & ~MPTCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	sock_release_ownership(sk);

	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		struct mptcp_sock *msk = mptcp_sk(sk);
		struct sock *ssk;

		ssk = mptcp_subflow_recv_lookup(msk);
		if (!ssk || !schedule_work(&msk->work))
			__sock_put(sk);
	}

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		mptcp_retransmit_handler(sk);
		__sock_put(sk);
	}
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct socket *ssock;

	ssock = __mptcp_nmpc_socket(msk);
	pr_debug("msk=%p, subflow=%p", msk, ssock);
	if (WARN_ON_ONCE(!ssock))
		return -EINVAL;

	return inet_csk_get_port(ssock->sk, snum);
}

void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;
	u64 ack_seq;

	subflow = mptcp_subflow_ctx(ssk);
	sk = subflow->conn;
	msk = mptcp_sk(sk);

	if (!subflow->mp_capable) {
		MPTCP_INC_STATS(sock_net(sk),
				MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
		return;
	}

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;
	subflow->rel_write_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access/race
	 * accessing the field below
	 */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->token, subflow->token);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->ack_seq, ack_seq);
	WRITE_ONCE(msk->can_ack, 1);
	atomic64_set(&msk->snd_una, msk->write_seq);

	mptcp_pm_new_connection(msk, 0);
}

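/* Graft the subflow socket onto the user-visible parent socket, so that wait
 * queue, sk_socket and owning uid all point at the MPTCP-level endpoint.
 */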
static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}

bool mptcp_finish_join(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *parent = (void *)msk;
	struct socket *parent_sock;
	bool ret;

	pr_debug("msk=%p, subflow=%p", msk, subflow);

	/* mptcp socket already closing? */
	if (inet_sk_state_load(parent) != TCP_ESTABLISHED)
		return false;

	if (!msk->pm.server_side)
		return true;

	/* passive connection, attach to msk socket */
	parent_sock = READ_ONCE(parent->sk_socket);
	if (parent_sock && !sk->sk_socket)
		mptcp_sock_graft(sk, parent_sock);

	ret = mptcp_pm_allow_new_subflow(msk);
	if (ret) {
		/* active connections are already on conn_list */
		spin_lock_bh(&msk->join_list_lock);
		if (!WARN_ON_ONCE(!list_empty(&subflow->node)))
			list_add_tail(&subflow->node, &msk->join_list);
		spin_unlock_bh(&msk->join_list_lock);
	}

	return ret;
}

bool mptcp_sk_is_subflow(const struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);

	return subflow->mp_join == 1;
}

static bool mptcp_memory_free(const struct sock *sk, int wake)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
}

static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.disconnect	= mptcp_disconnect,
	.close		= mptcp_close,
	.accept		= mptcp_accept,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= tcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.recvmsg	= mptcp_recvmsg,
	.release_cb	= mptcp_release_cb,
	.hash		= inet_hash,
	.unhash		= inet_unhash,
	.get_port	= mptcp_get_port,
	.sockets_allocated	= &mptcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.stream_memory_free	= mptcp_memory_free,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_mem	= sysctl_tcp_mem,
	.obj_size	= sizeof(struct mptcp_sock),
	.no_autobind	= true,
};

static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->bind(ssock, uaddr, addr_len);
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
				int addr_len, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
		mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
#endif

	err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

static int mptcp_v4_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcp_prot) {
		/* we are being invoked from __sys_accept4, after
		 * mptcp_accept() has just accepted a non-mp-capable
		 * flow: sk is a tcp_sk, not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
	}

	return inet_getname(sock, uaddr, peer);
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static int mptcp_v6_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	if (sock->sk->sk_prot == &tcpv6_prot) {
		/* we are being invoked from __sys_accept4 after
		 * mptcp_accept() has accepted a non-mp-capable
		 * subflow: sk is a tcp_sk, not mptcp.
		 *
		 * Hand the socket over to tcp so all further
		 * socket ops bypass mptcp.
		 */
		sock->ops = &inet6_stream_ops;
	}

	return inet6_getname(sock, uaddr, peer);
}
#endif

static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	ssock = __mptcp_socket_create(msk, TCP_LISTEN);
	if (IS_ERR(ssock)) {
		err = PTR_ERR(ssock);
		goto unlock;
	}

	err = ssock->ops->listen(ssock, backlog);
	inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk));
	if (!err)
		mptcp_copy_inaddrs(sock->sk, ssock->sk);

unlock:
	release_sock(sock->sk);
	return err;
}

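/* Tells whether an accept()ed socket landed on plain TCP, i.e. the flow fell
 * back instead of becoming an MPTCP subflow.
 */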
static bool is_tcp_proto(const struct proto *p)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	return p == &tcp_prot || p == &tcpv6_prot;
#else
	return p == &tcp_prot;
#endif
}

static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct socket *ssock;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sock->sk);
	if (sock->sk->sk_state != TCP_LISTEN)
		goto unlock_fail;

	ssock = __mptcp_nmpc_socket(msk);
	if (!ssock)
		goto unlock_fail;

	sock_hold(ssock->sk);
	release_sock(sock->sk);

	err = ssock->ops->accept(sock, newsock, flags, kern);
	if (err == 0 && !is_tcp_proto(newsock->sk->sk_prot)) {
		struct mptcp_sock *msk = mptcp_sk(newsock->sk);
		struct mptcp_subflow_context *subflow;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		__mptcp_flush_join_list(msk);
		list_for_each_entry(subflow, &msk->conn_list, node) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}
	}

	sock_put(ssock->sk);
	return err;

unlock_fail:
	release_sock(sock->sk);
	return -EINVAL;
}

static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	struct socket *ssock;
	__poll_t mask = 0;

	msk = mptcp_sk(sk);
	lock_sock(sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (!ssock)
		ssock = __mptcp_nmpc_socket(msk);
	if (ssock) {
		mask = ssock->ops->poll(file, ssock, wait);
		release_sock(sk);
		return mask;
	}

	release_sock(sk);
	sock_poll_wait(file, sock, wait);
	lock_sock(sk);

	if (test_bit(MPTCP_DATA_READY, &msk->flags))
		mask = EPOLLIN | EPOLLRDNORM;
	if (sk_stream_is_writeable(sk) &&
	    test_bit(MPTCP_SEND_SPACE, &msk->flags))
		mask |= EPOLLOUT | EPOLLWRNORM;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	release_sock(sk);

	return mask;
}

static int mptcp_shutdown(struct socket *sock, int how)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct mptcp_subflow_context *subflow;
	struct socket *ssock;
	int ret = 0;

	pr_debug("sk=%p, how=%d", msk, how);

	lock_sock(sock->sk);
	ssock = __mptcp_tcp_fallback(msk);
	if (ssock) {
		release_sock(sock->sk);
		return inet_shutdown(ssock, how);
	}

	if (how == SHUT_WR || how == SHUT_RDWR)
		inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (sock->state == SS_CONNECTING) {
		if ((1 << sock->sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	__mptcp_flush_join_list(msk);
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(tcp_sk, how, 1, msk->write_seq);
	}

out_unlock:
	release_sock(sock->sk);

	return ret;
}

static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = mptcp_v4_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

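/* Userspace reaches this protocol with socket(AF_INET, SOCK_STREAM,
 * IPPROTO_MPTCP); plain IPPROTO_TCP sockets keep using tcp_prot untouched.
 */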
static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

void mptcp_proto_init(void)
{
	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	mptcp_subflow_init();
	mptcp_pm_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);

	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = mptcp_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = mptcp_v6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = mptcp_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};

static struct proto mptcp_v6_prot;

static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);

	return err;
}
#endif