1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2017 - 2019, Intel Corporation.
7 #define pr_fmt(fmt) "MPTCP: " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/sched/signal.h>
13 #include <linux/atomic.h>
15 #include <net/inet_common.h>
16 #include <net/inet_hashtables.h>
17 #include <net/protocol.h>
19 #include <net/tcp_states.h>
20 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
21 #include <net/transp_v6.h>
23 #include <net/mptcp.h>
25 #include <asm/ioctls.h>
29 #define CREATE_TRACE_POINTS
30 #include <trace/events/mptcp.h>
32 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
34 struct mptcp_sock msk
;
40 MPTCP_CMSG_TS
= BIT(0),
41 MPTCP_CMSG_INQ
= BIT(1),
44 static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp
;
46 static void __mptcp_destroy_sock(struct sock
*sk
);
47 static void mptcp_check_send_data_fin(struct sock
*sk
);
49 DEFINE_PER_CPU(struct mptcp_delegated_action
, mptcp_delegated_actions
);
50 static struct net_device mptcp_napi_dev
;
52 /* Returns end sequence number of the receiver's advertised window */
53 static u64
mptcp_wnd_end(const struct mptcp_sock
*msk
)
55 return READ_ONCE(msk
->wnd_end
);
58 static const struct proto_ops
*mptcp_fallback_tcp_ops(const struct sock
*sk
)
60 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
61 if (sk
->sk_prot
== &tcpv6_prot
)
62 return &inet6_stream_ops
;
64 WARN_ON_ONCE(sk
->sk_prot
!= &tcp_prot
);
65 return &inet_stream_ops
;
68 static int __mptcp_socket_create(struct mptcp_sock
*msk
)
70 struct mptcp_subflow_context
*subflow
;
71 struct sock
*sk
= (struct sock
*)msk
;
75 err
= mptcp_subflow_create_socket(sk
, sk
->sk_family
, &ssock
);
79 msk
->scaling_ratio
= tcp_sk(ssock
->sk
)->scaling_ratio
;
80 WRITE_ONCE(msk
->first
, ssock
->sk
);
81 subflow
= mptcp_subflow_ctx(ssock
->sk
);
82 list_add(&subflow
->node
, &msk
->conn_list
);
84 subflow
->request_mptcp
= 1;
85 subflow
->subflow_id
= msk
->subflow_id
++;
87 /* This is the first subflow, always with id 0 */
88 subflow
->local_id_valid
= 1;
89 mptcp_sock_graft(msk
->first
, sk
->sk_socket
);
90 iput(SOCK_INODE(ssock
));
95 /* If the MPC handshake is not started, returns the first subflow,
96 * eventually allocating it.
98 struct sock
*__mptcp_nmpc_sk(struct mptcp_sock
*msk
)
100 struct sock
*sk
= (struct sock
*)msk
;
103 if (!((1 << sk
->sk_state
) & (TCPF_CLOSE
| TCPF_LISTEN
)))
104 return ERR_PTR(-EINVAL
);
107 ret
= __mptcp_socket_create(msk
);
115 static void mptcp_drop(struct sock
*sk
, struct sk_buff
*skb
)
117 sk_drops_add(sk
, skb
);
121 static void mptcp_rmem_fwd_alloc_add(struct sock
*sk
, int size
)
123 WRITE_ONCE(mptcp_sk(sk
)->rmem_fwd_alloc
,
124 mptcp_sk(sk
)->rmem_fwd_alloc
+ size
);
127 static void mptcp_rmem_charge(struct sock
*sk
, int size
)
129 mptcp_rmem_fwd_alloc_add(sk
, -size
);
132 static bool mptcp_try_coalesce(struct sock
*sk
, struct sk_buff
*to
,
133 struct sk_buff
*from
)
138 if (MPTCP_SKB_CB(from
)->offset
||
139 !skb_try_coalesce(to
, from
, &fragstolen
, &delta
))
142 pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
143 MPTCP_SKB_CB(from
)->map_seq
, MPTCP_SKB_CB(to
)->map_seq
,
144 to
->len
, MPTCP_SKB_CB(from
)->end_seq
);
145 MPTCP_SKB_CB(to
)->end_seq
= MPTCP_SKB_CB(from
)->end_seq
;
147 /* note the fwd memory can reach a negative value after accounting
148 * for the delta, but the later skb free will restore a non
151 atomic_add(delta
, &sk
->sk_rmem_alloc
);
152 mptcp_rmem_charge(sk
, delta
);
153 kfree_skb_partial(from
, fragstolen
);
158 static bool mptcp_ooo_try_coalesce(struct mptcp_sock
*msk
, struct sk_buff
*to
,
159 struct sk_buff
*from
)
161 if (MPTCP_SKB_CB(from
)->map_seq
!= MPTCP_SKB_CB(to
)->end_seq
)
164 return mptcp_try_coalesce((struct sock
*)msk
, to
, from
);
167 static void __mptcp_rmem_reclaim(struct sock
*sk
, int amount
)
169 amount
>>= PAGE_SHIFT
;
170 mptcp_rmem_charge(sk
, amount
<< PAGE_SHIFT
);
171 __sk_mem_reduce_allocated(sk
, amount
);
174 static void mptcp_rmem_uncharge(struct sock
*sk
, int size
)
176 struct mptcp_sock
*msk
= mptcp_sk(sk
);
179 mptcp_rmem_fwd_alloc_add(sk
, size
);
180 reclaimable
= msk
->rmem_fwd_alloc
- sk_unused_reserved_mem(sk
);
182 /* see sk_mem_uncharge() for the rationale behind the following schema */
183 if (unlikely(reclaimable
>= PAGE_SIZE
))
184 __mptcp_rmem_reclaim(sk
, reclaimable
);
187 static void mptcp_rfree(struct sk_buff
*skb
)
189 unsigned int len
= skb
->truesize
;
190 struct sock
*sk
= skb
->sk
;
192 atomic_sub(len
, &sk
->sk_rmem_alloc
);
193 mptcp_rmem_uncharge(sk
, len
);
196 void mptcp_set_owner_r(struct sk_buff
*skb
, struct sock
*sk
)
200 skb
->destructor
= mptcp_rfree
;
201 atomic_add(skb
->truesize
, &sk
->sk_rmem_alloc
);
202 mptcp_rmem_charge(sk
, skb
->truesize
);
205 /* "inspired" by tcp_data_queue_ofo(), main differences:
207 * - don't cope with sacks
209 static void mptcp_data_queue_ofo(struct mptcp_sock
*msk
, struct sk_buff
*skb
)
211 struct sock
*sk
= (struct sock
*)msk
;
212 struct rb_node
**p
, *parent
;
213 u64 seq
, end_seq
, max_seq
;
214 struct sk_buff
*skb1
;
216 seq
= MPTCP_SKB_CB(skb
)->map_seq
;
217 end_seq
= MPTCP_SKB_CB(skb
)->end_seq
;
218 max_seq
= atomic64_read(&msk
->rcv_wnd_sent
);
220 pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk
, seq
, max_seq
,
221 RB_EMPTY_ROOT(&msk
->out_of_order_queue
));
222 if (after64(end_seq
, max_seq
)) {
225 pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
226 (unsigned long long)end_seq
- (unsigned long)max_seq
,
227 (unsigned long long)atomic64_read(&msk
->rcv_wnd_sent
));
228 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_NODSSWINDOW
);
232 p
= &msk
->out_of_order_queue
.rb_node
;
233 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_OFOQUEUE
);
234 if (RB_EMPTY_ROOT(&msk
->out_of_order_queue
)) {
235 rb_link_node(&skb
->rbnode
, NULL
, p
);
236 rb_insert_color(&skb
->rbnode
, &msk
->out_of_order_queue
);
237 msk
->ooo_last_skb
= skb
;
241 /* with 2 subflows, adding at end of ooo queue is quite likely
242 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
244 if (mptcp_ooo_try_coalesce(msk
, msk
->ooo_last_skb
, skb
)) {
245 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_OFOMERGE
);
246 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_OFOQUEUETAIL
);
250 /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
251 if (!before64(seq
, MPTCP_SKB_CB(msk
->ooo_last_skb
)->end_seq
)) {
252 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_OFOQUEUETAIL
);
253 parent
= &msk
->ooo_last_skb
->rbnode
;
254 p
= &parent
->rb_right
;
258 /* Find place to insert this segment. Handle overlaps on the way. */
262 skb1
= rb_to_skb(parent
);
263 if (before64(seq
, MPTCP_SKB_CB(skb1
)->map_seq
)) {
264 p
= &parent
->rb_left
;
267 if (before64(seq
, MPTCP_SKB_CB(skb1
)->end_seq
)) {
268 if (!after64(end_seq
, MPTCP_SKB_CB(skb1
)->end_seq
)) {
269 /* All the bits are present. Drop. */
271 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_DUPDATA
);
274 if (after64(seq
, MPTCP_SKB_CB(skb1
)->map_seq
)) {
278 * continue traversing
281 /* skb's seq == skb1's seq and skb covers skb1.
282 * Replace skb1 with skb.
284 rb_replace_node(&skb1
->rbnode
, &skb
->rbnode
,
285 &msk
->out_of_order_queue
);
286 mptcp_drop(sk
, skb1
);
287 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_DUPDATA
);
290 } else if (mptcp_ooo_try_coalesce(msk
, skb1
, skb
)) {
291 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_OFOMERGE
);
294 p
= &parent
->rb_right
;
298 /* Insert segment into RB tree. */
299 rb_link_node(&skb
->rbnode
, parent
, p
);
300 rb_insert_color(&skb
->rbnode
, &msk
->out_of_order_queue
);
303 /* Remove other segments covered by skb. */
304 while ((skb1
= skb_rb_next(skb
)) != NULL
) {
305 if (before64(end_seq
, MPTCP_SKB_CB(skb1
)->end_seq
))
307 rb_erase(&skb1
->rbnode
, &msk
->out_of_order_queue
);
308 mptcp_drop(sk
, skb1
);
309 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_DUPDATA
);
311 /* If there is no skb after us, we are the last_skb ! */
313 msk
->ooo_last_skb
= skb
;
317 mptcp_set_owner_r(skb
, sk
);
320 static bool mptcp_rmem_schedule(struct sock
*sk
, struct sock
*ssk
, int size
)
322 struct mptcp_sock
*msk
= mptcp_sk(sk
);
325 if (size
<= msk
->rmem_fwd_alloc
)
328 size
-= msk
->rmem_fwd_alloc
;
329 amt
= sk_mem_pages(size
);
330 amount
= amt
<< PAGE_SHIFT
;
331 if (!__sk_mem_raise_allocated(sk
, size
, amt
, SK_MEM_RECV
))
334 mptcp_rmem_fwd_alloc_add(sk
, amount
);
338 static bool __mptcp_move_skb(struct mptcp_sock
*msk
, struct sock
*ssk
,
339 struct sk_buff
*skb
, unsigned int offset
,
342 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
343 struct sock
*sk
= (struct sock
*)msk
;
344 struct sk_buff
*tail
;
347 __skb_unlink(skb
, &ssk
->sk_receive_queue
);
352 /* try to fetch required memory from subflow */
353 if (!mptcp_rmem_schedule(sk
, ssk
, skb
->truesize
))
356 has_rxtstamp
= TCP_SKB_CB(skb
)->has_rxtstamp
;
358 /* the skb map_seq accounts for the skb offset:
359 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
362 MPTCP_SKB_CB(skb
)->map_seq
= mptcp_subflow_get_mapped_dsn(subflow
);
363 MPTCP_SKB_CB(skb
)->end_seq
= MPTCP_SKB_CB(skb
)->map_seq
+ copy_len
;
364 MPTCP_SKB_CB(skb
)->offset
= offset
;
365 MPTCP_SKB_CB(skb
)->has_rxtstamp
= has_rxtstamp
;
367 if (MPTCP_SKB_CB(skb
)->map_seq
== msk
->ack_seq
) {
369 msk
->bytes_received
+= copy_len
;
370 WRITE_ONCE(msk
->ack_seq
, msk
->ack_seq
+ copy_len
);
371 tail
= skb_peek_tail(&sk
->sk_receive_queue
);
372 if (tail
&& mptcp_try_coalesce(sk
, tail
, skb
))
375 mptcp_set_owner_r(skb
, sk
);
376 __skb_queue_tail(&sk
->sk_receive_queue
, skb
);
378 } else if (after64(MPTCP_SKB_CB(skb
)->map_seq
, msk
->ack_seq
)) {
379 mptcp_data_queue_ofo(msk
, skb
);
383 /* old data, keep it simple and drop the whole pkt, sender
384 * will retransmit as needed, if needed.
386 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_DUPDATA
);
392 static void mptcp_stop_rtx_timer(struct sock
*sk
)
394 struct inet_connection_sock
*icsk
= inet_csk(sk
);
396 sk_stop_timer(sk
, &icsk
->icsk_retransmit_timer
);
397 mptcp_sk(sk
)->timer_ival
= 0;
400 static void mptcp_close_wake_up(struct sock
*sk
)
402 if (sock_flag(sk
, SOCK_DEAD
))
405 sk
->sk_state_change(sk
);
406 if (sk
->sk_shutdown
== SHUTDOWN_MASK
||
407 sk
->sk_state
== TCP_CLOSE
)
408 sk_wake_async(sk
, SOCK_WAKE_WAITD
, POLL_HUP
);
410 sk_wake_async(sk
, SOCK_WAKE_WAITD
, POLL_IN
);
413 static bool mptcp_pending_data_fin_ack(struct sock
*sk
)
415 struct mptcp_sock
*msk
= mptcp_sk(sk
);
417 return ((1 << sk
->sk_state
) &
418 (TCPF_FIN_WAIT1
| TCPF_CLOSING
| TCPF_LAST_ACK
)) &&
419 msk
->write_seq
== READ_ONCE(msk
->snd_una
);
422 static void mptcp_check_data_fin_ack(struct sock
*sk
)
424 struct mptcp_sock
*msk
= mptcp_sk(sk
);
426 /* Look for an acknowledged DATA_FIN */
427 if (mptcp_pending_data_fin_ack(sk
)) {
428 WRITE_ONCE(msk
->snd_data_fin_enable
, 0);
430 switch (sk
->sk_state
) {
432 mptcp_set_state(sk
, TCP_FIN_WAIT2
);
436 mptcp_set_state(sk
, TCP_CLOSE
);
440 mptcp_close_wake_up(sk
);
444 static bool mptcp_pending_data_fin(struct sock
*sk
, u64
*seq
)
446 struct mptcp_sock
*msk
= mptcp_sk(sk
);
448 if (READ_ONCE(msk
->rcv_data_fin
) &&
449 ((1 << sk
->sk_state
) &
450 (TCPF_ESTABLISHED
| TCPF_FIN_WAIT1
| TCPF_FIN_WAIT2
))) {
451 u64 rcv_data_fin_seq
= READ_ONCE(msk
->rcv_data_fin_seq
);
453 if (msk
->ack_seq
== rcv_data_fin_seq
) {
455 *seq
= rcv_data_fin_seq
;
464 static void mptcp_set_datafin_timeout(struct sock
*sk
)
466 struct inet_connection_sock
*icsk
= inet_csk(sk
);
469 retransmits
= min_t(u32
, icsk
->icsk_retransmits
,
470 ilog2(TCP_RTO_MAX
/ TCP_RTO_MIN
));
472 mptcp_sk(sk
)->timer_ival
= TCP_RTO_MIN
<< retransmits
;
475 static void __mptcp_set_timeout(struct sock
*sk
, long tout
)
477 mptcp_sk(sk
)->timer_ival
= tout
> 0 ? tout
: TCP_RTO_MIN
;
480 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context
*subflow
)
482 const struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
484 return inet_csk(ssk
)->icsk_pending
&& !subflow
->stale_count
?
485 inet_csk(ssk
)->icsk_timeout
- jiffies
: 0;
488 static void mptcp_set_timeout(struct sock
*sk
)
490 struct mptcp_subflow_context
*subflow
;
493 mptcp_for_each_subflow(mptcp_sk(sk
), subflow
)
494 tout
= max(tout
, mptcp_timeout_from_subflow(subflow
));
495 __mptcp_set_timeout(sk
, tout
);
498 static inline bool tcp_can_send_ack(const struct sock
*ssk
)
500 return !((1 << inet_sk_state_load(ssk
)) &
501 (TCPF_SYN_SENT
| TCPF_SYN_RECV
| TCPF_TIME_WAIT
| TCPF_CLOSE
| TCPF_LISTEN
));
504 void __mptcp_subflow_send_ack(struct sock
*ssk
)
506 if (tcp_can_send_ack(ssk
))
510 static void mptcp_subflow_send_ack(struct sock
*ssk
)
514 slow
= lock_sock_fast(ssk
);
515 __mptcp_subflow_send_ack(ssk
);
516 unlock_sock_fast(ssk
, slow
);
519 static void mptcp_send_ack(struct mptcp_sock
*msk
)
521 struct mptcp_subflow_context
*subflow
;
523 mptcp_for_each_subflow(msk
, subflow
)
524 mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow
));
527 static void mptcp_subflow_cleanup_rbuf(struct sock
*ssk
)
531 slow
= lock_sock_fast(ssk
);
532 if (tcp_can_send_ack(ssk
))
533 tcp_cleanup_rbuf(ssk
, 1);
534 unlock_sock_fast(ssk
, slow
);
537 static bool mptcp_subflow_could_cleanup(const struct sock
*ssk
, bool rx_empty
)
539 const struct inet_connection_sock
*icsk
= inet_csk(ssk
);
540 u8 ack_pending
= READ_ONCE(icsk
->icsk_ack
.pending
);
541 const struct tcp_sock
*tp
= tcp_sk(ssk
);
543 return (ack_pending
& ICSK_ACK_SCHED
) &&
544 ((READ_ONCE(tp
->rcv_nxt
) - READ_ONCE(tp
->rcv_wup
) >
545 READ_ONCE(icsk
->icsk_ack
.rcv_mss
)) ||
546 (rx_empty
&& ack_pending
&
547 (ICSK_ACK_PUSHED2
| ICSK_ACK_PUSHED
)));
550 static void mptcp_cleanup_rbuf(struct mptcp_sock
*msk
)
552 int old_space
= READ_ONCE(msk
->old_wspace
);
553 struct mptcp_subflow_context
*subflow
;
554 struct sock
*sk
= (struct sock
*)msk
;
555 int space
= __mptcp_space(sk
);
556 bool cleanup
, rx_empty
;
558 cleanup
= (space
> 0) && (space
>= (old_space
<< 1));
559 rx_empty
= !__mptcp_rmem(sk
);
561 mptcp_for_each_subflow(msk
, subflow
) {
562 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
564 if (cleanup
|| mptcp_subflow_could_cleanup(ssk
, rx_empty
))
565 mptcp_subflow_cleanup_rbuf(ssk
);
569 static bool mptcp_check_data_fin(struct sock
*sk
)
571 struct mptcp_sock
*msk
= mptcp_sk(sk
);
572 u64 rcv_data_fin_seq
;
575 /* Need to ack a DATA_FIN received from a peer while this side
576 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
577 * msk->rcv_data_fin was set when parsing the incoming options
578 * at the subflow level and the msk lock was not held, so this
579 * is the first opportunity to act on the DATA_FIN and change
582 * If we are caught up to the sequence number of the incoming
583 * DATA_FIN, send the DATA_ACK now and do state transition. If
584 * not caught up, do nothing and let the recv code send DATA_ACK
588 if (mptcp_pending_data_fin(sk
, &rcv_data_fin_seq
)) {
589 WRITE_ONCE(msk
->ack_seq
, msk
->ack_seq
+ 1);
590 WRITE_ONCE(msk
->rcv_data_fin
, 0);
592 WRITE_ONCE(sk
->sk_shutdown
, sk
->sk_shutdown
| RCV_SHUTDOWN
);
593 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
595 switch (sk
->sk_state
) {
596 case TCP_ESTABLISHED
:
597 mptcp_set_state(sk
, TCP_CLOSE_WAIT
);
600 mptcp_set_state(sk
, TCP_CLOSING
);
603 mptcp_set_state(sk
, TCP_CLOSE
);
606 /* Other states not expected */
612 if (!__mptcp_check_fallback(msk
))
614 mptcp_close_wake_up(sk
);
619 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock
*msk
,
623 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
624 struct sock
*sk
= (struct sock
*)msk
;
625 unsigned int moved
= 0;
626 bool more_data_avail
;
631 sk_rbuf
= READ_ONCE(sk
->sk_rcvbuf
);
633 if (!(sk
->sk_userlocks
& SOCK_RCVBUF_LOCK
)) {
634 int ssk_rbuf
= READ_ONCE(ssk
->sk_rcvbuf
);
636 if (unlikely(ssk_rbuf
> sk_rbuf
)) {
637 WRITE_ONCE(sk
->sk_rcvbuf
, ssk_rbuf
);
642 pr_debug("msk=%p ssk=%p", msk
, ssk
);
645 u32 map_remaining
, offset
;
646 u32 seq
= tp
->copied_seq
;
650 /* try to move as much data as available */
651 map_remaining
= subflow
->map_data_len
-
652 mptcp_subflow_get_map_offset(subflow
);
654 skb
= skb_peek(&ssk
->sk_receive_queue
);
656 /* With racing move_skbs_to_msk() and __mptcp_move_skbs(),
657 * a different CPU can have already processed the pending
658 * data, stop here or we can enter an infinite loop
665 if (__mptcp_check_fallback(msk
)) {
666 /* Under fallback skbs have no MPTCP extension and TCP could
667 * collapse them between the dummy map creation and the
668 * current dequeue. Be sure to adjust the map size.
670 map_remaining
= skb
->len
;
671 subflow
->map_data_len
= skb
->len
;
674 offset
= seq
- TCP_SKB_CB(skb
)->seq
;
675 fin
= TCP_SKB_CB(skb
)->tcp_flags
& TCPHDR_FIN
;
681 if (offset
< skb
->len
) {
682 size_t len
= skb
->len
- offset
;
687 if (__mptcp_move_skb(msk
, ssk
, skb
, offset
, len
))
691 if (WARN_ON_ONCE(map_remaining
< len
))
695 sk_eat_skb(ssk
, skb
);
699 WRITE_ONCE(tp
->copied_seq
, seq
);
700 more_data_avail
= mptcp_subflow_data_available(ssk
);
702 if (atomic_read(&sk
->sk_rmem_alloc
) > sk_rbuf
) {
706 } while (more_data_avail
);
712 static bool __mptcp_ofo_queue(struct mptcp_sock
*msk
)
714 struct sock
*sk
= (struct sock
*)msk
;
715 struct sk_buff
*skb
, *tail
;
720 p
= rb_first(&msk
->out_of_order_queue
);
721 pr_debug("msk=%p empty=%d", msk
, RB_EMPTY_ROOT(&msk
->out_of_order_queue
));
724 if (after64(MPTCP_SKB_CB(skb
)->map_seq
, msk
->ack_seq
))
728 rb_erase(&skb
->rbnode
, &msk
->out_of_order_queue
);
730 if (unlikely(!after64(MPTCP_SKB_CB(skb
)->end_seq
,
733 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_DUPDATA
);
737 end_seq
= MPTCP_SKB_CB(skb
)->end_seq
;
738 tail
= skb_peek_tail(&sk
->sk_receive_queue
);
739 if (!tail
|| !mptcp_ooo_try_coalesce(msk
, tail
, skb
)) {
740 int delta
= msk
->ack_seq
- MPTCP_SKB_CB(skb
)->map_seq
;
742 /* skip overlapping data, if any */
743 pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
744 MPTCP_SKB_CB(skb
)->map_seq
, msk
->ack_seq
,
746 MPTCP_SKB_CB(skb
)->offset
+= delta
;
747 MPTCP_SKB_CB(skb
)->map_seq
+= delta
;
748 __skb_queue_tail(&sk
->sk_receive_queue
, skb
);
750 msk
->bytes_received
+= end_seq
- msk
->ack_seq
;
751 msk
->ack_seq
= end_seq
;
757 static bool __mptcp_subflow_error_report(struct sock
*sk
, struct sock
*ssk
)
759 int err
= sock_error(ssk
);
765 /* only propagate errors on fallen-back sockets or
768 if (sk
->sk_state
!= TCP_SYN_SENT
&& !__mptcp_check_fallback(mptcp_sk(sk
)))
771 /* We need to propagate only transition to CLOSE state.
772 * Orphaned socket will see such state change via
773 * subflow_sched_work_if_closed() and that path will properly
774 * destroy the msk as needed.
776 ssk_state
= inet_sk_state_load(ssk
);
777 if (ssk_state
== TCP_CLOSE
&& !sock_flag(sk
, SOCK_DEAD
))
778 mptcp_set_state(sk
, ssk_state
);
779 WRITE_ONCE(sk
->sk_err
, -err
);
781 /* This barrier is coupled with smp_rmb() in mptcp_poll() */
787 void __mptcp_error_report(struct sock
*sk
)
789 struct mptcp_subflow_context
*subflow
;
790 struct mptcp_sock
*msk
= mptcp_sk(sk
);
792 mptcp_for_each_subflow(msk
, subflow
)
793 if (__mptcp_subflow_error_report(sk
, mptcp_subflow_tcp_sock(subflow
)))
797 /* In most cases we will be able to lock the mptcp socket. If its already
798 * owned, we need to defer to the work queue to avoid ABBA deadlock.
800 static bool move_skbs_to_msk(struct mptcp_sock
*msk
, struct sock
*ssk
)
802 struct sock
*sk
= (struct sock
*)msk
;
803 unsigned int moved
= 0;
805 __mptcp_move_skbs_from_subflow(msk
, ssk
, &moved
);
806 __mptcp_ofo_queue(msk
);
807 if (unlikely(ssk
->sk_err
)) {
808 if (!sock_owned_by_user(sk
))
809 __mptcp_error_report(sk
);
811 __set_bit(MPTCP_ERROR_REPORT
, &msk
->cb_flags
);
814 /* If the moves have caught up with the DATA_FIN sequence number
815 * it's time to ack the DATA_FIN and change socket state, but
816 * this is not a good place to change state. Let the workqueue
819 if (mptcp_pending_data_fin(sk
, NULL
))
820 mptcp_schedule_work(sk
);
824 void mptcp_data_ready(struct sock
*sk
, struct sock
*ssk
)
826 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
827 struct mptcp_sock
*msk
= mptcp_sk(sk
);
828 int sk_rbuf
, ssk_rbuf
;
830 /* The peer can send data while we are shutting down this
831 * subflow at msk destruction time, but we must avoid enqueuing
832 * more data to the msk receive queue
834 if (unlikely(subflow
->disposable
))
837 ssk_rbuf
= READ_ONCE(ssk
->sk_rcvbuf
);
838 sk_rbuf
= READ_ONCE(sk
->sk_rcvbuf
);
839 if (unlikely(ssk_rbuf
> sk_rbuf
))
842 /* over limit? can't append more skbs to msk, Also, no need to wake-up*/
843 if (__mptcp_rmem(sk
) > sk_rbuf
) {
844 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_RCVPRUNED
);
848 /* Wake-up the reader only for in-sequence data */
850 if (move_skbs_to_msk(msk
, ssk
) && mptcp_epollin_ready(sk
))
851 sk
->sk_data_ready(sk
);
852 mptcp_data_unlock(sk
);
855 static void mptcp_subflow_joined(struct mptcp_sock
*msk
, struct sock
*ssk
)
857 mptcp_subflow_ctx(ssk
)->map_seq
= READ_ONCE(msk
->ack_seq
);
858 WRITE_ONCE(msk
->allow_infinite_fallback
, false);
859 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED
, msk
, ssk
, GFP_ATOMIC
);
862 static bool __mptcp_finish_join(struct mptcp_sock
*msk
, struct sock
*ssk
)
864 struct sock
*sk
= (struct sock
*)msk
;
866 if (sk
->sk_state
!= TCP_ESTABLISHED
)
869 /* attach to msk socket only after we are sure we will deal with it
872 if (sk
->sk_socket
&& !ssk
->sk_socket
)
873 mptcp_sock_graft(ssk
, sk
->sk_socket
);
875 mptcp_subflow_ctx(ssk
)->subflow_id
= msk
->subflow_id
++;
876 mptcp_sockopt_sync_locked(msk
, ssk
);
877 mptcp_subflow_joined(msk
, ssk
);
878 mptcp_stop_tout_timer(sk
);
879 __mptcp_propagate_sndbuf(sk
, ssk
);
883 static void __mptcp_flush_join_list(struct sock
*sk
, struct list_head
*join_list
)
885 struct mptcp_subflow_context
*tmp
, *subflow
;
886 struct mptcp_sock
*msk
= mptcp_sk(sk
);
888 list_for_each_entry_safe(subflow
, tmp
, join_list
, node
) {
889 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
890 bool slow
= lock_sock_fast(ssk
);
892 list_move_tail(&subflow
->node
, &msk
->conn_list
);
893 if (!__mptcp_finish_join(msk
, ssk
))
894 mptcp_subflow_reset(ssk
);
895 unlock_sock_fast(ssk
, slow
);
899 static bool mptcp_rtx_timer_pending(struct sock
*sk
)
901 return timer_pending(&inet_csk(sk
)->icsk_retransmit_timer
);
904 static void mptcp_reset_rtx_timer(struct sock
*sk
)
906 struct inet_connection_sock
*icsk
= inet_csk(sk
);
909 /* prevent rescheduling on close */
910 if (unlikely(inet_sk_state_load(sk
) == TCP_CLOSE
))
913 tout
= mptcp_sk(sk
)->timer_ival
;
914 sk_reset_timer(sk
, &icsk
->icsk_retransmit_timer
, jiffies
+ tout
);
917 bool mptcp_schedule_work(struct sock
*sk
)
919 if (inet_sk_state_load(sk
) != TCP_CLOSE
&&
920 schedule_work(&mptcp_sk(sk
)->work
)) {
921 /* each subflow already holds a reference to the sk, and the
922 * workqueue is invoked by a subflow, so sk can't go away here.
930 static struct sock
*mptcp_subflow_recv_lookup(const struct mptcp_sock
*msk
)
932 struct mptcp_subflow_context
*subflow
;
934 msk_owned_by_me(msk
);
936 mptcp_for_each_subflow(msk
, subflow
) {
937 if (READ_ONCE(subflow
->data_avail
))
938 return mptcp_subflow_tcp_sock(subflow
);
944 static bool mptcp_skb_can_collapse_to(u64 write_seq
,
945 const struct sk_buff
*skb
,
946 const struct mptcp_ext
*mpext
)
948 if (!tcp_skb_can_collapse_to(skb
))
951 /* can collapse only if MPTCP level sequence is in order and this
952 * mapping has not been xmitted yet
954 return mpext
&& mpext
->data_seq
+ mpext
->data_len
== write_seq
&&
958 /* we can append data to the given data frag if:
959 * - there is space available in the backing page_frag
960 * - the data frag tail matches the current page_frag free offset
961 * - the data frag end sequence number matches the current write seq
963 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock
*msk
,
964 const struct page_frag
*pfrag
,
965 const struct mptcp_data_frag
*df
)
967 return df
&& pfrag
->page
== df
->page
&&
968 pfrag
->size
- pfrag
->offset
> 0 &&
969 pfrag
->offset
== (df
->offset
+ df
->data_len
) &&
970 df
->data_seq
+ df
->data_len
== msk
->write_seq
;
973 static void dfrag_uncharge(struct sock
*sk
, int len
)
975 sk_mem_uncharge(sk
, len
);
976 sk_wmem_queued_add(sk
, -len
);
979 static void dfrag_clear(struct sock
*sk
, struct mptcp_data_frag
*dfrag
)
981 int len
= dfrag
->data_len
+ dfrag
->overhead
;
983 list_del(&dfrag
->list
);
984 dfrag_uncharge(sk
, len
);
985 put_page(dfrag
->page
);
988 static void __mptcp_clean_una(struct sock
*sk
)
990 struct mptcp_sock
*msk
= mptcp_sk(sk
);
991 struct mptcp_data_frag
*dtmp
, *dfrag
;
994 snd_una
= msk
->snd_una
;
995 list_for_each_entry_safe(dfrag
, dtmp
, &msk
->rtx_queue
, list
) {
996 if (after64(dfrag
->data_seq
+ dfrag
->data_len
, snd_una
))
999 if (unlikely(dfrag
== msk
->first_pending
)) {
1000 /* in recovery mode can see ack after the current snd head */
1001 if (WARN_ON_ONCE(!msk
->recovery
))
1004 WRITE_ONCE(msk
->first_pending
, mptcp_send_next(sk
));
1007 dfrag_clear(sk
, dfrag
);
1010 dfrag
= mptcp_rtx_head(sk
);
1011 if (dfrag
&& after64(snd_una
, dfrag
->data_seq
)) {
1012 u64 delta
= snd_una
- dfrag
->data_seq
;
1014 /* prevent wrap around in recovery mode */
1015 if (unlikely(delta
> dfrag
->already_sent
)) {
1016 if (WARN_ON_ONCE(!msk
->recovery
))
1018 if (WARN_ON_ONCE(delta
> dfrag
->data_len
))
1020 dfrag
->already_sent
+= delta
- dfrag
->already_sent
;
1023 dfrag
->data_seq
+= delta
;
1024 dfrag
->offset
+= delta
;
1025 dfrag
->data_len
-= delta
;
1026 dfrag
->already_sent
-= delta
;
1028 dfrag_uncharge(sk
, delta
);
1031 /* all retransmitted data acked, recovery completed */
1032 if (unlikely(msk
->recovery
) && after64(msk
->snd_una
, msk
->recovery_snd_nxt
))
1033 msk
->recovery
= false;
1036 if (snd_una
== READ_ONCE(msk
->snd_nxt
) &&
1037 snd_una
== READ_ONCE(msk
->write_seq
)) {
1038 if (mptcp_rtx_timer_pending(sk
) && !mptcp_data_fin_enabled(msk
))
1039 mptcp_stop_rtx_timer(sk
);
1041 mptcp_reset_rtx_timer(sk
);
1045 static void __mptcp_clean_una_wakeup(struct sock
*sk
)
1047 lockdep_assert_held_once(&sk
->sk_lock
.slock
);
1049 __mptcp_clean_una(sk
);
1050 mptcp_write_space(sk
);
1053 static void mptcp_clean_una_wakeup(struct sock
*sk
)
1055 mptcp_data_lock(sk
);
1056 __mptcp_clean_una_wakeup(sk
);
1057 mptcp_data_unlock(sk
);
1060 static void mptcp_enter_memory_pressure(struct sock
*sk
)
1062 struct mptcp_subflow_context
*subflow
;
1063 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1066 mptcp_for_each_subflow(msk
, subflow
) {
1067 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
1070 tcp_enter_memory_pressure(ssk
);
1071 sk_stream_moderate_sndbuf(ssk
);
1075 __mptcp_sync_sndbuf(sk
);
1078 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of
1081 static bool mptcp_page_frag_refill(struct sock
*sk
, struct page_frag
*pfrag
)
1083 if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag
),
1084 pfrag
, sk
->sk_allocation
)))
1087 mptcp_enter_memory_pressure(sk
);
1091 static struct mptcp_data_frag
*
1092 mptcp_carve_data_frag(const struct mptcp_sock
*msk
, struct page_frag
*pfrag
,
1095 int offset
= ALIGN(orig_offset
, sizeof(long));
1096 struct mptcp_data_frag
*dfrag
;
1098 dfrag
= (struct mptcp_data_frag
*)(page_to_virt(pfrag
->page
) + offset
);
1099 dfrag
->data_len
= 0;
1100 dfrag
->data_seq
= msk
->write_seq
;
1101 dfrag
->overhead
= offset
- orig_offset
+ sizeof(struct mptcp_data_frag
);
1102 dfrag
->offset
= offset
+ sizeof(struct mptcp_data_frag
);
1103 dfrag
->already_sent
= 0;
1104 dfrag
->page
= pfrag
->page
;
1109 struct mptcp_sendmsg_info
{
1115 bool data_lock_held
;
1118 static int mptcp_check_allowed_size(const struct mptcp_sock
*msk
, struct sock
*ssk
,
1119 u64 data_seq
, int avail_size
)
1121 u64 window_end
= mptcp_wnd_end(msk
);
1124 if (__mptcp_check_fallback(msk
))
1127 mptcp_snd_wnd
= window_end
- data_seq
;
1128 avail_size
= min_t(unsigned int, mptcp_snd_wnd
, avail_size
);
1130 if (unlikely(tcp_sk(ssk
)->snd_wnd
< mptcp_snd_wnd
)) {
1131 tcp_sk(ssk
)->snd_wnd
= min_t(u64
, U32_MAX
, mptcp_snd_wnd
);
1132 MPTCP_INC_STATS(sock_net(ssk
), MPTCP_MIB_SNDWNDSHARED
);
1138 static bool __mptcp_add_ext(struct sk_buff
*skb
, gfp_t gfp
)
1140 struct skb_ext
*mpext
= __skb_ext_alloc(gfp
);
1144 __skb_ext_set(skb
, SKB_EXT_MPTCP
, mpext
);
1148 static struct sk_buff
*__mptcp_do_alloc_tx_skb(struct sock
*sk
, gfp_t gfp
)
1150 struct sk_buff
*skb
;
1152 skb
= alloc_skb_fclone(MAX_TCP_HEADER
, gfp
);
1154 if (likely(__mptcp_add_ext(skb
, gfp
))) {
1155 skb_reserve(skb
, MAX_TCP_HEADER
);
1156 skb
->ip_summed
= CHECKSUM_PARTIAL
;
1157 INIT_LIST_HEAD(&skb
->tcp_tsorted_anchor
);
1162 mptcp_enter_memory_pressure(sk
);
1167 static struct sk_buff
*__mptcp_alloc_tx_skb(struct sock
*sk
, struct sock
*ssk
, gfp_t gfp
)
1169 struct sk_buff
*skb
;
1171 skb
= __mptcp_do_alloc_tx_skb(sk
, gfp
);
1175 if (likely(sk_wmem_schedule(ssk
, skb
->truesize
))) {
1176 tcp_skb_entail(ssk
, skb
);
1179 tcp_skb_tsorted_anchor_cleanup(skb
);
1184 static struct sk_buff
*mptcp_alloc_tx_skb(struct sock
*sk
, struct sock
*ssk
, bool data_lock_held
)
1186 gfp_t gfp
= data_lock_held
? GFP_ATOMIC
: sk
->sk_allocation
;
1188 return __mptcp_alloc_tx_skb(sk
, ssk
, gfp
);
1191 /* note: this always recompute the csum on the whole skb, even
1192 * if we just appended a single frag. More status info needed
1194 static void mptcp_update_data_checksum(struct sk_buff
*skb
, int added
)
1196 struct mptcp_ext
*mpext
= mptcp_get_ext(skb
);
1197 __wsum csum
= ~csum_unfold(mpext
->csum
);
1198 int offset
= skb
->len
- added
;
1200 mpext
->csum
= csum_fold(csum_block_add(csum
, skb_checksum(skb
, offset
, added
, 0), offset
));
1203 static void mptcp_update_infinite_map(struct mptcp_sock
*msk
,
1205 struct mptcp_ext
*mpext
)
1210 mpext
->infinite_map
= 1;
1211 mpext
->data_len
= 0;
1213 MPTCP_INC_STATS(sock_net(ssk
), MPTCP_MIB_INFINITEMAPTX
);
1214 mptcp_subflow_ctx(ssk
)->send_infinite_map
= 0;
1216 mptcp_do_fallback(ssk
);
1219 #define MPTCP_MAX_GSO_SIZE (GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
1221 static int mptcp_sendmsg_frag(struct sock
*sk
, struct sock
*ssk
,
1222 struct mptcp_data_frag
*dfrag
,
1223 struct mptcp_sendmsg_info
*info
)
1225 u64 data_seq
= dfrag
->data_seq
+ info
->sent
;
1226 int offset
= dfrag
->offset
+ info
->sent
;
1227 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1228 bool zero_window_probe
= false;
1229 struct mptcp_ext
*mpext
= NULL
;
1230 bool can_coalesce
= false;
1231 bool reuse_skb
= true;
1232 struct sk_buff
*skb
;
1236 pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
1237 msk
, ssk
, dfrag
->data_seq
, dfrag
->data_len
, info
->sent
);
1239 if (WARN_ON_ONCE(info
->sent
> info
->limit
||
1240 info
->limit
> dfrag
->data_len
))
1243 if (unlikely(!__tcp_can_send(ssk
)))
1246 /* compute send limit */
1247 if (unlikely(ssk
->sk_gso_max_size
> MPTCP_MAX_GSO_SIZE
))
1248 ssk
->sk_gso_max_size
= MPTCP_MAX_GSO_SIZE
;
1249 info
->mss_now
= tcp_send_mss(ssk
, &info
->size_goal
, info
->flags
);
1250 copy
= info
->size_goal
;
1252 skb
= tcp_write_queue_tail(ssk
);
1253 if (skb
&& copy
> skb
->len
) {
1254 /* Limit the write to the size available in the
1255 * current skb, if any, so that we create at most a new skb.
1256 * Explicitly tells TCP internals to avoid collapsing on later
1257 * queue management operation, to avoid breaking the ext <->
1258 * SSN association set here
1260 mpext
= mptcp_get_ext(skb
);
1261 if (!mptcp_skb_can_collapse_to(data_seq
, skb
, mpext
)) {
1262 TCP_SKB_CB(skb
)->eor
= 1;
1266 i
= skb_shinfo(skb
)->nr_frags
;
1267 can_coalesce
= skb_can_coalesce(skb
, i
, dfrag
->page
, offset
);
1268 if (!can_coalesce
&& i
>= READ_ONCE(sysctl_max_skb_frags
)) {
1269 tcp_mark_push(tcp_sk(ssk
), skb
);
1276 skb
= mptcp_alloc_tx_skb(sk
, ssk
, info
->data_lock_held
);
1280 i
= skb_shinfo(skb
)->nr_frags
;
1282 mpext
= mptcp_get_ext(skb
);
1285 /* Zero window and all data acked? Probe. */
1286 copy
= mptcp_check_allowed_size(msk
, ssk
, data_seq
, copy
);
1288 u64 snd_una
= READ_ONCE(msk
->snd_una
);
1290 if (snd_una
!= msk
->snd_nxt
|| tcp_write_queue_tail(ssk
)) {
1291 tcp_remove_empty_skb(ssk
);
1295 zero_window_probe
= true;
1296 data_seq
= snd_una
- 1;
1300 copy
= min_t(size_t, copy
, info
->limit
- info
->sent
);
1301 if (!sk_wmem_schedule(ssk
, copy
)) {
1302 tcp_remove_empty_skb(ssk
);
1307 skb_frag_size_add(&skb_shinfo(skb
)->frags
[i
- 1], copy
);
1309 get_page(dfrag
->page
);
1310 skb_fill_page_desc(skb
, i
, dfrag
->page
, offset
, copy
);
1314 skb
->data_len
+= copy
;
1315 skb
->truesize
+= copy
;
1316 sk_wmem_queued_add(ssk
, copy
);
1317 sk_mem_charge(ssk
, copy
);
1318 WRITE_ONCE(tcp_sk(ssk
)->write_seq
, tcp_sk(ssk
)->write_seq
+ copy
);
1319 TCP_SKB_CB(skb
)->end_seq
+= copy
;
1320 tcp_skb_pcount_set(skb
, 0);
1322 /* on skb reuse we just need to update the DSS len */
1324 TCP_SKB_CB(skb
)->tcp_flags
&= ~TCPHDR_PSH
;
1325 mpext
->data_len
+= copy
;
1329 memset(mpext
, 0, sizeof(*mpext
));
1330 mpext
->data_seq
= data_seq
;
1331 mpext
->subflow_seq
= mptcp_subflow_ctx(ssk
)->rel_write_seq
;
1332 mpext
->data_len
= copy
;
1336 pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
1337 mpext
->data_seq
, mpext
->subflow_seq
, mpext
->data_len
,
1340 if (zero_window_probe
) {
1341 mptcp_subflow_ctx(ssk
)->rel_write_seq
+= copy
;
1343 if (READ_ONCE(msk
->csum_enabled
))
1344 mptcp_update_data_checksum(skb
, copy
);
1345 tcp_push_pending_frames(ssk
);
1349 if (READ_ONCE(msk
->csum_enabled
))
1350 mptcp_update_data_checksum(skb
, copy
);
1351 if (mptcp_subflow_ctx(ssk
)->send_infinite_map
)
1352 mptcp_update_infinite_map(msk
, ssk
, mpext
);
1353 trace_mptcp_sendmsg_frag(mpext
);
1354 mptcp_subflow_ctx(ssk
)->rel_write_seq
+= copy
;
1358 #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \
1359 sizeof(struct tcphdr) - \
1360 MAX_TCP_OPTION_SPACE - \
1361 sizeof(struct ipv6hdr) - \
1362 sizeof(struct frag_hdr))
1364 struct subflow_send_info
{
1369 void mptcp_subflow_set_active(struct mptcp_subflow_context
*subflow
)
1371 if (!subflow
->stale
)
1375 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow
)), MPTCP_MIB_SUBFLOWRECOVER
);
1378 bool mptcp_subflow_active(struct mptcp_subflow_context
*subflow
)
1380 if (unlikely(subflow
->stale
)) {
1381 u32 rcv_tstamp
= READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow
))->rcv_tstamp
);
1383 if (subflow
->stale_rcv_tstamp
== rcv_tstamp
)
1386 mptcp_subflow_set_active(subflow
);
1388 return __mptcp_subflow_active(subflow
);
1391 #define SSK_MODE_ACTIVE 0
1392 #define SSK_MODE_BACKUP 1
1393 #define SSK_MODE_MAX 2
1395 /* implement the mptcp packet scheduler;
1396 * returns the subflow that will transmit the next DSS
1397 * additionally updates the rtx timeout
1399 struct sock
*mptcp_subflow_get_send(struct mptcp_sock
*msk
)
1401 struct subflow_send_info send_info
[SSK_MODE_MAX
];
1402 struct mptcp_subflow_context
*subflow
;
1403 struct sock
*sk
= (struct sock
*)msk
;
1404 u32 pace
, burst
, wmem
;
1405 int i
, nr_active
= 0;
1410 /* pick the subflow with the lower wmem/wspace ratio */
1411 for (i
= 0; i
< SSK_MODE_MAX
; ++i
) {
1412 send_info
[i
].ssk
= NULL
;
1413 send_info
[i
].linger_time
= -1;
1416 mptcp_for_each_subflow(msk
, subflow
) {
1417 trace_mptcp_subflow_get_send(subflow
);
1418 ssk
= mptcp_subflow_tcp_sock(subflow
);
1419 if (!mptcp_subflow_active(subflow
))
1422 tout
= max(tout
, mptcp_timeout_from_subflow(subflow
));
1423 nr_active
+= !subflow
->backup
;
1424 pace
= subflow
->avg_pacing_rate
;
1425 if (unlikely(!pace
)) {
1426 /* init pacing rate from socket */
1427 subflow
->avg_pacing_rate
= READ_ONCE(ssk
->sk_pacing_rate
);
1428 pace
= subflow
->avg_pacing_rate
;
1433 linger_time
= div_u64((u64
)READ_ONCE(ssk
->sk_wmem_queued
) << 32, pace
);
1434 if (linger_time
< send_info
[subflow
->backup
].linger_time
) {
1435 send_info
[subflow
->backup
].ssk
= ssk
;
1436 send_info
[subflow
->backup
].linger_time
= linger_time
;
1439 __mptcp_set_timeout(sk
, tout
);
1441 /* pick the best backup if no other subflow is active */
1443 send_info
[SSK_MODE_ACTIVE
].ssk
= send_info
[SSK_MODE_BACKUP
].ssk
;
1445 /* According to the blest algorithm, to avoid HoL blocking for the
1446 * faster flow, we need to:
1447 * - estimate the faster flow linger time
1448 * - use the above to estimate the amount of byte transferred
1449 * by the faster flow
1450 * - check that the amount of queued data is greter than the above,
1451 * otherwise do not use the picked, slower, subflow
1452 * We select the subflow with the shorter estimated time to flush
1453 * the queued mem, which basically ensure the above. We just need
1454 * to check that subflow has a non empty cwin.
1456 ssk
= send_info
[SSK_MODE_ACTIVE
].ssk
;
1457 if (!ssk
|| !sk_stream_memory_free(ssk
))
1460 burst
= min_t(int, MPTCP_SEND_BURST_SIZE
, mptcp_wnd_end(msk
) - msk
->snd_nxt
);
1461 wmem
= READ_ONCE(ssk
->sk_wmem_queued
);
1465 subflow
= mptcp_subflow_ctx(ssk
);
1466 subflow
->avg_pacing_rate
= div_u64((u64
)subflow
->avg_pacing_rate
* wmem
+
1467 READ_ONCE(ssk
->sk_pacing_rate
) * burst
,
1469 msk
->snd_burst
= burst
;
1473 static void mptcp_push_release(struct sock
*ssk
, struct mptcp_sendmsg_info
*info
)
1475 tcp_push(ssk
, 0, info
->mss_now
, tcp_sk(ssk
)->nonagle
, info
->size_goal
);
1479 static void mptcp_update_post_push(struct mptcp_sock
*msk
,
1480 struct mptcp_data_frag
*dfrag
,
1483 u64 snd_nxt_new
= dfrag
->data_seq
;
1485 dfrag
->already_sent
+= sent
;
1487 msk
->snd_burst
-= sent
;
1489 snd_nxt_new
+= dfrag
->already_sent
;
1491 /* snd_nxt_new can be smaller than snd_nxt in case mptcp
1492 * is recovering after a failover. In that event, this re-sends
1495 * Thus compute snd_nxt_new candidate based on
1496 * the dfrag->data_seq that was sent and the data
1497 * that has been handed to the subflow for transmission
1498 * and skip update in case it was old dfrag.
1500 if (likely(after64(snd_nxt_new
, msk
->snd_nxt
))) {
1501 msk
->bytes_sent
+= snd_nxt_new
- msk
->snd_nxt
;
1502 msk
->snd_nxt
= snd_nxt_new
;
1506 void mptcp_check_and_set_pending(struct sock
*sk
)
1508 if (mptcp_send_head(sk
)) {
1509 mptcp_data_lock(sk
);
1510 mptcp_sk(sk
)->cb_flags
|= BIT(MPTCP_PUSH_PENDING
);
1511 mptcp_data_unlock(sk
);
1515 static int __subflow_push_pending(struct sock
*sk
, struct sock
*ssk
,
1516 struct mptcp_sendmsg_info
*info
)
1518 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1519 struct mptcp_data_frag
*dfrag
;
1520 int len
, copied
= 0, err
= 0;
1522 while ((dfrag
= mptcp_send_head(sk
))) {
1523 info
->sent
= dfrag
->already_sent
;
1524 info
->limit
= dfrag
->data_len
;
1525 len
= dfrag
->data_len
- dfrag
->already_sent
;
1529 ret
= mptcp_sendmsg_frag(sk
, ssk
, dfrag
, info
);
1531 err
= copied
? : ret
;
1539 mptcp_update_post_push(msk
, dfrag
, ret
);
1541 WRITE_ONCE(msk
->first_pending
, mptcp_send_next(sk
));
1543 if (msk
->snd_burst
<= 0 ||
1544 !sk_stream_memory_free(ssk
) ||
1545 !mptcp_subflow_active(mptcp_subflow_ctx(ssk
))) {
1549 mptcp_set_timeout(sk
);
1557 void __mptcp_push_pending(struct sock
*sk
, unsigned int flags
)
1559 struct sock
*prev_ssk
= NULL
, *ssk
= NULL
;
1560 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1561 struct mptcp_sendmsg_info info
= {
1564 bool do_check_data_fin
= false;
1567 while (mptcp_send_head(sk
) && (push_count
> 0)) {
1568 struct mptcp_subflow_context
*subflow
;
1571 if (mptcp_sched_get_send(msk
))
1576 mptcp_for_each_subflow(msk
, subflow
) {
1577 if (READ_ONCE(subflow
->scheduled
)) {
1578 mptcp_subflow_set_scheduled(subflow
, false);
1581 ssk
= mptcp_subflow_tcp_sock(subflow
);
1582 if (ssk
!= prev_ssk
) {
1583 /* First check. If the ssk has changed since
1584 * the last round, release prev_ssk
1587 mptcp_push_release(prev_ssk
, &info
);
1589 /* Need to lock the new subflow only if different
1590 * from the previous one, otherwise we are still
1591 * helding the relevant lock
1598 ret
= __subflow_push_pending(sk
, ssk
, &info
);
1600 if (ret
!= -EAGAIN
||
1601 (1 << ssk
->sk_state
) &
1602 (TCPF_FIN_WAIT1
| TCPF_FIN_WAIT2
| TCPF_CLOSE
))
1606 do_check_data_fin
= true;
1611 /* at this point we held the socket lock for the last subflow we used */
1613 mptcp_push_release(ssk
, &info
);
1615 /* ensure the rtx timer is running */
1616 if (!mptcp_rtx_timer_pending(sk
))
1617 mptcp_reset_rtx_timer(sk
);
1618 if (do_check_data_fin
)
1619 mptcp_check_send_data_fin(sk
);
1622 static void __mptcp_subflow_push_pending(struct sock
*sk
, struct sock
*ssk
, bool first
)
1624 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1625 struct mptcp_sendmsg_info info
= {
1626 .data_lock_held
= true,
1628 bool keep_pushing
= true;
1629 struct sock
*xmit_ssk
;
1633 while (mptcp_send_head(sk
) && keep_pushing
) {
1634 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
1637 /* check for a different subflow usage only after
1638 * spooling the first chunk of data
1641 mptcp_subflow_set_scheduled(subflow
, false);
1642 ret
= __subflow_push_pending(sk
, ssk
, &info
);
1650 if (mptcp_sched_get_send(msk
))
1653 if (READ_ONCE(subflow
->scheduled
)) {
1654 mptcp_subflow_set_scheduled(subflow
, false);
1655 ret
= __subflow_push_pending(sk
, ssk
, &info
);
1657 keep_pushing
= false;
1661 mptcp_for_each_subflow(msk
, subflow
) {
1662 if (READ_ONCE(subflow
->scheduled
)) {
1663 xmit_ssk
= mptcp_subflow_tcp_sock(subflow
);
1664 if (xmit_ssk
!= ssk
) {
1665 mptcp_subflow_delegate(subflow
,
1666 MPTCP_DELEGATE_SEND
);
1667 keep_pushing
= false;
1674 /* __mptcp_alloc_tx_skb could have released some wmem and we are
1675 * not going to flush it via release_sock()
1678 tcp_push(ssk
, 0, info
.mss_now
, tcp_sk(ssk
)->nonagle
,
1680 if (!mptcp_rtx_timer_pending(sk
))
1681 mptcp_reset_rtx_timer(sk
);
1683 if (msk
->snd_data_fin_enable
&&
1684 msk
->snd_nxt
+ 1 == msk
->write_seq
)
1685 mptcp_schedule_work(sk
);
1689 static void mptcp_set_nospace(struct sock
*sk
)
1691 /* enable autotune */
1692 set_bit(SOCK_NOSPACE
, &sk
->sk_socket
->flags
);
1694 /* will be cleared on avail space */
1695 set_bit(MPTCP_NOSPACE
, &mptcp_sk(sk
)->flags
);
1698 static int mptcp_disconnect(struct sock
*sk
, int flags
);
1700 static int mptcp_sendmsg_fastopen(struct sock
*sk
, struct msghdr
*msg
,
1701 size_t len
, int *copied_syn
)
1703 unsigned int saved_flags
= msg
->msg_flags
;
1704 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1708 /* on flags based fastopen the mptcp is supposed to create the
1709 * first subflow right now. Otherwise we are in the defer_connect
1710 * path, and the first subflow must be already present.
1711 * Since the defer_connect flag is cleared after the first succsful
1712 * fastopen attempt, no need to check for additional subflow status.
1714 if (msg
->msg_flags
& MSG_FASTOPEN
) {
1715 ssk
= __mptcp_nmpc_sk(msk
);
1717 return PTR_ERR(ssk
);
1725 msg
->msg_flags
|= MSG_DONTWAIT
;
1726 msk
->fastopening
= 1;
1727 ret
= tcp_sendmsg_fastopen(ssk
, msg
, copied_syn
, len
, NULL
);
1728 msk
->fastopening
= 0;
1729 msg
->msg_flags
= saved_flags
;
1732 /* do the blocking bits of inet_stream_connect outside the ssk socket lock */
1733 if (ret
== -EINPROGRESS
&& !(msg
->msg_flags
& MSG_DONTWAIT
)) {
1734 ret
= __inet_stream_connect(sk
->sk_socket
, msg
->msg_name
,
1735 msg
->msg_namelen
, msg
->msg_flags
, 1);
1737 /* Keep the same behaviour of plain TCP: zero the copied bytes in
1738 * case of any error, except timeout or signal
1740 if (ret
&& ret
!= -EINPROGRESS
&& ret
!= -ERESTARTSYS
&& ret
!= -EINTR
)
1742 } else if (ret
&& ret
!= -EINPROGRESS
) {
1743 /* The disconnect() op called by tcp_sendmsg_fastopen()/
1744 * __inet_stream_connect() can fail, due to looking check,
1745 * see mptcp_disconnect().
1746 * Attempt it again outside the problematic scope.
1748 if (!mptcp_disconnect(sk
, 0))
1749 sk
->sk_socket
->state
= SS_UNCONNECTED
;
1751 inet_clear_bit(DEFER_CONNECT
, sk
);
1756 static int do_copy_data_nocache(struct sock
*sk
, int copy
,
1757 struct iov_iter
*from
, char *to
)
1759 if (sk
->sk_route_caps
& NETIF_F_NOCACHE_COPY
) {
1760 if (!copy_from_iter_full_nocache(to
, copy
, from
))
1762 } else if (!copy_from_iter_full(to
, copy
, from
)) {
1768 static int mptcp_sendmsg(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1770 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1771 struct page_frag
*pfrag
;
1776 /* silently ignore everything else */
1777 msg
->msg_flags
&= MSG_MORE
| MSG_DONTWAIT
| MSG_NOSIGNAL
| MSG_FASTOPEN
;
1781 if (unlikely(inet_test_bit(DEFER_CONNECT
, sk
) ||
1782 msg
->msg_flags
& MSG_FASTOPEN
)) {
1785 ret
= mptcp_sendmsg_fastopen(sk
, msg
, len
, &copied_syn
);
1786 copied
+= copied_syn
;
1787 if (ret
== -EINPROGRESS
&& copied_syn
> 0)
1793 timeo
= sock_sndtimeo(sk
, msg
->msg_flags
& MSG_DONTWAIT
);
1795 if ((1 << sk
->sk_state
) & ~(TCPF_ESTABLISHED
| TCPF_CLOSE_WAIT
)) {
1796 ret
= sk_stream_wait_connect(sk
, &timeo
);
1802 if (unlikely(sk
->sk_err
|| (sk
->sk_shutdown
& SEND_SHUTDOWN
)))
1805 pfrag
= sk_page_frag(sk
);
1807 while (msg_data_left(msg
)) {
1808 int total_ts
, frag_truesize
= 0;
1809 struct mptcp_data_frag
*dfrag
;
1810 bool dfrag_collapsed
;
1811 size_t psize
, offset
;
1813 /* reuse tail pfrag, if possible, or carve a new one from the
1816 dfrag
= mptcp_pending_tail(sk
);
1817 dfrag_collapsed
= mptcp_frag_can_collapse_to(msk
, pfrag
, dfrag
);
1818 if (!dfrag_collapsed
) {
1819 if (!sk_stream_memory_free(sk
))
1820 goto wait_for_memory
;
1822 if (!mptcp_page_frag_refill(sk
, pfrag
))
1823 goto wait_for_memory
;
1825 dfrag
= mptcp_carve_data_frag(msk
, pfrag
, pfrag
->offset
);
1826 frag_truesize
= dfrag
->overhead
;
1829 /* we do not bound vs wspace, to allow a single packet.
1830 * memory accounting will prevent execessive memory usage
1833 offset
= dfrag
->offset
+ dfrag
->data_len
;
1834 psize
= pfrag
->size
- offset
;
1835 psize
= min_t(size_t, psize
, msg_data_left(msg
));
1836 total_ts
= psize
+ frag_truesize
;
1838 if (!sk_wmem_schedule(sk
, total_ts
))
1839 goto wait_for_memory
;
1841 ret
= do_copy_data_nocache(sk
, psize
, &msg
->msg_iter
,
1842 page_address(dfrag
->page
) + offset
);
1846 /* data successfully copied into the write queue */
1847 sk_forward_alloc_add(sk
, -total_ts
);
1849 dfrag
->data_len
+= psize
;
1850 frag_truesize
+= psize
;
1851 pfrag
->offset
+= frag_truesize
;
1852 WRITE_ONCE(msk
->write_seq
, msk
->write_seq
+ psize
);
1854 /* charge data on mptcp pending queue to the msk socket
1855 * Note: we charge such data both to sk and ssk
1857 sk_wmem_queued_add(sk
, frag_truesize
);
1858 if (!dfrag_collapsed
) {
1859 get_page(dfrag
->page
);
1860 list_add_tail(&dfrag
->list
, &msk
->rtx_queue
);
1861 if (!msk
->first_pending
)
1862 WRITE_ONCE(msk
->first_pending
, dfrag
);
1864 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk
,
1865 dfrag
->data_seq
, dfrag
->data_len
, dfrag
->already_sent
,
1871 mptcp_set_nospace(sk
);
1872 __mptcp_push_pending(sk
, msg
->msg_flags
);
1873 ret
= sk_stream_wait_memory(sk
, &timeo
);
1879 __mptcp_push_pending(sk
, msg
->msg_flags
);
1889 copied
= sk_stream_error(sk
, msg
->msg_flags
, ret
);
1893 static int __mptcp_recvmsg_mskq(struct mptcp_sock
*msk
,
1895 size_t len
, int flags
,
1896 struct scm_timestamping_internal
*tss
,
1899 struct sk_buff
*skb
, *tmp
;
1902 skb_queue_walk_safe(&msk
->receive_queue
, skb
, tmp
) {
1903 u32 offset
= MPTCP_SKB_CB(skb
)->offset
;
1904 u32 data_len
= skb
->len
- offset
;
1905 u32 count
= min_t(size_t, len
- copied
, data_len
);
1908 if (!(flags
& MSG_TRUNC
)) {
1909 err
= skb_copy_datagram_msg(skb
, offset
, msg
, count
);
1910 if (unlikely(err
< 0)) {
1917 if (MPTCP_SKB_CB(skb
)->has_rxtstamp
) {
1918 tcp_update_recv_tstamps(skb
, tss
);
1919 *cmsg_flags
|= MPTCP_CMSG_TS
;
1924 if (count
< data_len
) {
1925 if (!(flags
& MSG_PEEK
)) {
1926 MPTCP_SKB_CB(skb
)->offset
+= count
;
1927 MPTCP_SKB_CB(skb
)->map_seq
+= count
;
1928 msk
->bytes_consumed
+= count
;
1933 if (!(flags
& MSG_PEEK
)) {
1934 /* we will bulk release the skb memory later */
1935 skb
->destructor
= NULL
;
1936 WRITE_ONCE(msk
->rmem_released
, msk
->rmem_released
+ skb
->truesize
);
1937 __skb_unlink(skb
, &msk
->receive_queue
);
1939 msk
->bytes_consumed
+= count
;
1949 /* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
1951 * Only difference: Use highest rtt estimate of the subflows in use.
1953 static void mptcp_rcv_space_adjust(struct mptcp_sock
*msk
, int copied
)
1955 struct mptcp_subflow_context
*subflow
;
1956 struct sock
*sk
= (struct sock
*)msk
;
1957 u8 scaling_ratio
= U8_MAX
;
1958 u32 time
, advmss
= 1;
1961 msk_owned_by_me(msk
);
1966 msk
->rcvq_space
.copied
+= copied
;
1968 mstamp
= div_u64(tcp_clock_ns(), NSEC_PER_USEC
);
1969 time
= tcp_stamp_us_delta(mstamp
, msk
->rcvq_space
.time
);
1971 rtt_us
= msk
->rcvq_space
.rtt_us
;
1972 if (rtt_us
&& time
< (rtt_us
>> 3))
1976 mptcp_for_each_subflow(msk
, subflow
) {
1977 const struct tcp_sock
*tp
;
1981 tp
= tcp_sk(mptcp_subflow_tcp_sock(subflow
));
1983 sf_rtt_us
= READ_ONCE(tp
->rcv_rtt_est
.rtt_us
);
1984 sf_advmss
= READ_ONCE(tp
->advmss
);
1986 rtt_us
= max(sf_rtt_us
, rtt_us
);
1987 advmss
= max(sf_advmss
, advmss
);
1988 scaling_ratio
= min(tp
->scaling_ratio
, scaling_ratio
);
1991 msk
->rcvq_space
.rtt_us
= rtt_us
;
1992 msk
->scaling_ratio
= scaling_ratio
;
1993 if (time
< (rtt_us
>> 3) || rtt_us
== 0)
1996 if (msk
->rcvq_space
.copied
<= msk
->rcvq_space
.space
)
1999 if (READ_ONCE(sock_net(sk
)->ipv4
.sysctl_tcp_moderate_rcvbuf
) &&
2000 !(sk
->sk_userlocks
& SOCK_RCVBUF_LOCK
)) {
2004 rcvwin
= ((u64
)msk
->rcvq_space
.copied
<< 1) + 16 * advmss
;
2006 grow
= rcvwin
* (msk
->rcvq_space
.copied
- msk
->rcvq_space
.space
);
2008 do_div(grow
, msk
->rcvq_space
.space
);
2009 rcvwin
+= (grow
<< 1);
2011 rcvbuf
= min_t(u64
, __tcp_space_from_win(scaling_ratio
, rcvwin
),
2012 READ_ONCE(sock_net(sk
)->ipv4
.sysctl_tcp_rmem
[2]));
2014 if (rcvbuf
> sk
->sk_rcvbuf
) {
2017 window_clamp
= __tcp_win_from_space(scaling_ratio
, rcvbuf
);
2018 WRITE_ONCE(sk
->sk_rcvbuf
, rcvbuf
);
2020 /* Make subflows follow along. If we do not do this, we
2021 * get drops at subflow level if skbs can't be moved to
2022 * the mptcp rx queue fast enough (announced rcv_win can
2023 * exceed ssk->sk_rcvbuf).
2025 mptcp_for_each_subflow(msk
, subflow
) {
2029 ssk
= mptcp_subflow_tcp_sock(subflow
);
2030 slow
= lock_sock_fast(ssk
);
2031 WRITE_ONCE(ssk
->sk_rcvbuf
, rcvbuf
);
2032 tcp_sk(ssk
)->window_clamp
= window_clamp
;
2033 tcp_cleanup_rbuf(ssk
, 1);
2034 unlock_sock_fast(ssk
, slow
);
2039 msk
->rcvq_space
.space
= msk
->rcvq_space
.copied
;
2041 msk
->rcvq_space
.copied
= 0;
2042 msk
->rcvq_space
.time
= mstamp
;
2045 static void __mptcp_update_rmem(struct sock
*sk
)
2047 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2049 if (!msk
->rmem_released
)
2052 atomic_sub(msk
->rmem_released
, &sk
->sk_rmem_alloc
);
2053 mptcp_rmem_uncharge(sk
, msk
->rmem_released
);
2054 WRITE_ONCE(msk
->rmem_released
, 0);
2057 static void __mptcp_splice_receive_queue(struct sock
*sk
)
2059 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2061 skb_queue_splice_tail_init(&sk
->sk_receive_queue
, &msk
->receive_queue
);
2064 static bool __mptcp_move_skbs(struct mptcp_sock
*msk
)
2066 struct sock
*sk
= (struct sock
*)msk
;
2067 unsigned int moved
= 0;
2071 struct sock
*ssk
= mptcp_subflow_recv_lookup(msk
);
2074 /* we can have data pending in the subflows only if the msk
2075 * receive buffer was full at subflow_data_ready() time,
2076 * that is an unlikely slow path.
2081 slowpath
= lock_sock_fast(ssk
);
2082 mptcp_data_lock(sk
);
2083 __mptcp_update_rmem(sk
);
2084 done
= __mptcp_move_skbs_from_subflow(msk
, ssk
, &moved
);
2085 mptcp_data_unlock(sk
);
2087 if (unlikely(ssk
->sk_err
))
2088 __mptcp_error_report(sk
);
2089 unlock_sock_fast(ssk
, slowpath
);
2092 /* acquire the data lock only if some input data is pending */
2094 if (!RB_EMPTY_ROOT(&msk
->out_of_order_queue
) ||
2095 !skb_queue_empty_lockless(&sk
->sk_receive_queue
)) {
2096 mptcp_data_lock(sk
);
2097 __mptcp_update_rmem(sk
);
2098 ret
|= __mptcp_ofo_queue(msk
);
2099 __mptcp_splice_receive_queue(sk
);
2100 mptcp_data_unlock(sk
);
2103 mptcp_check_data_fin((struct sock
*)msk
);
2104 return !skb_queue_empty(&msk
->receive_queue
);
2107 static unsigned int mptcp_inq_hint(const struct sock
*sk
)
2109 const struct mptcp_sock
*msk
= mptcp_sk(sk
);
2110 const struct sk_buff
*skb
;
2112 skb
= skb_peek(&msk
->receive_queue
);
2114 u64 hint_val
= msk
->ack_seq
- MPTCP_SKB_CB(skb
)->map_seq
;
2116 if (hint_val
>= INT_MAX
)
2119 return (unsigned int)hint_val
;
2122 if (sk
->sk_state
== TCP_CLOSE
|| (sk
->sk_shutdown
& RCV_SHUTDOWN
))
2128 static int mptcp_recvmsg(struct sock
*sk
, struct msghdr
*msg
, size_t len
,
2129 int flags
, int *addr_len
)
2131 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2132 struct scm_timestamping_internal tss
;
2133 int copied
= 0, cmsg_flags
= 0;
2137 /* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */
2138 if (unlikely(flags
& MSG_ERRQUEUE
))
2139 return inet_recv_error(sk
, msg
, len
, addr_len
);
2142 if (unlikely(sk
->sk_state
== TCP_LISTEN
)) {
2147 timeo
= sock_rcvtimeo(sk
, flags
& MSG_DONTWAIT
);
2149 len
= min_t(size_t, len
, INT_MAX
);
2150 target
= sock_rcvlowat(sk
, flags
& MSG_WAITALL
, len
);
2152 if (unlikely(msk
->recvmsg_inq
))
2153 cmsg_flags
= MPTCP_CMSG_INQ
;
2155 while (copied
< len
) {
2158 bytes_read
= __mptcp_recvmsg_mskq(msk
, msg
, len
- copied
, flags
, &tss
, &cmsg_flags
);
2159 if (unlikely(bytes_read
< 0)) {
2161 copied
= bytes_read
;
2165 copied
+= bytes_read
;
2167 /* be sure to advertise window change */
2168 mptcp_cleanup_rbuf(msk
);
2170 if (skb_queue_empty(&msk
->receive_queue
) && __mptcp_move_skbs(msk
))
2173 /* only the master socket status is relevant here. The exit
2174 * conditions mirror closely tcp_recvmsg()
2176 if (copied
>= target
)
2181 sk
->sk_state
== TCP_CLOSE
||
2182 (sk
->sk_shutdown
& RCV_SHUTDOWN
) ||
2184 signal_pending(current
))
2188 copied
= sock_error(sk
);
2192 if (sk
->sk_shutdown
& RCV_SHUTDOWN
) {
2193 /* race breaker: the shutdown could be after the
2194 * previous receive queue check
2196 if (__mptcp_move_skbs(msk
))
2201 if (sk
->sk_state
== TCP_CLOSE
) {
2211 if (signal_pending(current
)) {
2212 copied
= sock_intr_errno(timeo
);
2217 pr_debug("block timeout %ld", timeo
);
2218 sk_wait_data(sk
, &timeo
, NULL
);
2222 if (cmsg_flags
&& copied
>= 0) {
2223 if (cmsg_flags
& MPTCP_CMSG_TS
)
2224 tcp_recv_timestamp(msg
, sk
, &tss
);
2226 if (cmsg_flags
& MPTCP_CMSG_INQ
) {
2227 unsigned int inq
= mptcp_inq_hint(sk
);
2229 put_cmsg(msg
, SOL_TCP
, TCP_CM_INQ
, sizeof(inq
), &inq
);
2233 pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
2234 msk
, skb_queue_empty_lockless(&sk
->sk_receive_queue
),
2235 skb_queue_empty(&msk
->receive_queue
), copied
);
2236 if (!(flags
& MSG_PEEK
))
2237 mptcp_rcv_space_adjust(msk
, copied
);
2243 static void mptcp_retransmit_timer(struct timer_list
*t
)
2245 struct inet_connection_sock
*icsk
= from_timer(icsk
, t
,
2246 icsk_retransmit_timer
);
2247 struct sock
*sk
= &icsk
->icsk_inet
.sk
;
2248 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2251 if (!sock_owned_by_user(sk
)) {
2252 /* we need a process context to retransmit */
2253 if (!test_and_set_bit(MPTCP_WORK_RTX
, &msk
->flags
))
2254 mptcp_schedule_work(sk
);
2256 /* delegate our work to tcp_release_cb() */
2257 __set_bit(MPTCP_RETRANSMIT
, &msk
->cb_flags
);
2263 static void mptcp_tout_timer(struct timer_list
*t
)
2265 struct sock
*sk
= from_timer(sk
, t
, sk_timer
);
2267 mptcp_schedule_work(sk
);
2271 /* Find an idle subflow. Return NULL if there is unacked data at tcp
2274 * A backup subflow is returned only if that is the only kind available.
2276 struct sock
*mptcp_subflow_get_retrans(struct mptcp_sock
*msk
)
2278 struct sock
*backup
= NULL
, *pick
= NULL
;
2279 struct mptcp_subflow_context
*subflow
;
2280 int min_stale_count
= INT_MAX
;
2282 mptcp_for_each_subflow(msk
, subflow
) {
2283 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
2285 if (!__mptcp_subflow_active(subflow
))
2288 /* still data outstanding at TCP level? skip this */
2289 if (!tcp_rtx_and_write_queues_empty(ssk
)) {
2290 mptcp_pm_subflow_chk_stale(msk
, ssk
);
2291 min_stale_count
= min_t(int, min_stale_count
, subflow
->stale_count
);
2295 if (subflow
->backup
) {
2308 /* use backup only if there are no progresses anywhere */
2309 return min_stale_count
> 1 ? backup
: NULL
;
bool __mptcp_retransmit_pending_data(struct sock *sk)
{
	struct mptcp_data_frag *cur, *rtx_head;
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (__mptcp_check_fallback(msk))
		return false;

	/* the closing socket has some data untransmitted and/or unacked:
	 * some data in the mptcp rtx queue has not really xmitted yet.
	 * keep it simple and re-inject the whole mptcp level rtx queue
	 */
	mptcp_data_lock(sk);
	__mptcp_clean_una_wakeup(sk);
	rtx_head = mptcp_rtx_head(sk);
	if (!rtx_head) {
		mptcp_data_unlock(sk);
		return false;
	}

	msk->recovery_snd_nxt = msk->snd_nxt;
	msk->recovery = true;
	mptcp_data_unlock(sk);

	msk->first_pending = rtx_head;

	/* be sure to clear the "sent status" on all re-injected fragments */
	list_for_each_entry(cur, &msk->rtx_queue, list) {
		if (!cur->already_sent)
			break;
		cur->already_sent = 0;
	}

	return true;
}
/* flags for __mptcp_close_ssk() */
#define MPTCP_CF_PUSH		BIT(1)
#define MPTCP_CF_FASTCLOSE	BIT(2)

/* be sure to send a reset only if the caller asked for it, also
 * clean completely the subflow status when the subflow reaches
 * TCP_CLOSE state
 */
static void __mptcp_subflow_disconnect(struct sock *ssk,
				       struct mptcp_subflow_context *subflow,
				       unsigned int flags)
{
	if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    (flags & MPTCP_CF_FASTCLOSE)) {
		/* The MPTCP code never waits on the subflow sockets, TCP-level
		 * disconnect should never fail
		 */
		WARN_ON_ONCE(tcp_disconnect(ssk, 0));
		mptcp_subflow_ctx_reset(subflow);
	} else {
		tcp_shutdown(ssk, SEND_SHUTDOWN);
	}
}
/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      unsigned int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool dispose_it, need_push = false;

	/* If the first subflow moved to a close state before accept, e.g. due
	 * to an incoming reset or listener shutdown, the subflow socket is
	 * already deleted by inet_child_forget() and the mptcp socket can't
	 * survive too.
	 */
	if (msk->in_accept_queue && msk->first == ssk &&
	    (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
		/* ensure later check in mptcp_worker() will dispose the msk */
		sock_set_flag(sk, SOCK_DEAD);
		mptcp_set_close_tout(sk, tcp_jiffies32 - (mptcp_close_timeout(sk) + 1));
		lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
		mptcp_subflow_drop_ctx(ssk);
		goto out_release;
	}

	dispose_it = msk->free_first || ssk != msk->first;
	if (dispose_it)
		list_del(&subflow->node);

	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);

	if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
		/* be sure to force the tcp_close path
		 * to generate the egress reset
		 */
		ssk->sk_lingertime = 0;
		sock_set_flag(ssk, SOCK_LINGER);
		subflow->send_fastclose = 1;
	}

	need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
	if (!dispose_it) {
		__mptcp_subflow_disconnect(ssk, subflow, flags);
		release_sock(ssk);

		goto out;
	}

	subflow->disposable = 1;

	/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
	 * the ssk has been already destroyed, we just need to release the
	 * reference owned by msk;
	 */
	if (!inet_csk(ssk)->icsk_ulp_ops) {
		WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
		kfree_rcu(subflow, rcu);
	} else {
		/* otherwise tcp will dispose of the ssk and subflow ctx */
		__tcp_close(ssk, 0);

		/* close acquired an extra ref */
		__sock_put(ssk);
	}

out_release:
	__mptcp_subflow_error_report(sk, ssk);
	release_sock(ssk);

	sock_put(ssk);

	if (ssk == msk->first)
		WRITE_ONCE(msk->first, NULL);

out:
	__mptcp_sync_sndbuf(sk);
	if (need_push)
		__mptcp_push_pending(sk, 0);

	/* Catch every 'all subflows closed' scenario, including peers silently
	 * closing them, e.g. due to timeout.
	 * For established sockets, allow an additional timeout before closing,
	 * as the protocol can still create more subflows.
	 */
	if (list_is_singular(&msk->conn_list) && msk->first &&
	    inet_sk_state_load(msk->first) == TCP_CLOSE) {
		if (sk->sk_state != TCP_ESTABLISHED ||
		    msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
			mptcp_set_state(sk, TCP_CLOSE);
			mptcp_close_wake_up(sk);
		} else {
			mptcp_start_tout_timer(sk);
		}
	}
}
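
/* Two disposal paths exist above: the first subflow of a live msk is only
 * disconnected and kept around for possible reuse (dispose_it == false),
 * while any other subflow (or the first one once msk->free_first is set) is
 * fully closed and its context freed, dropping the reference the msk owns.
 */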
void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
		     struct mptcp_subflow_context *subflow)
{
	if (sk->sk_state == TCP_ESTABLISHED)
		mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);

	/* subflow aborted before reaching the fully_established status
	 * attempt the creation of the next subflow
	 */
	mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow);

	__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
}

static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{
	return 0;
}
static void __mptcp_close_subflow(struct sock *sk)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);

	might_sleep();

	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (inet_sk_state_load(ssk) != TCP_CLOSE)
			continue;

		/* 'subflow_data_ready' will re-sched once rx queue is empty */
		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
			continue;

		mptcp_close_ssk(sk, ssk, subflow);
	}
}
static bool mptcp_close_tout_expired(const struct sock *sk)
{
	if (!inet_csk(sk)->icsk_mtup.probe_timestamp ||
	    sk->sk_state == TCP_CLOSE)
		return false;

	return time_after32(tcp_jiffies32,
			    inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk));
}
static void mptcp_check_fastclose(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct sock *sk = (struct sock *)msk;

	if (likely(!READ_ONCE(msk->rcv_fastclose)))
		return;

	mptcp_token_destroy(msk);

	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
		bool slow;

		slow = lock_sock_fast(tcp_sk);
		if (tcp_sk->sk_state != TCP_CLOSE) {
			tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
			tcp_set_state(tcp_sk, TCP_CLOSE);
		}
		unlock_sock_fast(tcp_sk, slow);
	}

	/* Mirror the tcp_reset() error propagation */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
		WRITE_ONCE(sk->sk_err, ECONNREFUSED);
		break;
	case TCP_CLOSE_WAIT:
		WRITE_ONCE(sk->sk_err, EPIPE);
		break;
	case TCP_CLOSE:
		return;
	default:
		WRITE_ONCE(sk->sk_err, ECONNRESET);
	}

	mptcp_set_state(sk, TCP_CLOSE);
	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
	smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
	set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);

	/* the calling mptcp_worker will properly destroy the socket */
	if (sock_flag(sk, SOCK_DEAD))
		return;

	sk->sk_state_change(sk);
	sk_error_report(sk);
}
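
/* An incoming MP_FASTCLOSE behaves like a connection-wide RST: every subflow
 * is reset, and the MPTCP-level error mirrors what tcp_reset() would report
 * for a plain TCP socket in the same state (ECONNREFUSED while connecting,
 * EPIPE in CLOSE_WAIT, ECONNRESET otherwise).
 */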
static void __mptcp_retrans(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_subflow_context *subflow;
	struct mptcp_sendmsg_info info = {};
	struct mptcp_data_frag *dfrag;
	struct sock *ssk;
	int ret, err;
	u16 len = 0;

	mptcp_clean_una_wakeup(sk);

	/* first check ssk: need to kick "stale" logic */
	err = mptcp_sched_get_retrans(msk);
	dfrag = mptcp_rtx_head(sk);
	if (!dfrag) {
		if (mptcp_data_fin_enabled(msk)) {
			struct inet_connection_sock *icsk = inet_csk(sk);

			icsk->icsk_retransmits++;
			mptcp_set_datafin_timeout(sk);
			mptcp_send_ack(msk);

			goto reset_timer;
		}

		if (!mptcp_send_head(sk))
			return;

		goto reset_timer;
	}

	if (err)
		goto reset_timer;

	mptcp_for_each_subflow(msk, subflow) {
		if (READ_ONCE(subflow->scheduled)) {
			u16 copied = 0;

			mptcp_subflow_set_scheduled(subflow, false);

			ssk = mptcp_subflow_tcp_sock(subflow);

			lock_sock(ssk);

			/* limit retransmission to the bytes already sent on some subflows */
			info.sent = 0;
			info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len :
								    dfrag->already_sent;
			while (info.sent < info.limit) {
				ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
				if (ret <= 0)
					break;

				MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
				copied += ret;
				info.sent += ret;
			}
			if (copied) {
				len = max(copied, len);
				tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
					 info.size_goal);
				WRITE_ONCE(msk->allow_infinite_fallback, false);
			}

			release_sock(ssk);
		}
	}

	msk->bytes_retrans += len;
	dfrag->already_sent = max(dfrag->already_sent, len);

reset_timer:
	mptcp_check_and_set_pending(sk);

	if (!mptcp_rtx_timer_pending(sk))
		mptcp_reset_rtx_timer(sk);
}
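
/* MPTCP-level retransmission re-injects the head of the msk rtx queue on
 * whatever subflow the packet scheduler picked; when nothing is pending it
 * only refreshes the DATA_FIN timeout. The per-subflow TCP retransmission
 * machinery keeps running independently underneath.
 */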
/* schedule the timeout timer for the relevant event: either close timeout
 * or mp_fail timeout. The close timeout takes precedence on the mp_fail one
 */
void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
{
	struct sock *sk = (struct sock *)msk;
	unsigned long timeout, close_timeout;

	if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
		return;

	close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies +
			mptcp_close_timeout(sk);

	/* the close timeout takes precedence on the fail one, and here at least one of
	 * them is active
	 */
	timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;

	sk_reset_timer(sk, &sk->sk_timer, timeout);
}
static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
{
	struct sock *ssk = msk->first;
	bool slow;

	if (!ssk)
		return;

	pr_debug("MP_FAIL doesn't respond, reset the subflow");

	slow = lock_sock_fast(ssk);
	mptcp_subflow_reset(ssk);
	WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
	unlock_sock_fast(ssk, slow);
}
static void mptcp_do_fastclose(struct sock *sk)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);

	mptcp_set_state(sk, TCP_CLOSE);
	mptcp_for_each_subflow_safe(msk, subflow, tmp)
		__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
				  subflow, MPTCP_CF_FASTCLOSE);
}
static void mptcp_worker(struct work_struct *work)
{
	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
	struct sock *sk = (struct sock *)msk;
	unsigned long fail_tout;
	int state;

	lock_sock(sk);
	state = sk->sk_state;
	if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
		goto unlock;

	mptcp_check_fastclose(msk);

	mptcp_pm_nl_work(msk);

	mptcp_check_send_data_fin(sk);
	mptcp_check_data_fin_ack(sk);
	mptcp_check_data_fin(sk);

	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		__mptcp_close_subflow(sk);

	if (mptcp_close_tout_expired(sk)) {
		mptcp_do_fastclose(sk);
		mptcp_close_wake_up(sk);
	}

	if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) {
		__mptcp_destroy_sock(sk);
		goto unlock;
	}

	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
		__mptcp_retrans(sk);

	fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0;
	if (fail_tout && time_after(jiffies, fail_tout))
		mptcp_mp_fail_no_response(msk);

unlock:
	release_sock(sk);
	sock_put(sk);
}
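
/* The worker is the single process-context handler for everything the
 * softirq/rx path cannot do directly: fastclose handling, PM events,
 * DATA_FIN bookkeeping, closing of dead subflows, msk disposal, MPTCP-level
 * retransmissions and the MP_FAIL no-response timeout.
 */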
static void __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	INIT_LIST_HEAD(&msk->conn_list);
	INIT_LIST_HEAD(&msk->join_list);
	INIT_LIST_HEAD(&msk->rtx_queue);
	INIT_WORK(&msk->work, mptcp_worker);
	__skb_queue_head_init(&msk->receive_queue);
	msk->out_of_order_queue = RB_ROOT;
	msk->first_pending = NULL;
	msk->rmem_fwd_alloc = 0;
	WRITE_ONCE(msk->rmem_released, 0);
	msk->timer_ival = TCP_RTO_MIN;
	msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;

	WRITE_ONCE(msk->first, NULL);
	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
	WRITE_ONCE(msk->allow_infinite_fallback, true);
	msk->recovery = false;
	msk->subflow_id = 1;

	mptcp_pm_data_init(msk);

	/* re-use the csk retrans timer for MPTCP-level retrans */
	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
	timer_setup(&sk->sk_timer, mptcp_tout_timer, 0);
}
static void mptcp_ca_reset(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_assign_congestion_control(sk);
	strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);

	/* no need to keep a reference to the ops, the name will suffice */
	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = NULL;
}
static int mptcp_init_sock(struct sock *sk)
{
	struct net *net = sock_net(sk);
	int ret;

	__mptcp_init_sock(sk);

	if (!mptcp_is_enabled(net))
		return -ENOPROTOOPT;

	if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
		return -ENOMEM;

	ret = mptcp_init_sched(mptcp_sk(sk),
			       mptcp_sched_find(mptcp_get_scheduler(net)));
	if (ret)
		return ret;

	set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);

	/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
	 * propagate the correct value
	 */
	mptcp_ca_reset(sk);

	sk_sockets_allocated_inc(sk);
	sk->sk_rcvbuf = READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]);
	sk->sk_sndbuf = READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]);

	return 0;
}
static void __mptcp_clear_xmit(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;

	WRITE_ONCE(msk->first_pending, NULL);
	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
		dfrag_clear(sk, dfrag);
}

void mptcp_cancel_work(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (cancel_work_sync(&msk->work))
		__sock_put(sk);
}
void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
{
	lock_sock(ssk);

	switch (ssk->sk_state) {
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		fallthrough;
	case TCP_SYN_SENT:
		WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK));
		break;
	default:
		if (__mptcp_check_fallback(mptcp_sk(sk))) {
			pr_debug("Fallback");
			ssk->sk_shutdown |= how;
			tcp_shutdown(ssk, how);

			/* simulate the data_fin ack reception to let the state
			 * machine move forward
			 */
			WRITE_ONCE(mptcp_sk(sk)->snd_una, mptcp_sk(sk)->snd_nxt);
			mptcp_schedule_work(sk);
		} else {
			pr_debug("Sending DATA_FIN on subflow %p", ssk);
			tcp_send_ack(ssk);
			if (!mptcp_rtx_timer_pending(sk))
				mptcp_reset_rtx_timer(sk);
		}
		break;
	}

	release_sock(ssk);
}
void mptcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->sk_state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
		break;

	default:
		if (oldstate == TCP_ESTABLISHED)
			MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
	}

	inet_sk_state_store(sk, state);
}

static const unsigned char new_state[16] = {
	/* current state:     new state:      action: */
	[0 /* (Invalid) */]	= TCP_CLOSE,
	[TCP_ESTABLISHED]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_SYN_SENT]		= TCP_CLOSE,
	[TCP_SYN_RECV]		= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_FIN_WAIT1]		= TCP_FIN_WAIT1,
	[TCP_FIN_WAIT2]		= TCP_FIN_WAIT2,
	[TCP_TIME_WAIT]		= TCP_CLOSE,	/* should not happen ! */
	[TCP_CLOSE]		= TCP_CLOSE,
	[TCP_CLOSE_WAIT]	= TCP_LAST_ACK | TCP_ACTION_FIN,
	[TCP_LAST_ACK]		= TCP_LAST_ACK,
	[TCP_LISTEN]		= TCP_CLOSE,
	[TCP_CLOSING]		= TCP_CLOSING,
	[TCP_NEW_SYN_RECV]	= TCP_CLOSE,	/* should not happen ! */
};

static int mptcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	mptcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
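
/* new_state[] mirrors the table used by tcp_close_state(): indexed by the
 * current sk_state, it yields the next state in the low bits plus an
 * optional TCP_ACTION_FIN flag. E.g. closing an ESTABLISHED msk moves it
 * to FIN_WAIT1 and tells the caller a DATA_FIN must be sent, while closing
 * a LISTEN or SYN_SENT socket goes straight to TCP_CLOSE with no FIN.
 */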
static void mptcp_check_send_data_fin(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
		 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
		 msk->snd_nxt, msk->write_seq);

	/* we still need to enqueue subflows or not really shutting down,
	 * skip this
	 */
	if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
	    mptcp_send_head(sk))
		return;

	WRITE_ONCE(msk->snd_nxt, msk->write_seq);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
	}
}

static void __mptcp_wr_shutdown(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
		 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
		 !!mptcp_send_head(sk));

	/* will be ignored by fallback sockets */
	WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
	WRITE_ONCE(msk->snd_data_fin_enable, 1);

	mptcp_check_send_data_fin(sk);
}
static void __mptcp_destroy_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p", msk);

	might_sleep();

	mptcp_stop_rtx_timer(sk);
	sk_stop_timer(sk, &sk->sk_timer);
	mptcp_release_sched(msk);

	sk->sk_prot->destroy(sk);

	WARN_ON_ONCE(msk->rmem_fwd_alloc);
	WARN_ON_ONCE(msk->rmem_released);
	sk_stream_kill_queues(sk);
	xfrm_sk_free_policy(sk);

	sock_put(sk);
}

void __mptcp_unaccepted_force_close(struct sock *sk)
{
	sock_set_flag(sk, SOCK_DEAD);
	mptcp_do_fastclose(sk);
	__mptcp_destroy_sock(sk);
}

static __poll_t mptcp_check_readable(struct sock *sk)
{
	return mptcp_epollin_ready(sk) ? EPOLLIN | EPOLLRDNORM : 0;
}
static void mptcp_check_listen_stop(struct sock *sk)
{
	struct sock *ssk;

	if (inet_sk_state_load(sk) != TCP_LISTEN)
		return;

	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	ssk = mptcp_sk(sk)->first;
	if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN))
		return;

	lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
	tcp_set_state(ssk, TCP_CLOSE);
	mptcp_subflow_queue_clean(sk, ssk);
	inet_csk_listen_stop(ssk);
	mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
	release_sock(ssk);
}
bool __mptcp_close(struct sock *sk, long timeout)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool do_cancel_work = false;
	int subflows_alive = 0;

	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
		mptcp_check_listen_stop(sk);
		mptcp_set_state(sk, TCP_CLOSE);
		goto cleanup;
	}

	if (mptcp_data_avail(msk) || timeout < 0) {
		/* If the msk has read data, or the caller explicitly ask it,
		 * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose
		 */
		mptcp_do_fastclose(sk);
		timeout = 0;
	} else if (mptcp_close_state(sk)) {
		__mptcp_wr_shutdown(sk);
	}

	sk_stream_wait_close(sk, timeout);

cleanup:
	/* orphan all the subflows */
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast_nested(ssk);

		subflows_alive += ssk->sk_state != TCP_CLOSE;

		/* since the close timeout takes precedence on the fail one,
		 * cancel the latter
		 */
		if (ssk == msk->first)
			subflow->fail_tout = 0;

		/* detach from the parent socket, but allow data_ready to
		 * push incoming data into the mptcp stack, to properly ack it
		 */
		ssk->sk_socket = NULL;

		unlock_sock_fast(ssk, slow);
	}

	/* all the subflows are closed, only timeout can change the msk
	 * state, let's not keep resources busy for no reasons
	 */
	if (subflows_alive == 0)
		mptcp_set_state(sk, TCP_CLOSE);

	pr_debug("msk=%p state=%d", sk, sk->sk_state);

	mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);

	if (sk->sk_state == TCP_CLOSE) {
		__mptcp_destroy_sock(sk);
		do_cancel_work = true;
	} else {
		mptcp_start_tout_timer(sk);
	}

	return do_cancel_work;
}

static void mptcp_close(struct sock *sk, long timeout)
{
	bool do_cancel_work;

	lock_sock(sk);

	do_cancel_work = __mptcp_close(sk, timeout);
	release_sock(sk);
	if (do_cancel_work)
		mptcp_cancel_work(sk);

	sock_put(sk);
}
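
/* close() on an MPTCP socket mirrors the TCP semantic: pending unread data
 * (or a negative timeout, i.e. a non-blocking disconnect) turns the close
 * into a fastclose/reset, otherwise a DATA_FIN is queued and the msk lingers
 * until all subflows are gone or the close timeout fires in the worker.
 */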
static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
	struct ipv6_pinfo *msk6 = inet6_sk(msk);

	msk->sk_v6_daddr = ssk->sk_v6_daddr;
	msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

	if (msk6 && ssk6) {
		msk6->saddr = ssk6->saddr;
		msk6->flow_label = ssk6->flow_label;
	}
#endif

	inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
	inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
	inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
	inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
	inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
	inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}
static int mptcp_disconnect(struct sock *sk, int flags)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* We are on the fastopen error path. We can't call straight into the
	 * subflows cleanup code due to lock nesting (we are already under
	 * msk->firstsocket lock).
	 */
	if (msk->fastopening)
		return -EBUSY;

	mptcp_check_listen_stop(sk);
	mptcp_set_state(sk, TCP_CLOSE);

	mptcp_stop_rtx_timer(sk);
	mptcp_stop_tout_timer(sk);

	mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);

	/* msk->subflow is still intact, the following will not free the first
	 * subflow
	 */
	mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
	WRITE_ONCE(msk->flags, 0);
	msk->cb_flags = 0;
	msk->recovery = false;
	msk->can_ack = false;
	msk->fully_established = false;
	msk->rcv_data_fin = false;
	msk->snd_data_fin_enable = false;
	msk->rcv_fastclose = false;
	msk->use_64bit_ack = false;
	msk->bytes_consumed = 0;
	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
	mptcp_pm_data_reset(msk);
	mptcp_ca_reset(sk);
	msk->bytes_acked = 0;
	msk->bytes_received = 0;
	msk->bytes_sent = 0;
	msk->bytes_retrans = 0;

	WRITE_ONCE(sk->sk_shutdown, 0);
	sk_error_report(sk);
	return 0;
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
	unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
#endif
struct sock *mptcp_sk_clone_init(const struct sock *sk,
				 const struct mptcp_options_received *mp_opt,
				 struct sock *ssk,
				 struct request_sock *req)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
	struct mptcp_sock *msk;

	if (!nsk)
		return NULL;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	if (nsk->sk_family == AF_INET6)
		inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif

	__mptcp_init_sock(nsk);

	msk = mptcp_sk(nsk);
	msk->local_key = subflow_req->local_key;
	msk->token = subflow_req->token;
	msk->in_accept_queue = 1;
	WRITE_ONCE(msk->fully_established, false);
	if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
		WRITE_ONCE(msk->csum_enabled, true);

	msk->write_seq = subflow_req->idsn + 1;
	msk->snd_nxt = msk->write_seq;
	msk->snd_una = msk->write_seq;
	msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
	msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
	mptcp_init_sched(msk, mptcp_sk(sk)->sched);

	/* passive msk is created after the first/MPC subflow */
	msk->subflow_id = 2;

	sock_reset_flag(nsk, SOCK_RCU_FREE);
	security_inet_csk_clone(nsk, req);

	/* this can't race with mptcp_close(), as the msk is
	 * not yet exposed to user-space
	 */
	mptcp_set_state(nsk, TCP_ESTABLISHED);

	/* The msk maintains a ref to each subflow in the connections list */
	WRITE_ONCE(msk->first, ssk);
	list_add(&mptcp_subflow_ctx(ssk)->node, &msk->conn_list);
	sock_hold(ssk);

	/* new mpc subflow takes ownership of the newly
	 * created mptcp socket
	 */
	mptcp_token_accept(subflow_req, msk);

	/* set msk addresses early to ensure mptcp_pm_get_local_id()
	 * uses the correct data
	 */
	mptcp_copy_inaddrs(nsk, ssk);
	__mptcp_propagate_sndbuf(nsk, ssk);

	mptcp_rcv_space_init(msk, ssk);
	bh_unlock_sock(nsk);

	/* note: the newly allocated socket refcount is 2 now */
	return nsk;
}
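
/* mptcp_sk_clone_init() builds the passive msk at 3rd-ACK time: the listener
 * msk is cloned, keyed with the request socket material (token, IDSN) and
 * immediately moved to ESTABLISHED with the MPC subflow as msk->first, so
 * accept() later only has to pull it out of the queue.
 */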
void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
{
	const struct tcp_sock *tp = tcp_sk(ssk);

	msk->rcvq_space.copied = 0;
	msk->rcvq_space.rtt_us = 0;

	msk->rcvq_space.time = tp->tcp_mstamp;

	/* initial rcv_space offering made to peer */
	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
				      TCP_INIT_CWND * tp->advmss);
	if (msk->rcvq_space.space == 0)
		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;

	WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
}
void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct sock *sk = (struct sock *)msk;

	__mptcp_clear_xmit(sk);

	/* join list will be eventually flushed (with rst) at sock lock release time */
	mptcp_for_each_subflow_safe(msk, subflow, tmp)
		__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags);

	/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
	mptcp_data_lock(sk);
	skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_receive_queue);
	skb_rbtree_purge(&msk->out_of_order_queue);
	mptcp_data_unlock(sk);

	/* move all the rx fwd alloc into the sk_mem_reclaim_final in
	 * inet_sock_destruct() will dispose it
	 */
	sk_forward_alloc_add(sk, msk->rmem_fwd_alloc);
	WRITE_ONCE(msk->rmem_fwd_alloc, 0);
	mptcp_token_destroy(msk);
	mptcp_pm_free_anno_list(msk);
	mptcp_free_local_addr_list(msk);
}

static void mptcp_destroy(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	/* allow the following to close even the initial subflow */
	msk->free_first = 1;
	mptcp_destroy_common(msk, 0);
	sk_sockets_allocated_dec(sk);
}
void __mptcp_data_acked(struct sock *sk)
{
	if (!sock_owned_by_user(sk))
		__mptcp_clean_una(sk);
	else
		__set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);

	if (mptcp_pending_data_fin_ack(sk))
		mptcp_schedule_work(sk);
}

void __mptcp_check_push(struct sock *sk, struct sock *ssk)
{
	if (!mptcp_send_head(sk))
		return;

	if (!sock_owned_by_user(sk))
		__mptcp_subflow_push_pending(sk, ssk, false);
	else
		__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
}
#define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
				      BIT(MPTCP_RETRANSMIT) | \
				      BIT(MPTCP_FLUSH_JOIN_LIST))

/* processes deferred events and flush wmem */
static void mptcp_release_cb(struct sock *sk)
	__must_hold(&sk->sk_lock.slock)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	for (;;) {
		unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
		struct list_head join_list;

		if (!flags)
			break;

		INIT_LIST_HEAD(&join_list);
		list_splice_init(&msk->join_list, &join_list);

		/* the following actions acquire the subflow socket lock
		 *
		 * 1) can't be invoked in atomic scope
		 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
		 *    datapath acquires the msk socket spinlock while holding
		 *    the subflow socket lock
		 */
		msk->cb_flags &= ~flags;
		spin_unlock_bh(&sk->sk_lock.slock);

		if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
			__mptcp_flush_join_list(sk, &join_list);
		if (flags & BIT(MPTCP_PUSH_PENDING))
			__mptcp_push_pending(sk, 0);
		if (flags & BIT(MPTCP_RETRANSMIT))
			__mptcp_retrans(sk);

		spin_lock_bh(&sk->sk_lock.slock);
	}

	if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
		__mptcp_clean_una_wakeup(sk);
	if (unlikely(msk->cb_flags)) {
		/* be sure to sync the msk state before taking actions
		 * depending on sk_state (MPTCP_ERROR_REPORT)
		 * On sk release avoid actions depending on the first subflow
		 */
		if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first)
			__mptcp_sync_state(sk, msk->pending_state);
		if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
			__mptcp_error_report(sk);
		if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
			__mptcp_sync_sndbuf(sk);
	}

	__mptcp_update_rmem(sk);
}
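
/* Deferred-action pattern: softirq/rx code only sets bits in msk->cb_flags
 * while owning the msk spinlock; mptcp_release_cb() then runs the expensive
 * handlers (push, retransmit, join-list flush) from process context with the
 * spinlock temporarily dropped, which is what avoids the ABBA ordering issue
 * described above.
 */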
/* MP_JOIN client subflow must wait for 4th ack before sending any data:
 * TCP can't schedule delack timer before the subflow is fully established.
 * MPTCP uses the delack timer to do 3rd ack retransmissions
 */
static void schedule_3rdack_retransmission(struct sock *ssk)
{
	struct inet_connection_sock *icsk = inet_csk(ssk);
	struct tcp_sock *tp = tcp_sk(ssk);
	unsigned long timeout;

	if (mptcp_subflow_ctx(ssk)->fully_established)
		return;

	/* reschedule with a timeout above RTT, as we must look only for drop */
	if (tp->srtt_us)
		timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
	else
		timeout = TCP_TIMEOUT_INIT;
	timeout += jiffies;

	WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
}
void mptcp_subflow_process_delegated(struct sock *ssk, long status)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = subflow->conn;

	if (status & BIT(MPTCP_DELEGATE_SEND)) {
		mptcp_data_lock(sk);
		if (!sock_owned_by_user(sk))
			__mptcp_subflow_push_pending(sk, ssk, true);
		else
			__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
		mptcp_data_unlock(sk);
	}
	if (status & BIT(MPTCP_DELEGATE_SNDBUF)) {
		mptcp_data_lock(sk);
		if (!sock_owned_by_user(sk))
			__mptcp_sync_sndbuf(sk);
		else
			__set_bit(MPTCP_SYNC_SNDBUF, &mptcp_sk(sk)->cb_flags);
		mptcp_data_unlock(sk);
	}
	if (status & BIT(MPTCP_DELEGATE_ACK))
		schedule_3rdack_retransmission(ssk);
}
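
/* Delegated actions are the subflow-side counterpart of the cb_flags scheme:
 * events raised in softirq context on a subflow (send, sndbuf sync, 3rd-ack
 * retransmission) are queued per-CPU and replayed either here or from the
 * NAPI poll loop below, again falling back to cb_flags when the msk socket
 * is owned by user context.
 */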
static int mptcp_hash(struct sock *sk)
{
	/* should never be called,
	 * we hash the TCP subflows not the master socket
	 */
	WARN_ON_ONCE(1);
	return 0;
}

static void mptcp_unhash(struct sock *sk)
{
	/* called from sk_common_release(), but nothing to do here */
}

static int mptcp_get_port(struct sock *sk, unsigned short snum)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p, ssk=%p", msk, msk->first);
	if (WARN_ON_ONCE(!msk->first))
		return -EINVAL;

	return inet_csk_get_port(msk->first, snum);
}
void mptcp_finish_connect(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk;
	struct sock *sk;

	subflow = mptcp_subflow_ctx(ssk);
	sk = subflow->conn;
	msk = mptcp_sk(sk);

	pr_debug("msk=%p, token=%u", sk, subflow->token);

	subflow->map_seq = subflow->iasn;
	subflow->map_subflow_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access/race
	 * accessing the field below
	 */
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->snd_nxt, msk->write_seq);
	WRITE_ONCE(msk->snd_una, msk->write_seq);

	mptcp_pm_new_connection(msk, ssk, 0);

	mptcp_rcv_space_init(msk, ssk);
}

void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}
bool mptcp_finish_join(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(subflow->conn);
	struct sock *parent = (void *)msk;
	bool ret = true;

	pr_debug("msk=%p, subflow=%p", msk, subflow);

	/* mptcp socket already closing? */
	if (!mptcp_is_fully_established(parent)) {
		subflow->reset_reason = MPTCP_RST_EMPTCP;
		return false;
	}

	/* active subflow, already present inside the conn_list */
	if (!list_empty(&subflow->node)) {
		mptcp_subflow_joined(msk, ssk);
		mptcp_propagate_sndbuf(parent, ssk);
		return true;
	}

	if (!mptcp_pm_allow_new_subflow(msk))
		goto err_prohibited;

	/* If we can't acquire msk socket lock here, let the release callback
	 * handle it
	 */
	mptcp_data_lock(parent);
	if (!sock_owned_by_user(parent)) {
		ret = __mptcp_finish_join(msk, ssk);
		if (ret) {
			sock_hold(ssk);
			list_add_tail(&subflow->node, &msk->conn_list);
		}
	} else {
		sock_hold(ssk);
		list_add_tail(&subflow->node, &msk->join_list);
		__set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
	}
	mptcp_data_unlock(parent);

	if (!ret) {
err_prohibited:
		subflow->reset_reason = MPTCP_RST_EPROHIBIT;
		return false;
	}

	return true;
}
static void mptcp_shutdown(struct sock *sk, int how)
{
	pr_debug("sk=%p, how=%d", sk, how);

	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
		__mptcp_wr_shutdown(sk);
}

static int mptcp_forward_alloc_get(const struct sock *sk)
{
	return READ_ONCE(sk->sk_forward_alloc) +
	       READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc);
}
static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
{
	const struct sock *sk = (void *)msk;
	u64 delta;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
		return 0;

	delta = msk->write_seq - v;
	if (__mptcp_check_fallback(msk) && msk->first) {
		struct tcp_sock *tp = tcp_sk(msk->first);

		/* the first subflow is disconnected after close - see
		 * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq
		 * so ignore that status, too.
		 */
		if (!((1 << msk->first->sk_state) &
		      (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
			delta += READ_ONCE(tp->write_seq) - tp->snd_una;
	}
	if (delta > INT_MAX)
		delta = INT_MAX;

	return (int)delta;
}
static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		__mptcp_move_skbs(msk);
		*karg = mptcp_inq_hint(sk);
		release_sock(sk);
		break;
	case SIOCOUTQ:
		slow = lock_sock_fast(sk);
		*karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una));
		unlock_sock_fast(sk, slow);
		break;
	case SIOCOUTQNSD:
		slow = lock_sock_fast(sk);
		*karg = mptcp_ioctl_outq(msk, msk->snd_nxt);
		unlock_sock_fast(sk, slow);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
					 struct mptcp_subflow_context *subflow)
{
	subflow->request_mptcp = 0;
	__mptcp_do_fallback(msk);
}

static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);
	int err = -EINVAL;
	struct sock *ssk;

	ssk = __mptcp_nmpc_sk(msk);
	if (IS_ERR(ssk))
		return PTR_ERR(ssk);

	mptcp_set_state(sk, TCP_SYN_SENT);
	subflow = mptcp_subflow_ctx(ssk);
#ifdef CONFIG_TCP_MD5SIG
	/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
	 * TCP option space.
	 */
	if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
		mptcp_subflow_early_fallback(msk, subflow);
#endif
	if (subflow->request_mptcp && mptcp_token_new_connect(ssk)) {
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
		mptcp_subflow_early_fallback(msk, subflow);
	}
	if (likely(!__mptcp_check_fallback(msk)))
		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);

	/* if reaching here via the fastopen/sendmsg path, the caller already
	 * acquired the subflow socket lock, too.
	 */
	if (!msk->fastopening)
		lock_sock(ssk);

	/* the following mirrors closely a very small chunk of code from
	 * __inet_stream_connect()
	 */
	if (ssk->sk_state != TCP_CLOSE)
		goto out;

	if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) {
		err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len);
		if (err)
			goto out;
	}

	err = ssk->sk_prot->connect(ssk, uaddr, addr_len);
	if (err < 0)
		goto out;

	inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk));

out:
	if (!msk->fastopening)
		release_sock(ssk);

	/* on successful connect, the msk state will be moved to established by
	 * subflow_finish_connect()
	 */
	if (unlikely(err)) {
		/* avoid leaving a dangling token in an unconnected socket */
		mptcp_token_destroy(msk);
		mptcp_set_state(sk, TCP_CLOSE);
		return err;
	}

	mptcp_copy_inaddrs(sk, ssk);
	return 0;
}
static struct proto mptcp_prot = {
	.name		= "MPTCP",
	.owner		= THIS_MODULE,
	.init		= mptcp_init_sock,
	.connect	= mptcp_connect,
	.disconnect	= mptcp_disconnect,
	.close		= mptcp_close,
	.setsockopt	= mptcp_setsockopt,
	.getsockopt	= mptcp_getsockopt,
	.shutdown	= mptcp_shutdown,
	.destroy	= mptcp_destroy,
	.sendmsg	= mptcp_sendmsg,
	.ioctl		= mptcp_ioctl,
	.recvmsg	= mptcp_recvmsg,
	.release_cb	= mptcp_release_cb,
	.hash		= mptcp_hash,
	.unhash		= mptcp_unhash,
	.get_port	= mptcp_get_port,
	.forward_alloc_get	= mptcp_forward_alloc_get,
	.sockets_allocated	= &mptcp_sockets_allocated,

	.memory_allocated	= &tcp_memory_allocated,
	.per_cpu_fw_alloc	= &tcp_memory_per_cpu_fw_alloc,

	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.sysctl_mem	= sysctl_tcp_mem,
	.obj_size	= sizeof(struct mptcp_sock),
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
	.no_autobind	= true,
};
static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *ssk, *sk = sock->sk;
	int err = -EINVAL;

	lock_sock(sk);
	ssk = __mptcp_nmpc_sk(msk);
	if (IS_ERR(ssk)) {
		err = PTR_ERR(ssk);
		goto unlock;
	}

	if (sk->sk_family == AF_INET)
		err = inet_bind_sk(ssk, uaddr, addr_len);
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	else if (sk->sk_family == AF_INET6)
		err = inet6_bind_sk(ssk, uaddr, addr_len);
#endif
	if (!err)
		mptcp_copy_inaddrs(sk, ssk);

unlock:
	release_sock(sk);
	return err;
}
static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sock *ssk;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
		goto unlock;

	ssk = __mptcp_nmpc_sk(msk);
	if (IS_ERR(ssk)) {
		err = PTR_ERR(ssk);
		goto unlock;
	}

	mptcp_set_state(sk, TCP_LISTEN);
	sock_set_flag(sk, SOCK_RCU_FREE);

	lock_sock(ssk);
	err = __inet_listen_sk(ssk, backlog);
	release_sock(ssk);
	mptcp_set_state(sk, inet_sk_state_load(ssk));

	if (!err) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		mptcp_copy_inaddrs(sk, ssk);
		mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
	}

unlock:
	release_sock(sk);
	return err;
}
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
			       int flags, bool kern)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *ssk, *newsk;
	int err;

	pr_debug("msk=%p", msk);

	/* Buggy applications can call accept on socket states other than LISTEN
	 * but no need to allocate the first subflow just to error out.
	 */
	ssk = READ_ONCE(msk->first);
	if (!ssk)
		return -EINVAL;

	pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk));
	newsk = inet_csk_accept(ssk, flags, &err, kern);
	if (!newsk)
		return err;

	pr_debug("newsk=%p, subflow is mptcp=%d", newsk, sk_is_mptcp(newsk));
	if (sk_is_mptcp(newsk)) {
		struct mptcp_subflow_context *subflow;
		struct sock *new_mptcp_sock;

		subflow = mptcp_subflow_ctx(newsk);
		new_mptcp_sock = subflow->conn;

		/* is_mptcp should be false if subflow->conn is missing, see
		 * subflow_syn_recv_sock()
		 */
		if (WARN_ON_ONCE(!new_mptcp_sock)) {
			tcp_sk(newsk)->is_mptcp = 0;
			goto tcpfallback;
		}

		newsk = new_mptcp_sock;
		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);

		newsk->sk_kern_sock = kern;
		lock_sock(newsk);
		__inet_accept(sock, newsock, newsk);

		set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags);
		msk = mptcp_sk(newsk);
		msk->in_accept_queue = 0;

		/* set ssk->sk_socket of accept()ed flows to mptcp socket.
		 * This is needed so NOSPACE flag can be set from tcp stack.
		 */
		mptcp_for_each_subflow(msk, subflow) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!ssk->sk_socket)
				mptcp_sock_graft(ssk, newsock);
		}

		/* Do late cleanup for the first subflow as necessary. Also
		 * deal with bad peers not doing a complete shutdown.
		 */
		if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
			__mptcp_close_ssk(newsk, msk->first,
					  mptcp_subflow_ctx(msk->first), 0);
			if (unlikely(list_is_singular(&msk->conn_list)))
				mptcp_set_state(newsk, TCP_CLOSE);
		}
	} else {
		MPTCP_INC_STATS(sock_net(ssk),
				MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
tcpfallback:
		newsk->sk_kern_sock = kern;
		lock_sock(newsk);
		__inet_accept(sock, newsock, newsk);
		/* we are being invoked after accepting a non-mp-capable
		 * flow: sk is a tcp_sk, not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		WRITE_ONCE(newsock->sk->sk_socket->ops,
			   mptcp_fallback_tcp_ops(newsock->sk));
	}
	release_sock(newsk);

	return 0;
}
static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;

	if (sk_stream_is_writeable(sk))
		return EPOLLOUT | EPOLLWRNORM;

	mptcp_set_nospace(sk);
	smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
	if (sk_stream_is_writeable(sk))
		return EPOLLOUT | EPOLLWRNORM;

	return 0;
}
static __poll_t mptcp_poll(struct file *file, struct socket *sock,
			   struct poll_table_struct *wait)
{
	struct sock *sk = sock->sk;
	struct mptcp_sock *msk;
	__poll_t mask = 0;
	u8 shutdown;
	int state;

	msk = mptcp_sk(sk);
	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
	if (state == TCP_LISTEN) {
		struct sock *ssk = READ_ONCE(msk->first);

		if (WARN_ON_ONCE(!ssk))
			return 0;

		return inet_csk_listen_poll(ssk);
	}

	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
		mask |= mptcp_check_readable(sk);
		if (shutdown & SEND_SHUTDOWN)
			mask |= EPOLLOUT | EPOLLWRNORM;
		else
			mask |= mptcp_check_writeable(msk);
	} else if (state == TCP_SYN_SENT &&
		   inet_test_bit(DEFER_CONNECT, sk)) {
		/* cf tcp_poll() note about TFO */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}

	/* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err))
		mask |= EPOLLERR;

	return mask;
}
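
/* poll() reporting is fully msk-level: readability comes from the MPTCP
 * receive queue, writability from the MPTCP-level send buffer, and the
 * DEFER_CONNECT case mirrors plain TCP fastopen, reporting the socket as
 * writable before the handshake has actually completed.
 */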
static const struct proto_ops mptcp_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = mptcp_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.set_rcvlowat	   = mptcp_set_rcvlowat,
};

static struct inet_protosw mptcp_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_prot,
	.ops		= &mptcp_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};
static int mptcp_napi_poll(struct napi_struct *napi, int budget)
{
	struct mptcp_delegated_action *delegated;
	struct mptcp_subflow_context *subflow;
	int work_done = 0;

	delegated = container_of(napi, struct mptcp_delegated_action, napi);
	while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		bh_lock_sock_nested(ssk);
		if (!sock_owned_by_user(ssk)) {
			mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
		} else {
			/* tcp_release_cb_override already processed
			 * the action or will do at next release_sock().
			 * In both cases must dequeue the subflow here - on the same
			 * CPU that scheduled it.
			 */
			clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
		}
		bh_unlock_sock(ssk);

		if (++work_done == budget)
			return budget;
	}

	/* always provide a 0 'work_done' argument, so that napi_complete_done
	 * will not try accessing the NULL napi->dev ptr
	 */
	napi_complete_done(napi, 0);
	return work_done;
}
void __init mptcp_proto_init(void)
{
	struct mptcp_delegated_action *delegated;
	int cpu;

	mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;

	if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL))
		panic("Failed to allocate MPTCP pcpu counter\n");

	init_dummy_netdev(&mptcp_napi_dev);
	for_each_possible_cpu(cpu) {
		delegated = per_cpu_ptr(&mptcp_delegated_actions, cpu);
		INIT_LIST_HEAD(&delegated->head);
		netif_napi_add_tx(&mptcp_napi_dev, &delegated->napi,
				  mptcp_napi_poll);
		napi_enable(&delegated->napi);
	}

	mptcp_subflow_init();
	mptcp_pm_init();
	mptcp_token_init();

	if (proto_register(&mptcp_prot, 1) != 0)
		panic("Failed to register MPTCP proto.\n");

	inet_register_protosw(&mptcp_protosw);

	BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb));
}
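
/* Once mptcp_proto_init() has registered the protocol, userspace opens an
 * MPTCP socket exactly like a TCP one, just with IPPROTO_MPTCP. Illustrative
 * sketch (not part of the kernel sources):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	// additional subflows are then managed by the in-kernel path manager
 */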
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static const struct proto_ops mptcp_v6_stream_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = mptcp_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = mptcp_stream_accept,
	.getname	   = inet6_getname,
	.poll		   = mptcp_poll,
	.ioctl		   = inet6_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = mptcp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet6_sendmsg,
	.recvmsg	   = inet6_recvmsg,
	.mmap		   = sock_no_mmap,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet6_compat_ioctl,
#endif
	.set_rcvlowat	   = mptcp_set_rcvlowat,
};

static struct proto mptcp_v6_prot;

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,
	.prot		= &mptcp_v6_prot,
	.ops		= &mptcp_v6_stream_ops,
	.flags		= INET_PROTOSW_ICSK,
};

int __init mptcp_proto_v6_init(void)
{
	int err;

	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
	mptcp_v6_prot.ipv6_pinfo_offset = offsetof(struct mptcp6_sock, np);

	err = proto_register(&mptcp_v6_prot, 1);
	if (err)
		return err;

	err = inet6_register_protosw(&mptcp_v6_protosw);
	if (err)
		proto_unregister(&mptcp_v6_prot);