1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2017 - 2019, Intel Corporation.
7 #define pr_fmt(fmt) "MPTCP: " fmt
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <linux/sched/signal.h>
13 #include <linux/atomic.h>
15 #include <net/inet_common.h>
16 #include <net/inet_hashtables.h>
17 #include <net/protocol.h>
19 #include <net/tcp_states.h>
20 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
21 #include <net/transp_v6.h>
23 #include <net/mptcp.h>
25 #include <asm/ioctls.h>
29 #define CREATE_TRACE_POINTS
30 #include <trace/events/mptcp.h>
32 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
34 struct mptcp_sock msk
;
40 MPTCP_CMSG_TS
= BIT(0),
41 MPTCP_CMSG_INQ
= BIT(1),
44 static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp
;
46 static void __mptcp_destroy_sock(struct sock
*sk
);
47 static void mptcp_check_send_data_fin(struct sock
*sk
);
49 DEFINE_PER_CPU(struct mptcp_delegated_action
, mptcp_delegated_actions
);
50 static struct net_device mptcp_napi_dev
;
52 /* Returns end sequence number of the receiver's advertised window */
53 static u64
mptcp_wnd_end(const struct mptcp_sock
*msk
)
55 return READ_ONCE(msk
->wnd_end
);
58 static bool mptcp_is_tcpsk(struct sock
*sk
)
60 struct socket
*sock
= sk
->sk_socket
;
62 if (unlikely(sk
->sk_prot
== &tcp_prot
)) {
63 /* we are being invoked after mptcp_accept() has
64 * accepted a non-mp-capable flow: sk is a tcp_sk,
67 * Hand the socket over to tcp so all further socket ops
70 WRITE_ONCE(sock
->ops
, &inet_stream_ops
);
72 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
73 } else if (unlikely(sk
->sk_prot
== &tcpv6_prot
)) {
74 WRITE_ONCE(sock
->ops
, &inet6_stream_ops
);
82 static int __mptcp_socket_create(struct mptcp_sock
*msk
)
84 struct mptcp_subflow_context
*subflow
;
85 struct sock
*sk
= (struct sock
*)msk
;
89 err
= mptcp_subflow_create_socket(sk
, sk
->sk_family
, &ssock
);
93 msk
->scaling_ratio
= tcp_sk(ssock
->sk
)->scaling_ratio
;
94 WRITE_ONCE(msk
->first
, ssock
->sk
);
95 subflow
= mptcp_subflow_ctx(ssock
->sk
);
96 list_add(&subflow
->node
, &msk
->conn_list
);
98 subflow
->request_mptcp
= 1;
99 subflow
->subflow_id
= msk
->subflow_id
++;
101 /* This is the first subflow, always with id 0 */
102 subflow
->local_id_valid
= 1;
103 mptcp_sock_graft(msk
->first
, sk
->sk_socket
);
104 iput(SOCK_INODE(ssock
));
109 /* If the MPC handshake is not started, returns the first subflow,
110 * eventually allocating it.
112 struct sock
*__mptcp_nmpc_sk(struct mptcp_sock
*msk
)
114 struct sock
*sk
= (struct sock
*)msk
;
117 if (!((1 << sk
->sk_state
) & (TCPF_CLOSE
| TCPF_LISTEN
)))
118 return ERR_PTR(-EINVAL
);
121 ret
= __mptcp_socket_create(msk
);
129 static void mptcp_drop(struct sock
*sk
, struct sk_buff
*skb
)
131 sk_drops_add(sk
, skb
);
135 static void mptcp_rmem_fwd_alloc_add(struct sock
*sk
, int size
)
137 WRITE_ONCE(mptcp_sk(sk
)->rmem_fwd_alloc
,
138 mptcp_sk(sk
)->rmem_fwd_alloc
+ size
);
141 static void mptcp_rmem_charge(struct sock
*sk
, int size
)
143 mptcp_rmem_fwd_alloc_add(sk
, -size
);
146 static bool mptcp_try_coalesce(struct sock
*sk
, struct sk_buff
*to
,
147 struct sk_buff
*from
)
152 if (MPTCP_SKB_CB(from
)->offset
||
153 !skb_try_coalesce(to
, from
, &fragstolen
, &delta
))
156 pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
157 MPTCP_SKB_CB(from
)->map_seq
, MPTCP_SKB_CB(to
)->map_seq
,
158 to
->len
, MPTCP_SKB_CB(from
)->end_seq
);
159 MPTCP_SKB_CB(to
)->end_seq
= MPTCP_SKB_CB(from
)->end_seq
;
161 /* note the fwd memory can reach a negative value after accounting
162 * for the delta, but the later skb free will restore a non
165 atomic_add(delta
, &sk
->sk_rmem_alloc
);
166 mptcp_rmem_charge(sk
, delta
);
167 kfree_skb_partial(from
, fragstolen
);
172 static bool mptcp_ooo_try_coalesce(struct mptcp_sock
*msk
, struct sk_buff
*to
,
173 struct sk_buff
*from
)
175 if (MPTCP_SKB_CB(from
)->map_seq
!= MPTCP_SKB_CB(to
)->end_seq
)
178 return mptcp_try_coalesce((struct sock
*)msk
, to
, from
);
181 static void __mptcp_rmem_reclaim(struct sock
*sk
, int amount
)
183 amount
>>= PAGE_SHIFT
;
184 mptcp_rmem_charge(sk
, amount
<< PAGE_SHIFT
);
185 __sk_mem_reduce_allocated(sk
, amount
);
188 static void mptcp_rmem_uncharge(struct sock
*sk
, int size
)
190 struct mptcp_sock
*msk
= mptcp_sk(sk
);
193 mptcp_rmem_fwd_alloc_add(sk
, size
);
194 reclaimable
= msk
->rmem_fwd_alloc
- sk_unused_reserved_mem(sk
);
196 /* see sk_mem_uncharge() for the rationale behind the following schema */
197 if (unlikely(reclaimable
>= PAGE_SIZE
))
198 __mptcp_rmem_reclaim(sk
, reclaimable
);
201 static void mptcp_rfree(struct sk_buff
*skb
)
203 unsigned int len
= skb
->truesize
;
204 struct sock
*sk
= skb
->sk
;
206 atomic_sub(len
, &sk
->sk_rmem_alloc
);
207 mptcp_rmem_uncharge(sk
, len
);
210 void mptcp_set_owner_r(struct sk_buff
*skb
, struct sock
*sk
)
214 skb
->destructor
= mptcp_rfree
;
215 atomic_add(skb
->truesize
, &sk
->sk_rmem_alloc
);
216 mptcp_rmem_charge(sk
, skb
->truesize
);
219 /* "inspired" by tcp_data_queue_ofo(), main differences:
221 * - don't cope with sacks
223 static void mptcp_data_queue_ofo(struct mptcp_sock
*msk
, struct sk_buff
*skb
)
225 struct sock
*sk
= (struct sock
*)msk
;
226 struct rb_node
**p
, *parent
;
227 u64 seq
, end_seq
, max_seq
;
228 struct sk_buff
*skb1
;
230 seq
= MPTCP_SKB_CB(skb
)->map_seq
;
231 end_seq
= MPTCP_SKB_CB(skb
)->end_seq
;
232 max_seq
= atomic64_read(&msk
->rcv_wnd_sent
);
234 pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk
, seq
, max_seq
,
235 RB_EMPTY_ROOT(&msk
->out_of_order_queue
));
236 if (after64(end_seq
, max_seq
)) {
239 pr_debug("oow by %lld, rcv_wnd_sent %llu\n",
240 (unsigned long long)end_seq
- (unsigned long)max_seq
,
241 (unsigned long long)atomic64_read(&msk
->rcv_wnd_sent
));
242 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_NODSSWINDOW
);
246 p
= &msk
->out_of_order_queue
.rb_node
;
247 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_OFOQUEUE
);
248 if (RB_EMPTY_ROOT(&msk
->out_of_order_queue
)) {
249 rb_link_node(&skb
->rbnode
, NULL
, p
);
250 rb_insert_color(&skb
->rbnode
, &msk
->out_of_order_queue
);
251 msk
->ooo_last_skb
= skb
;
255 /* with 2 subflows, adding at end of ooo queue is quite likely
256 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
258 if (mptcp_ooo_try_coalesce(msk
, msk
->ooo_last_skb
, skb
)) {
259 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_OFOMERGE
);
260 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_OFOQUEUETAIL
);
264 /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
265 if (!before64(seq
, MPTCP_SKB_CB(msk
->ooo_last_skb
)->end_seq
)) {
266 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_OFOQUEUETAIL
);
267 parent
= &msk
->ooo_last_skb
->rbnode
;
268 p
= &parent
->rb_right
;
272 /* Find place to insert this segment. Handle overlaps on the way. */
276 skb1
= rb_to_skb(parent
);
277 if (before64(seq
, MPTCP_SKB_CB(skb1
)->map_seq
)) {
278 p
= &parent
->rb_left
;
281 if (before64(seq
, MPTCP_SKB_CB(skb1
)->end_seq
)) {
282 if (!after64(end_seq
, MPTCP_SKB_CB(skb1
)->end_seq
)) {
283 /* All the bits are present. Drop. */
285 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_DUPDATA
);
288 if (after64(seq
, MPTCP_SKB_CB(skb1
)->map_seq
)) {
292 * continue traversing
295 /* skb's seq == skb1's seq and skb covers skb1.
296 * Replace skb1 with skb.
298 rb_replace_node(&skb1
->rbnode
, &skb
->rbnode
,
299 &msk
->out_of_order_queue
);
300 mptcp_drop(sk
, skb1
);
301 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_DUPDATA
);
304 } else if (mptcp_ooo_try_coalesce(msk
, skb1
, skb
)) {
305 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_OFOMERGE
);
308 p
= &parent
->rb_right
;
312 /* Insert segment into RB tree. */
313 rb_link_node(&skb
->rbnode
, parent
, p
);
314 rb_insert_color(&skb
->rbnode
, &msk
->out_of_order_queue
);
317 /* Remove other segments covered by skb. */
318 while ((skb1
= skb_rb_next(skb
)) != NULL
) {
319 if (before64(end_seq
, MPTCP_SKB_CB(skb1
)->end_seq
))
321 rb_erase(&skb1
->rbnode
, &msk
->out_of_order_queue
);
322 mptcp_drop(sk
, skb1
);
323 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_DUPDATA
);
325 /* If there is no skb after us, we are the last_skb ! */
327 msk
->ooo_last_skb
= skb
;
331 mptcp_set_owner_r(skb
, sk
);
334 static bool mptcp_rmem_schedule(struct sock
*sk
, struct sock
*ssk
, int size
)
336 struct mptcp_sock
*msk
= mptcp_sk(sk
);
339 if (size
<= msk
->rmem_fwd_alloc
)
342 size
-= msk
->rmem_fwd_alloc
;
343 amt
= sk_mem_pages(size
);
344 amount
= amt
<< PAGE_SHIFT
;
345 if (!__sk_mem_raise_allocated(sk
, size
, amt
, SK_MEM_RECV
))
348 mptcp_rmem_fwd_alloc_add(sk
, amount
);
352 static bool __mptcp_move_skb(struct mptcp_sock
*msk
, struct sock
*ssk
,
353 struct sk_buff
*skb
, unsigned int offset
,
356 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
357 struct sock
*sk
= (struct sock
*)msk
;
358 struct sk_buff
*tail
;
361 __skb_unlink(skb
, &ssk
->sk_receive_queue
);
366 /* try to fetch required memory from subflow */
367 if (!mptcp_rmem_schedule(sk
, ssk
, skb
->truesize
))
370 has_rxtstamp
= TCP_SKB_CB(skb
)->has_rxtstamp
;
372 /* the skb map_seq accounts for the skb offset:
373 * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
376 MPTCP_SKB_CB(skb
)->map_seq
= mptcp_subflow_get_mapped_dsn(subflow
);
377 MPTCP_SKB_CB(skb
)->end_seq
= MPTCP_SKB_CB(skb
)->map_seq
+ copy_len
;
378 MPTCP_SKB_CB(skb
)->offset
= offset
;
379 MPTCP_SKB_CB(skb
)->has_rxtstamp
= has_rxtstamp
;
381 if (MPTCP_SKB_CB(skb
)->map_seq
== msk
->ack_seq
) {
383 msk
->bytes_received
+= copy_len
;
384 WRITE_ONCE(msk
->ack_seq
, msk
->ack_seq
+ copy_len
);
385 tail
= skb_peek_tail(&sk
->sk_receive_queue
);
386 if (tail
&& mptcp_try_coalesce(sk
, tail
, skb
))
389 mptcp_set_owner_r(skb
, sk
);
390 __skb_queue_tail(&sk
->sk_receive_queue
, skb
);
392 } else if (after64(MPTCP_SKB_CB(skb
)->map_seq
, msk
->ack_seq
)) {
393 mptcp_data_queue_ofo(msk
, skb
);
397 /* old data, keep it simple and drop the whole pkt, sender
398 * will retransmit as needed, if needed.
400 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_DUPDATA
);
406 static void mptcp_stop_rtx_timer(struct sock
*sk
)
408 struct inet_connection_sock
*icsk
= inet_csk(sk
);
410 sk_stop_timer(sk
, &icsk
->icsk_retransmit_timer
);
411 mptcp_sk(sk
)->timer_ival
= 0;
414 static void mptcp_close_wake_up(struct sock
*sk
)
416 if (sock_flag(sk
, SOCK_DEAD
))
419 sk
->sk_state_change(sk
);
420 if (sk
->sk_shutdown
== SHUTDOWN_MASK
||
421 sk
->sk_state
== TCP_CLOSE
)
422 sk_wake_async(sk
, SOCK_WAKE_WAITD
, POLL_HUP
);
424 sk_wake_async(sk
, SOCK_WAKE_WAITD
, POLL_IN
);
427 static bool mptcp_pending_data_fin_ack(struct sock
*sk
)
429 struct mptcp_sock
*msk
= mptcp_sk(sk
);
431 return ((1 << sk
->sk_state
) &
432 (TCPF_FIN_WAIT1
| TCPF_CLOSING
| TCPF_LAST_ACK
)) &&
433 msk
->write_seq
== READ_ONCE(msk
->snd_una
);
436 static void mptcp_check_data_fin_ack(struct sock
*sk
)
438 struct mptcp_sock
*msk
= mptcp_sk(sk
);
440 /* Look for an acknowledged DATA_FIN */
441 if (mptcp_pending_data_fin_ack(sk
)) {
442 WRITE_ONCE(msk
->snd_data_fin_enable
, 0);
444 switch (sk
->sk_state
) {
446 inet_sk_state_store(sk
, TCP_FIN_WAIT2
);
450 inet_sk_state_store(sk
, TCP_CLOSE
);
454 mptcp_close_wake_up(sk
);
458 static bool mptcp_pending_data_fin(struct sock
*sk
, u64
*seq
)
460 struct mptcp_sock
*msk
= mptcp_sk(sk
);
462 if (READ_ONCE(msk
->rcv_data_fin
) &&
463 ((1 << sk
->sk_state
) &
464 (TCPF_ESTABLISHED
| TCPF_FIN_WAIT1
| TCPF_FIN_WAIT2
))) {
465 u64 rcv_data_fin_seq
= READ_ONCE(msk
->rcv_data_fin_seq
);
467 if (msk
->ack_seq
== rcv_data_fin_seq
) {
469 *seq
= rcv_data_fin_seq
;
478 static void mptcp_set_datafin_timeout(struct sock
*sk
)
480 struct inet_connection_sock
*icsk
= inet_csk(sk
);
483 retransmits
= min_t(u32
, icsk
->icsk_retransmits
,
484 ilog2(TCP_RTO_MAX
/ TCP_RTO_MIN
));
486 mptcp_sk(sk
)->timer_ival
= TCP_RTO_MIN
<< retransmits
;
489 static void __mptcp_set_timeout(struct sock
*sk
, long tout
)
491 mptcp_sk(sk
)->timer_ival
= tout
> 0 ? tout
: TCP_RTO_MIN
;
494 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context
*subflow
)
496 const struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
498 return inet_csk(ssk
)->icsk_pending
&& !subflow
->stale_count
?
499 inet_csk(ssk
)->icsk_timeout
- jiffies
: 0;
502 static void mptcp_set_timeout(struct sock
*sk
)
504 struct mptcp_subflow_context
*subflow
;
507 mptcp_for_each_subflow(mptcp_sk(sk
), subflow
)
508 tout
= max(tout
, mptcp_timeout_from_subflow(subflow
));
509 __mptcp_set_timeout(sk
, tout
);
512 static inline bool tcp_can_send_ack(const struct sock
*ssk
)
514 return !((1 << inet_sk_state_load(ssk
)) &
515 (TCPF_SYN_SENT
| TCPF_SYN_RECV
| TCPF_TIME_WAIT
| TCPF_CLOSE
| TCPF_LISTEN
));
518 void __mptcp_subflow_send_ack(struct sock
*ssk
)
520 if (tcp_can_send_ack(ssk
))
524 static void mptcp_subflow_send_ack(struct sock
*ssk
)
528 slow
= lock_sock_fast(ssk
);
529 __mptcp_subflow_send_ack(ssk
);
530 unlock_sock_fast(ssk
, slow
);
533 static void mptcp_send_ack(struct mptcp_sock
*msk
)
535 struct mptcp_subflow_context
*subflow
;
537 mptcp_for_each_subflow(msk
, subflow
)
538 mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow
));
541 static void mptcp_subflow_cleanup_rbuf(struct sock
*ssk
)
545 slow
= lock_sock_fast(ssk
);
546 if (tcp_can_send_ack(ssk
))
547 tcp_cleanup_rbuf(ssk
, 1);
548 unlock_sock_fast(ssk
, slow
);
551 static bool mptcp_subflow_could_cleanup(const struct sock
*ssk
, bool rx_empty
)
553 const struct inet_connection_sock
*icsk
= inet_csk(ssk
);
554 u8 ack_pending
= READ_ONCE(icsk
->icsk_ack
.pending
);
555 const struct tcp_sock
*tp
= tcp_sk(ssk
);
557 return (ack_pending
& ICSK_ACK_SCHED
) &&
558 ((READ_ONCE(tp
->rcv_nxt
) - READ_ONCE(tp
->rcv_wup
) >
559 READ_ONCE(icsk
->icsk_ack
.rcv_mss
)) ||
560 (rx_empty
&& ack_pending
&
561 (ICSK_ACK_PUSHED2
| ICSK_ACK_PUSHED
)));
564 static void mptcp_cleanup_rbuf(struct mptcp_sock
*msk
)
566 int old_space
= READ_ONCE(msk
->old_wspace
);
567 struct mptcp_subflow_context
*subflow
;
568 struct sock
*sk
= (struct sock
*)msk
;
569 int space
= __mptcp_space(sk
);
570 bool cleanup
, rx_empty
;
572 cleanup
= (space
> 0) && (space
>= (old_space
<< 1));
573 rx_empty
= !__mptcp_rmem(sk
);
575 mptcp_for_each_subflow(msk
, subflow
) {
576 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
578 if (cleanup
|| mptcp_subflow_could_cleanup(ssk
, rx_empty
))
579 mptcp_subflow_cleanup_rbuf(ssk
);
583 static bool mptcp_check_data_fin(struct sock
*sk
)
585 struct mptcp_sock
*msk
= mptcp_sk(sk
);
586 u64 rcv_data_fin_seq
;
589 /* Need to ack a DATA_FIN received from a peer while this side
590 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
591 * msk->rcv_data_fin was set when parsing the incoming options
592 * at the subflow level and the msk lock was not held, so this
593 * is the first opportunity to act on the DATA_FIN and change
596 * If we are caught up to the sequence number of the incoming
597 * DATA_FIN, send the DATA_ACK now and do state transition. If
598 * not caught up, do nothing and let the recv code send DATA_ACK
602 if (mptcp_pending_data_fin(sk
, &rcv_data_fin_seq
)) {
603 WRITE_ONCE(msk
->ack_seq
, msk
->ack_seq
+ 1);
604 WRITE_ONCE(msk
->rcv_data_fin
, 0);
606 WRITE_ONCE(sk
->sk_shutdown
, sk
->sk_shutdown
| RCV_SHUTDOWN
);
607 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
609 switch (sk
->sk_state
) {
610 case TCP_ESTABLISHED
:
611 inet_sk_state_store(sk
, TCP_CLOSE_WAIT
);
614 inet_sk_state_store(sk
, TCP_CLOSING
);
617 inet_sk_state_store(sk
, TCP_CLOSE
);
620 /* Other states not expected */
626 if (!__mptcp_check_fallback(msk
))
628 mptcp_close_wake_up(sk
);
633 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock
*msk
,
637 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
638 struct sock
*sk
= (struct sock
*)msk
;
639 unsigned int moved
= 0;
640 bool more_data_avail
;
645 sk_rbuf
= READ_ONCE(sk
->sk_rcvbuf
);
647 if (!(sk
->sk_userlocks
& SOCK_RCVBUF_LOCK
)) {
648 int ssk_rbuf
= READ_ONCE(ssk
->sk_rcvbuf
);
650 if (unlikely(ssk_rbuf
> sk_rbuf
)) {
651 WRITE_ONCE(sk
->sk_rcvbuf
, ssk_rbuf
);
656 pr_debug("msk=%p ssk=%p", msk
, ssk
);
659 u32 map_remaining
, offset
;
660 u32 seq
= tp
->copied_seq
;
664 /* try to move as much data as available */
665 map_remaining
= subflow
->map_data_len
-
666 mptcp_subflow_get_map_offset(subflow
);
668 skb
= skb_peek(&ssk
->sk_receive_queue
);
670 /* With racing move_skbs_to_msk() and __mptcp_move_skbs(),
671 * a different CPU can have already processed the pending
672 * data, stop here or we can enter an infinite loop
679 if (__mptcp_check_fallback(msk
)) {
680 /* Under fallback skbs have no MPTCP extension and TCP could
681 * collapse them between the dummy map creation and the
682 * current dequeue. Be sure to adjust the map size.
684 map_remaining
= skb
->len
;
685 subflow
->map_data_len
= skb
->len
;
688 offset
= seq
- TCP_SKB_CB(skb
)->seq
;
689 fin
= TCP_SKB_CB(skb
)->tcp_flags
& TCPHDR_FIN
;
695 if (offset
< skb
->len
) {
696 size_t len
= skb
->len
- offset
;
701 if (__mptcp_move_skb(msk
, ssk
, skb
, offset
, len
))
705 if (WARN_ON_ONCE(map_remaining
< len
))
709 sk_eat_skb(ssk
, skb
);
713 WRITE_ONCE(tp
->copied_seq
, seq
);
714 more_data_avail
= mptcp_subflow_data_available(ssk
);
716 if (atomic_read(&sk
->sk_rmem_alloc
) > sk_rbuf
) {
720 } while (more_data_avail
);
726 static bool __mptcp_ofo_queue(struct mptcp_sock
*msk
)
728 struct sock
*sk
= (struct sock
*)msk
;
729 struct sk_buff
*skb
, *tail
;
734 p
= rb_first(&msk
->out_of_order_queue
);
735 pr_debug("msk=%p empty=%d", msk
, RB_EMPTY_ROOT(&msk
->out_of_order_queue
));
738 if (after64(MPTCP_SKB_CB(skb
)->map_seq
, msk
->ack_seq
))
742 rb_erase(&skb
->rbnode
, &msk
->out_of_order_queue
);
744 if (unlikely(!after64(MPTCP_SKB_CB(skb
)->end_seq
,
747 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_DUPDATA
);
751 end_seq
= MPTCP_SKB_CB(skb
)->end_seq
;
752 tail
= skb_peek_tail(&sk
->sk_receive_queue
);
753 if (!tail
|| !mptcp_ooo_try_coalesce(msk
, tail
, skb
)) {
754 int delta
= msk
->ack_seq
- MPTCP_SKB_CB(skb
)->map_seq
;
756 /* skip overlapping data, if any */
757 pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
758 MPTCP_SKB_CB(skb
)->map_seq
, msk
->ack_seq
,
760 MPTCP_SKB_CB(skb
)->offset
+= delta
;
761 MPTCP_SKB_CB(skb
)->map_seq
+= delta
;
762 __skb_queue_tail(&sk
->sk_receive_queue
, skb
);
764 msk
->bytes_received
+= end_seq
- msk
->ack_seq
;
765 msk
->ack_seq
= end_seq
;
771 static bool __mptcp_subflow_error_report(struct sock
*sk
, struct sock
*ssk
)
773 int err
= sock_error(ssk
);
779 /* only propagate errors on fallen-back sockets or
782 if (sk
->sk_state
!= TCP_SYN_SENT
&& !__mptcp_check_fallback(mptcp_sk(sk
)))
785 /* We need to propagate only transition to CLOSE state.
786 * Orphaned socket will see such state change via
787 * subflow_sched_work_if_closed() and that path will properly
788 * destroy the msk as needed.
790 ssk_state
= inet_sk_state_load(ssk
);
791 if (ssk_state
== TCP_CLOSE
&& !sock_flag(sk
, SOCK_DEAD
))
792 inet_sk_state_store(sk
, ssk_state
);
793 WRITE_ONCE(sk
->sk_err
, -err
);
795 /* This barrier is coupled with smp_rmb() in mptcp_poll() */
801 void __mptcp_error_report(struct sock
*sk
)
803 struct mptcp_subflow_context
*subflow
;
804 struct mptcp_sock
*msk
= mptcp_sk(sk
);
806 mptcp_for_each_subflow(msk
, subflow
)
807 if (__mptcp_subflow_error_report(sk
, mptcp_subflow_tcp_sock(subflow
)))
811 /* In most cases we will be able to lock the mptcp socket. If its already
812 * owned, we need to defer to the work queue to avoid ABBA deadlock.
814 static bool move_skbs_to_msk(struct mptcp_sock
*msk
, struct sock
*ssk
)
816 struct sock
*sk
= (struct sock
*)msk
;
817 unsigned int moved
= 0;
819 __mptcp_move_skbs_from_subflow(msk
, ssk
, &moved
);
820 __mptcp_ofo_queue(msk
);
821 if (unlikely(ssk
->sk_err
)) {
822 if (!sock_owned_by_user(sk
))
823 __mptcp_error_report(sk
);
825 __set_bit(MPTCP_ERROR_REPORT
, &msk
->cb_flags
);
828 /* If the moves have caught up with the DATA_FIN sequence number
829 * it's time to ack the DATA_FIN and change socket state, but
830 * this is not a good place to change state. Let the workqueue
833 if (mptcp_pending_data_fin(sk
, NULL
))
834 mptcp_schedule_work(sk
);
838 void mptcp_data_ready(struct sock
*sk
, struct sock
*ssk
)
840 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
841 struct mptcp_sock
*msk
= mptcp_sk(sk
);
842 int sk_rbuf
, ssk_rbuf
;
844 /* The peer can send data while we are shutting down this
845 * subflow at msk destruction time, but we must avoid enqueuing
846 * more data to the msk receive queue
848 if (unlikely(subflow
->disposable
))
851 ssk_rbuf
= READ_ONCE(ssk
->sk_rcvbuf
);
852 sk_rbuf
= READ_ONCE(sk
->sk_rcvbuf
);
853 if (unlikely(ssk_rbuf
> sk_rbuf
))
856 /* over limit? can't append more skbs to msk, Also, no need to wake-up*/
857 if (__mptcp_rmem(sk
) > sk_rbuf
) {
858 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_RCVPRUNED
);
862 /* Wake-up the reader only for in-sequence data */
864 if (move_skbs_to_msk(msk
, ssk
) && mptcp_epollin_ready(sk
))
865 sk
->sk_data_ready(sk
);
866 mptcp_data_unlock(sk
);
869 static void mptcp_subflow_joined(struct mptcp_sock
*msk
, struct sock
*ssk
)
871 mptcp_subflow_ctx(ssk
)->map_seq
= READ_ONCE(msk
->ack_seq
);
872 WRITE_ONCE(msk
->allow_infinite_fallback
, false);
873 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED
, msk
, ssk
, GFP_ATOMIC
);
876 static bool __mptcp_finish_join(struct mptcp_sock
*msk
, struct sock
*ssk
)
878 struct sock
*sk
= (struct sock
*)msk
;
880 if (sk
->sk_state
!= TCP_ESTABLISHED
)
883 /* attach to msk socket only after we are sure we will deal with it
886 if (sk
->sk_socket
&& !ssk
->sk_socket
)
887 mptcp_sock_graft(ssk
, sk
->sk_socket
);
889 mptcp_subflow_ctx(ssk
)->subflow_id
= msk
->subflow_id
++;
890 mptcp_sockopt_sync_locked(msk
, ssk
);
891 mptcp_subflow_joined(msk
, ssk
);
892 mptcp_stop_tout_timer(sk
);
893 __mptcp_propagate_sndbuf(sk
, ssk
);
897 static void __mptcp_flush_join_list(struct sock
*sk
, struct list_head
*join_list
)
899 struct mptcp_subflow_context
*tmp
, *subflow
;
900 struct mptcp_sock
*msk
= mptcp_sk(sk
);
902 list_for_each_entry_safe(subflow
, tmp
, join_list
, node
) {
903 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
904 bool slow
= lock_sock_fast(ssk
);
906 list_move_tail(&subflow
->node
, &msk
->conn_list
);
907 if (!__mptcp_finish_join(msk
, ssk
))
908 mptcp_subflow_reset(ssk
);
909 unlock_sock_fast(ssk
, slow
);
913 static bool mptcp_rtx_timer_pending(struct sock
*sk
)
915 return timer_pending(&inet_csk(sk
)->icsk_retransmit_timer
);
918 static void mptcp_reset_rtx_timer(struct sock
*sk
)
920 struct inet_connection_sock
*icsk
= inet_csk(sk
);
923 /* prevent rescheduling on close */
924 if (unlikely(inet_sk_state_load(sk
) == TCP_CLOSE
))
927 tout
= mptcp_sk(sk
)->timer_ival
;
928 sk_reset_timer(sk
, &icsk
->icsk_retransmit_timer
, jiffies
+ tout
);
931 bool mptcp_schedule_work(struct sock
*sk
)
933 if (inet_sk_state_load(sk
) != TCP_CLOSE
&&
934 schedule_work(&mptcp_sk(sk
)->work
)) {
935 /* each subflow already holds a reference to the sk, and the
936 * workqueue is invoked by a subflow, so sk can't go away here.
944 static struct sock
*mptcp_subflow_recv_lookup(const struct mptcp_sock
*msk
)
946 struct mptcp_subflow_context
*subflow
;
948 msk_owned_by_me(msk
);
950 mptcp_for_each_subflow(msk
, subflow
) {
951 if (READ_ONCE(subflow
->data_avail
))
952 return mptcp_subflow_tcp_sock(subflow
);
958 static bool mptcp_skb_can_collapse_to(u64 write_seq
,
959 const struct sk_buff
*skb
,
960 const struct mptcp_ext
*mpext
)
962 if (!tcp_skb_can_collapse_to(skb
))
965 /* can collapse only if MPTCP level sequence is in order and this
966 * mapping has not been xmitted yet
968 return mpext
&& mpext
->data_seq
+ mpext
->data_len
== write_seq
&&
972 /* we can append data to the given data frag if:
973 * - there is space available in the backing page_frag
974 * - the data frag tail matches the current page_frag free offset
975 * - the data frag end sequence number matches the current write seq
977 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock
*msk
,
978 const struct page_frag
*pfrag
,
979 const struct mptcp_data_frag
*df
)
981 return df
&& pfrag
->page
== df
->page
&&
982 pfrag
->size
- pfrag
->offset
> 0 &&
983 pfrag
->offset
== (df
->offset
+ df
->data_len
) &&
984 df
->data_seq
+ df
->data_len
== msk
->write_seq
;
987 static void dfrag_uncharge(struct sock
*sk
, int len
)
989 sk_mem_uncharge(sk
, len
);
990 sk_wmem_queued_add(sk
, -len
);
993 static void dfrag_clear(struct sock
*sk
, struct mptcp_data_frag
*dfrag
)
995 int len
= dfrag
->data_len
+ dfrag
->overhead
;
997 list_del(&dfrag
->list
);
998 dfrag_uncharge(sk
, len
);
999 put_page(dfrag
->page
);
1002 static void __mptcp_clean_una(struct sock
*sk
)
1004 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1005 struct mptcp_data_frag
*dtmp
, *dfrag
;
1008 snd_una
= msk
->snd_una
;
1009 list_for_each_entry_safe(dfrag
, dtmp
, &msk
->rtx_queue
, list
) {
1010 if (after64(dfrag
->data_seq
+ dfrag
->data_len
, snd_una
))
1013 if (unlikely(dfrag
== msk
->first_pending
)) {
1014 /* in recovery mode can see ack after the current snd head */
1015 if (WARN_ON_ONCE(!msk
->recovery
))
1018 WRITE_ONCE(msk
->first_pending
, mptcp_send_next(sk
));
1021 dfrag_clear(sk
, dfrag
);
1024 dfrag
= mptcp_rtx_head(sk
);
1025 if (dfrag
&& after64(snd_una
, dfrag
->data_seq
)) {
1026 u64 delta
= snd_una
- dfrag
->data_seq
;
1028 /* prevent wrap around in recovery mode */
1029 if (unlikely(delta
> dfrag
->already_sent
)) {
1030 if (WARN_ON_ONCE(!msk
->recovery
))
1032 if (WARN_ON_ONCE(delta
> dfrag
->data_len
))
1034 dfrag
->already_sent
+= delta
- dfrag
->already_sent
;
1037 dfrag
->data_seq
+= delta
;
1038 dfrag
->offset
+= delta
;
1039 dfrag
->data_len
-= delta
;
1040 dfrag
->already_sent
-= delta
;
1042 dfrag_uncharge(sk
, delta
);
1045 /* all retransmitted data acked, recovery completed */
1046 if (unlikely(msk
->recovery
) && after64(msk
->snd_una
, msk
->recovery_snd_nxt
))
1047 msk
->recovery
= false;
1050 if (snd_una
== READ_ONCE(msk
->snd_nxt
) &&
1051 snd_una
== READ_ONCE(msk
->write_seq
)) {
1052 if (mptcp_rtx_timer_pending(sk
) && !mptcp_data_fin_enabled(msk
))
1053 mptcp_stop_rtx_timer(sk
);
1055 mptcp_reset_rtx_timer(sk
);
1059 static void __mptcp_clean_una_wakeup(struct sock
*sk
)
1061 lockdep_assert_held_once(&sk
->sk_lock
.slock
);
1063 __mptcp_clean_una(sk
);
1064 mptcp_write_space(sk
);
1067 static void mptcp_clean_una_wakeup(struct sock
*sk
)
1069 mptcp_data_lock(sk
);
1070 __mptcp_clean_una_wakeup(sk
);
1071 mptcp_data_unlock(sk
);
1074 static void mptcp_enter_memory_pressure(struct sock
*sk
)
1076 struct mptcp_subflow_context
*subflow
;
1077 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1080 mptcp_for_each_subflow(msk
, subflow
) {
1081 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
1084 tcp_enter_memory_pressure(ssk
);
1085 sk_stream_moderate_sndbuf(ssk
);
1089 __mptcp_sync_sndbuf(sk
);
1092 /* ensure we get enough memory for the frag hdr, beyond some minimal amount of
1095 static bool mptcp_page_frag_refill(struct sock
*sk
, struct page_frag
*pfrag
)
1097 if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag
),
1098 pfrag
, sk
->sk_allocation
)))
1101 mptcp_enter_memory_pressure(sk
);
1105 static struct mptcp_data_frag
*
1106 mptcp_carve_data_frag(const struct mptcp_sock
*msk
, struct page_frag
*pfrag
,
1109 int offset
= ALIGN(orig_offset
, sizeof(long));
1110 struct mptcp_data_frag
*dfrag
;
1112 dfrag
= (struct mptcp_data_frag
*)(page_to_virt(pfrag
->page
) + offset
);
1113 dfrag
->data_len
= 0;
1114 dfrag
->data_seq
= msk
->write_seq
;
1115 dfrag
->overhead
= offset
- orig_offset
+ sizeof(struct mptcp_data_frag
);
1116 dfrag
->offset
= offset
+ sizeof(struct mptcp_data_frag
);
1117 dfrag
->already_sent
= 0;
1118 dfrag
->page
= pfrag
->page
;
1123 struct mptcp_sendmsg_info
{
1129 bool data_lock_held
;
1132 static int mptcp_check_allowed_size(const struct mptcp_sock
*msk
, struct sock
*ssk
,
1133 u64 data_seq
, int avail_size
)
1135 u64 window_end
= mptcp_wnd_end(msk
);
1138 if (__mptcp_check_fallback(msk
))
1141 mptcp_snd_wnd
= window_end
- data_seq
;
1142 avail_size
= min_t(unsigned int, mptcp_snd_wnd
, avail_size
);
1144 if (unlikely(tcp_sk(ssk
)->snd_wnd
< mptcp_snd_wnd
)) {
1145 tcp_sk(ssk
)->snd_wnd
= min_t(u64
, U32_MAX
, mptcp_snd_wnd
);
1146 MPTCP_INC_STATS(sock_net(ssk
), MPTCP_MIB_SNDWNDSHARED
);
1152 static bool __mptcp_add_ext(struct sk_buff
*skb
, gfp_t gfp
)
1154 struct skb_ext
*mpext
= __skb_ext_alloc(gfp
);
1158 __skb_ext_set(skb
, SKB_EXT_MPTCP
, mpext
);
1162 static struct sk_buff
*__mptcp_do_alloc_tx_skb(struct sock
*sk
, gfp_t gfp
)
1164 struct sk_buff
*skb
;
1166 skb
= alloc_skb_fclone(MAX_TCP_HEADER
, gfp
);
1168 if (likely(__mptcp_add_ext(skb
, gfp
))) {
1169 skb_reserve(skb
, MAX_TCP_HEADER
);
1170 skb
->ip_summed
= CHECKSUM_PARTIAL
;
1171 INIT_LIST_HEAD(&skb
->tcp_tsorted_anchor
);
1176 mptcp_enter_memory_pressure(sk
);
1181 static struct sk_buff
*__mptcp_alloc_tx_skb(struct sock
*sk
, struct sock
*ssk
, gfp_t gfp
)
1183 struct sk_buff
*skb
;
1185 skb
= __mptcp_do_alloc_tx_skb(sk
, gfp
);
1189 if (likely(sk_wmem_schedule(ssk
, skb
->truesize
))) {
1190 tcp_skb_entail(ssk
, skb
);
1193 tcp_skb_tsorted_anchor_cleanup(skb
);
1198 static struct sk_buff
*mptcp_alloc_tx_skb(struct sock
*sk
, struct sock
*ssk
, bool data_lock_held
)
1200 gfp_t gfp
= data_lock_held
? GFP_ATOMIC
: sk
->sk_allocation
;
1202 return __mptcp_alloc_tx_skb(sk
, ssk
, gfp
);
1205 /* note: this always recompute the csum on the whole skb, even
1206 * if we just appended a single frag. More status info needed
1208 static void mptcp_update_data_checksum(struct sk_buff
*skb
, int added
)
1210 struct mptcp_ext
*mpext
= mptcp_get_ext(skb
);
1211 __wsum csum
= ~csum_unfold(mpext
->csum
);
1212 int offset
= skb
->len
- added
;
1214 mpext
->csum
= csum_fold(csum_block_add(csum
, skb_checksum(skb
, offset
, added
, 0), offset
));
1217 static void mptcp_update_infinite_map(struct mptcp_sock
*msk
,
1219 struct mptcp_ext
*mpext
)
1224 mpext
->infinite_map
= 1;
1225 mpext
->data_len
= 0;
1227 MPTCP_INC_STATS(sock_net(ssk
), MPTCP_MIB_INFINITEMAPTX
);
1228 mptcp_subflow_ctx(ssk
)->send_infinite_map
= 0;
1230 mptcp_do_fallback(ssk
);
1233 static int mptcp_sendmsg_frag(struct sock
*sk
, struct sock
*ssk
,
1234 struct mptcp_data_frag
*dfrag
,
1235 struct mptcp_sendmsg_info
*info
)
1237 u64 data_seq
= dfrag
->data_seq
+ info
->sent
;
1238 int offset
= dfrag
->offset
+ info
->sent
;
1239 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1240 bool zero_window_probe
= false;
1241 struct mptcp_ext
*mpext
= NULL
;
1242 bool can_coalesce
= false;
1243 bool reuse_skb
= true;
1244 struct sk_buff
*skb
;
1248 pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
1249 msk
, ssk
, dfrag
->data_seq
, dfrag
->data_len
, info
->sent
);
1251 if (WARN_ON_ONCE(info
->sent
> info
->limit
||
1252 info
->limit
> dfrag
->data_len
))
1255 if (unlikely(!__tcp_can_send(ssk
)))
1258 /* compute send limit */
1259 info
->mss_now
= tcp_send_mss(ssk
, &info
->size_goal
, info
->flags
);
1260 copy
= info
->size_goal
;
1262 skb
= tcp_write_queue_tail(ssk
);
1263 if (skb
&& copy
> skb
->len
) {
1264 /* Limit the write to the size available in the
1265 * current skb, if any, so that we create at most a new skb.
1266 * Explicitly tells TCP internals to avoid collapsing on later
1267 * queue management operation, to avoid breaking the ext <->
1268 * SSN association set here
1270 mpext
= mptcp_get_ext(skb
);
1271 if (!mptcp_skb_can_collapse_to(data_seq
, skb
, mpext
)) {
1272 TCP_SKB_CB(skb
)->eor
= 1;
1276 i
= skb_shinfo(skb
)->nr_frags
;
1277 can_coalesce
= skb_can_coalesce(skb
, i
, dfrag
->page
, offset
);
1278 if (!can_coalesce
&& i
>= READ_ONCE(sysctl_max_skb_frags
)) {
1279 tcp_mark_push(tcp_sk(ssk
), skb
);
1286 skb
= mptcp_alloc_tx_skb(sk
, ssk
, info
->data_lock_held
);
1290 i
= skb_shinfo(skb
)->nr_frags
;
1292 mpext
= mptcp_get_ext(skb
);
1295 /* Zero window and all data acked? Probe. */
1296 copy
= mptcp_check_allowed_size(msk
, ssk
, data_seq
, copy
);
1298 u64 snd_una
= READ_ONCE(msk
->snd_una
);
1300 if (snd_una
!= msk
->snd_nxt
|| tcp_write_queue_tail(ssk
)) {
1301 tcp_remove_empty_skb(ssk
);
1305 zero_window_probe
= true;
1306 data_seq
= snd_una
- 1;
1310 copy
= min_t(size_t, copy
, info
->limit
- info
->sent
);
1311 if (!sk_wmem_schedule(ssk
, copy
)) {
1312 tcp_remove_empty_skb(ssk
);
1317 skb_frag_size_add(&skb_shinfo(skb
)->frags
[i
- 1], copy
);
1319 get_page(dfrag
->page
);
1320 skb_fill_page_desc(skb
, i
, dfrag
->page
, offset
, copy
);
1324 skb
->data_len
+= copy
;
1325 skb
->truesize
+= copy
;
1326 sk_wmem_queued_add(ssk
, copy
);
1327 sk_mem_charge(ssk
, copy
);
1328 WRITE_ONCE(tcp_sk(ssk
)->write_seq
, tcp_sk(ssk
)->write_seq
+ copy
);
1329 TCP_SKB_CB(skb
)->end_seq
+= copy
;
1330 tcp_skb_pcount_set(skb
, 0);
1332 /* on skb reuse we just need to update the DSS len */
1334 TCP_SKB_CB(skb
)->tcp_flags
&= ~TCPHDR_PSH
;
1335 mpext
->data_len
+= copy
;
1339 memset(mpext
, 0, sizeof(*mpext
));
1340 mpext
->data_seq
= data_seq
;
1341 mpext
->subflow_seq
= mptcp_subflow_ctx(ssk
)->rel_write_seq
;
1342 mpext
->data_len
= copy
;
1346 pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
1347 mpext
->data_seq
, mpext
->subflow_seq
, mpext
->data_len
,
1350 if (zero_window_probe
) {
1351 mptcp_subflow_ctx(ssk
)->rel_write_seq
+= copy
;
1353 if (READ_ONCE(msk
->csum_enabled
))
1354 mptcp_update_data_checksum(skb
, copy
);
1355 tcp_push_pending_frames(ssk
);
1359 if (READ_ONCE(msk
->csum_enabled
))
1360 mptcp_update_data_checksum(skb
, copy
);
1361 if (mptcp_subflow_ctx(ssk
)->send_infinite_map
)
1362 mptcp_update_infinite_map(msk
, ssk
, mpext
);
1363 trace_mptcp_sendmsg_frag(mpext
);
1364 mptcp_subflow_ctx(ssk
)->rel_write_seq
+= copy
;
1368 #define MPTCP_SEND_BURST_SIZE ((1 << 16) - \
1369 sizeof(struct tcphdr) - \
1370 MAX_TCP_OPTION_SPACE - \
1371 sizeof(struct ipv6hdr) - \
1372 sizeof(struct frag_hdr))
1374 struct subflow_send_info
{
1379 void mptcp_subflow_set_active(struct mptcp_subflow_context
*subflow
)
1381 if (!subflow
->stale
)
1385 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow
)), MPTCP_MIB_SUBFLOWRECOVER
);
1388 bool mptcp_subflow_active(struct mptcp_subflow_context
*subflow
)
1390 if (unlikely(subflow
->stale
)) {
1391 u32 rcv_tstamp
= READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow
))->rcv_tstamp
);
1393 if (subflow
->stale_rcv_tstamp
== rcv_tstamp
)
1396 mptcp_subflow_set_active(subflow
);
1398 return __mptcp_subflow_active(subflow
);
1401 #define SSK_MODE_ACTIVE 0
1402 #define SSK_MODE_BACKUP 1
1403 #define SSK_MODE_MAX 2
1405 /* implement the mptcp packet scheduler;
1406 * returns the subflow that will transmit the next DSS
1407 * additionally updates the rtx timeout
1409 struct sock
*mptcp_subflow_get_send(struct mptcp_sock
*msk
)
1411 struct subflow_send_info send_info
[SSK_MODE_MAX
];
1412 struct mptcp_subflow_context
*subflow
;
1413 struct sock
*sk
= (struct sock
*)msk
;
1414 u32 pace
, burst
, wmem
;
1415 int i
, nr_active
= 0;
1420 /* pick the subflow with the lower wmem/wspace ratio */
1421 for (i
= 0; i
< SSK_MODE_MAX
; ++i
) {
1422 send_info
[i
].ssk
= NULL
;
1423 send_info
[i
].linger_time
= -1;
1426 mptcp_for_each_subflow(msk
, subflow
) {
1427 trace_mptcp_subflow_get_send(subflow
);
1428 ssk
= mptcp_subflow_tcp_sock(subflow
);
1429 if (!mptcp_subflow_active(subflow
))
1432 tout
= max(tout
, mptcp_timeout_from_subflow(subflow
));
1433 nr_active
+= !subflow
->backup
;
1434 pace
= subflow
->avg_pacing_rate
;
1435 if (unlikely(!pace
)) {
1436 /* init pacing rate from socket */
1437 subflow
->avg_pacing_rate
= READ_ONCE(ssk
->sk_pacing_rate
);
1438 pace
= subflow
->avg_pacing_rate
;
1443 linger_time
= div_u64((u64
)READ_ONCE(ssk
->sk_wmem_queued
) << 32, pace
);
1444 if (linger_time
< send_info
[subflow
->backup
].linger_time
) {
1445 send_info
[subflow
->backup
].ssk
= ssk
;
1446 send_info
[subflow
->backup
].linger_time
= linger_time
;
1449 __mptcp_set_timeout(sk
, tout
);
1451 /* pick the best backup if no other subflow is active */
1453 send_info
[SSK_MODE_ACTIVE
].ssk
= send_info
[SSK_MODE_BACKUP
].ssk
;
1455 /* According to the blest algorithm, to avoid HoL blocking for the
1456 * faster flow, we need to:
1457 * - estimate the faster flow linger time
1458 * - use the above to estimate the amount of byte transferred
1459 * by the faster flow
1460 * - check that the amount of queued data is greter than the above,
1461 * otherwise do not use the picked, slower, subflow
1462 * We select the subflow with the shorter estimated time to flush
1463 * the queued mem, which basically ensure the above. We just need
1464 * to check that subflow has a non empty cwin.
1466 ssk
= send_info
[SSK_MODE_ACTIVE
].ssk
;
1467 if (!ssk
|| !sk_stream_memory_free(ssk
))
1470 burst
= min_t(int, MPTCP_SEND_BURST_SIZE
, mptcp_wnd_end(msk
) - msk
->snd_nxt
);
1471 wmem
= READ_ONCE(ssk
->sk_wmem_queued
);
1475 subflow
= mptcp_subflow_ctx(ssk
);
1476 subflow
->avg_pacing_rate
= div_u64((u64
)subflow
->avg_pacing_rate
* wmem
+
1477 READ_ONCE(ssk
->sk_pacing_rate
) * burst
,
1479 msk
->snd_burst
= burst
;
1483 static void mptcp_push_release(struct sock
*ssk
, struct mptcp_sendmsg_info
*info
)
1485 tcp_push(ssk
, 0, info
->mss_now
, tcp_sk(ssk
)->nonagle
, info
->size_goal
);
1489 static void mptcp_update_post_push(struct mptcp_sock
*msk
,
1490 struct mptcp_data_frag
*dfrag
,
1493 u64 snd_nxt_new
= dfrag
->data_seq
;
1495 dfrag
->already_sent
+= sent
;
1497 msk
->snd_burst
-= sent
;
1499 snd_nxt_new
+= dfrag
->already_sent
;
1501 /* snd_nxt_new can be smaller than snd_nxt in case mptcp
1502 * is recovering after a failover. In that event, this re-sends
1505 * Thus compute snd_nxt_new candidate based on
1506 * the dfrag->data_seq that was sent and the data
1507 * that has been handed to the subflow for transmission
1508 * and skip update in case it was old dfrag.
1510 if (likely(after64(snd_nxt_new
, msk
->snd_nxt
))) {
1511 msk
->bytes_sent
+= snd_nxt_new
- msk
->snd_nxt
;
1512 msk
->snd_nxt
= snd_nxt_new
;
1516 void mptcp_check_and_set_pending(struct sock
*sk
)
1518 if (mptcp_send_head(sk
))
1519 mptcp_sk(sk
)->push_pending
|= BIT(MPTCP_PUSH_PENDING
);
1522 static int __subflow_push_pending(struct sock
*sk
, struct sock
*ssk
,
1523 struct mptcp_sendmsg_info
*info
)
1525 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1526 struct mptcp_data_frag
*dfrag
;
1527 int len
, copied
= 0, err
= 0;
1529 while ((dfrag
= mptcp_send_head(sk
))) {
1530 info
->sent
= dfrag
->already_sent
;
1531 info
->limit
= dfrag
->data_len
;
1532 len
= dfrag
->data_len
- dfrag
->already_sent
;
1536 ret
= mptcp_sendmsg_frag(sk
, ssk
, dfrag
, info
);
1538 err
= copied
? : ret
;
1546 mptcp_update_post_push(msk
, dfrag
, ret
);
1548 WRITE_ONCE(msk
->first_pending
, mptcp_send_next(sk
));
1550 if (msk
->snd_burst
<= 0 ||
1551 !sk_stream_memory_free(ssk
) ||
1552 !mptcp_subflow_active(mptcp_subflow_ctx(ssk
))) {
1556 mptcp_set_timeout(sk
);
1564 void __mptcp_push_pending(struct sock
*sk
, unsigned int flags
)
1566 struct sock
*prev_ssk
= NULL
, *ssk
= NULL
;
1567 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1568 struct mptcp_sendmsg_info info
= {
1571 bool do_check_data_fin
= false;
1574 while (mptcp_send_head(sk
) && (push_count
> 0)) {
1575 struct mptcp_subflow_context
*subflow
;
1578 if (mptcp_sched_get_send(msk
))
1583 mptcp_for_each_subflow(msk
, subflow
) {
1584 if (READ_ONCE(subflow
->scheduled
)) {
1585 mptcp_subflow_set_scheduled(subflow
, false);
1588 ssk
= mptcp_subflow_tcp_sock(subflow
);
1589 if (ssk
!= prev_ssk
) {
1590 /* First check. If the ssk has changed since
1591 * the last round, release prev_ssk
1594 mptcp_push_release(prev_ssk
, &info
);
1596 /* Need to lock the new subflow only if different
1597 * from the previous one, otherwise we are still
1598 * helding the relevant lock
1605 ret
= __subflow_push_pending(sk
, ssk
, &info
);
1607 if (ret
!= -EAGAIN
||
1608 (1 << ssk
->sk_state
) &
1609 (TCPF_FIN_WAIT1
| TCPF_FIN_WAIT2
| TCPF_CLOSE
))
1613 do_check_data_fin
= true;
1618 /* at this point we held the socket lock for the last subflow we used */
1620 mptcp_push_release(ssk
, &info
);
1622 /* ensure the rtx timer is running */
1623 if (!mptcp_rtx_timer_pending(sk
))
1624 mptcp_reset_rtx_timer(sk
);
1625 if (do_check_data_fin
)
1626 mptcp_check_send_data_fin(sk
);
1629 static void __mptcp_subflow_push_pending(struct sock
*sk
, struct sock
*ssk
, bool first
)
1631 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1632 struct mptcp_sendmsg_info info
= {
1633 .data_lock_held
= true,
1635 bool keep_pushing
= true;
1636 struct sock
*xmit_ssk
;
1640 while (mptcp_send_head(sk
) && keep_pushing
) {
1641 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
1644 /* check for a different subflow usage only after
1645 * spooling the first chunk of data
1648 mptcp_subflow_set_scheduled(subflow
, false);
1649 ret
= __subflow_push_pending(sk
, ssk
, &info
);
1657 if (mptcp_sched_get_send(msk
))
1660 if (READ_ONCE(subflow
->scheduled
)) {
1661 mptcp_subflow_set_scheduled(subflow
, false);
1662 ret
= __subflow_push_pending(sk
, ssk
, &info
);
1664 keep_pushing
= false;
1668 mptcp_for_each_subflow(msk
, subflow
) {
1669 if (READ_ONCE(subflow
->scheduled
)) {
1670 xmit_ssk
= mptcp_subflow_tcp_sock(subflow
);
1671 if (xmit_ssk
!= ssk
) {
1672 mptcp_subflow_delegate(subflow
,
1673 MPTCP_DELEGATE_SEND
);
1674 keep_pushing
= false;
1681 /* __mptcp_alloc_tx_skb could have released some wmem and we are
1682 * not going to flush it via release_sock()
1685 tcp_push(ssk
, 0, info
.mss_now
, tcp_sk(ssk
)->nonagle
,
1687 if (!mptcp_rtx_timer_pending(sk
))
1688 mptcp_reset_rtx_timer(sk
);
1690 if (msk
->snd_data_fin_enable
&&
1691 msk
->snd_nxt
+ 1 == msk
->write_seq
)
1692 mptcp_schedule_work(sk
);
1696 static void mptcp_set_nospace(struct sock
*sk
)
1698 /* enable autotune */
1699 set_bit(SOCK_NOSPACE
, &sk
->sk_socket
->flags
);
1701 /* will be cleared on avail space */
1702 set_bit(MPTCP_NOSPACE
, &mptcp_sk(sk
)->flags
);
1705 static int mptcp_disconnect(struct sock
*sk
, int flags
);
1707 static int mptcp_sendmsg_fastopen(struct sock
*sk
, struct msghdr
*msg
,
1708 size_t len
, int *copied_syn
)
1710 unsigned int saved_flags
= msg
->msg_flags
;
1711 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1715 /* on flags based fastopen the mptcp is supposed to create the
1716 * first subflow right now. Otherwise we are in the defer_connect
1717 * path, and the first subflow must be already present.
1718 * Since the defer_connect flag is cleared after the first succsful
1719 * fastopen attempt, no need to check for additional subflow status.
1721 if (msg
->msg_flags
& MSG_FASTOPEN
) {
1722 ssk
= __mptcp_nmpc_sk(msk
);
1724 return PTR_ERR(ssk
);
1732 msg
->msg_flags
|= MSG_DONTWAIT
;
1733 msk
->fastopening
= 1;
1734 ret
= tcp_sendmsg_fastopen(ssk
, msg
, copied_syn
, len
, NULL
);
1735 msk
->fastopening
= 0;
1736 msg
->msg_flags
= saved_flags
;
1739 /* do the blocking bits of inet_stream_connect outside the ssk socket lock */
1740 if (ret
== -EINPROGRESS
&& !(msg
->msg_flags
& MSG_DONTWAIT
)) {
1741 ret
= __inet_stream_connect(sk
->sk_socket
, msg
->msg_name
,
1742 msg
->msg_namelen
, msg
->msg_flags
, 1);
1744 /* Keep the same behaviour of plain TCP: zero the copied bytes in
1745 * case of any error, except timeout or signal
1747 if (ret
&& ret
!= -EINPROGRESS
&& ret
!= -ERESTARTSYS
&& ret
!= -EINTR
)
1749 } else if (ret
&& ret
!= -EINPROGRESS
) {
1750 /* The disconnect() op called by tcp_sendmsg_fastopen()/
1751 * __inet_stream_connect() can fail, due to looking check,
1752 * see mptcp_disconnect().
1753 * Attempt it again outside the problematic scope.
1755 if (!mptcp_disconnect(sk
, 0))
1756 sk
->sk_socket
->state
= SS_UNCONNECTED
;
1758 inet_clear_bit(DEFER_CONNECT
, sk
);
1763 static int do_copy_data_nocache(struct sock
*sk
, int copy
,
1764 struct iov_iter
*from
, char *to
)
1766 if (sk
->sk_route_caps
& NETIF_F_NOCACHE_COPY
) {
1767 if (!copy_from_iter_full_nocache(to
, copy
, from
))
1769 } else if (!copy_from_iter_full(to
, copy
, from
)) {
1775 static int mptcp_sendmsg(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1777 struct mptcp_sock
*msk
= mptcp_sk(sk
);
1778 struct page_frag
*pfrag
;
1783 /* silently ignore everything else */
1784 msg
->msg_flags
&= MSG_MORE
| MSG_DONTWAIT
| MSG_NOSIGNAL
| MSG_FASTOPEN
;
1788 if (unlikely(inet_test_bit(DEFER_CONNECT
, sk
) ||
1789 msg
->msg_flags
& MSG_FASTOPEN
)) {
1792 ret
= mptcp_sendmsg_fastopen(sk
, msg
, len
, &copied_syn
);
1793 copied
+= copied_syn
;
1794 if (ret
== -EINPROGRESS
&& copied_syn
> 0)
1800 timeo
= sock_sndtimeo(sk
, msg
->msg_flags
& MSG_DONTWAIT
);
1802 if ((1 << sk
->sk_state
) & ~(TCPF_ESTABLISHED
| TCPF_CLOSE_WAIT
)) {
1803 ret
= sk_stream_wait_connect(sk
, &timeo
);
1809 if (unlikely(sk
->sk_err
|| (sk
->sk_shutdown
& SEND_SHUTDOWN
)))
1812 pfrag
= sk_page_frag(sk
);
1814 while (msg_data_left(msg
)) {
1815 int total_ts
, frag_truesize
= 0;
1816 struct mptcp_data_frag
*dfrag
;
1817 bool dfrag_collapsed
;
1818 size_t psize
, offset
;
1820 /* reuse tail pfrag, if possible, or carve a new one from the
1823 dfrag
= mptcp_pending_tail(sk
);
1824 dfrag_collapsed
= mptcp_frag_can_collapse_to(msk
, pfrag
, dfrag
);
1825 if (!dfrag_collapsed
) {
1826 if (!sk_stream_memory_free(sk
))
1827 goto wait_for_memory
;
1829 if (!mptcp_page_frag_refill(sk
, pfrag
))
1830 goto wait_for_memory
;
1832 dfrag
= mptcp_carve_data_frag(msk
, pfrag
, pfrag
->offset
);
1833 frag_truesize
= dfrag
->overhead
;
1836 /* we do not bound vs wspace, to allow a single packet.
1837 * memory accounting will prevent execessive memory usage
1840 offset
= dfrag
->offset
+ dfrag
->data_len
;
1841 psize
= pfrag
->size
- offset
;
1842 psize
= min_t(size_t, psize
, msg_data_left(msg
));
1843 total_ts
= psize
+ frag_truesize
;
1845 if (!sk_wmem_schedule(sk
, total_ts
))
1846 goto wait_for_memory
;
1848 ret
= do_copy_data_nocache(sk
, psize
, &msg
->msg_iter
,
1849 page_address(dfrag
->page
) + offset
);
1853 /* data successfully copied into the write queue */
1854 sk_forward_alloc_add(sk
, -total_ts
);
1856 dfrag
->data_len
+= psize
;
1857 frag_truesize
+= psize
;
1858 pfrag
->offset
+= frag_truesize
;
1859 WRITE_ONCE(msk
->write_seq
, msk
->write_seq
+ psize
);
1861 /* charge data on mptcp pending queue to the msk socket
1862 * Note: we charge such data both to sk and ssk
1864 sk_wmem_queued_add(sk
, frag_truesize
);
1865 if (!dfrag_collapsed
) {
1866 get_page(dfrag
->page
);
1867 list_add_tail(&dfrag
->list
, &msk
->rtx_queue
);
1868 if (!msk
->first_pending
)
1869 WRITE_ONCE(msk
->first_pending
, dfrag
);
1871 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk
,
1872 dfrag
->data_seq
, dfrag
->data_len
, dfrag
->already_sent
,
1878 mptcp_set_nospace(sk
);
1879 __mptcp_push_pending(sk
, msg
->msg_flags
);
1880 ret
= sk_stream_wait_memory(sk
, &timeo
);
1886 __mptcp_push_pending(sk
, msg
->msg_flags
);
1896 copied
= sk_stream_error(sk
, msg
->msg_flags
, ret
);
1900 static int __mptcp_recvmsg_mskq(struct mptcp_sock
*msk
,
1902 size_t len
, int flags
,
1903 struct scm_timestamping_internal
*tss
,
1906 struct sk_buff
*skb
, *tmp
;
1909 skb_queue_walk_safe(&msk
->receive_queue
, skb
, tmp
) {
1910 u32 offset
= MPTCP_SKB_CB(skb
)->offset
;
1911 u32 data_len
= skb
->len
- offset
;
1912 u32 count
= min_t(size_t, len
- copied
, data_len
);
1915 if (!(flags
& MSG_TRUNC
)) {
1916 err
= skb_copy_datagram_msg(skb
, offset
, msg
, count
);
1917 if (unlikely(err
< 0)) {
1924 if (MPTCP_SKB_CB(skb
)->has_rxtstamp
) {
1925 tcp_update_recv_tstamps(skb
, tss
);
1926 *cmsg_flags
|= MPTCP_CMSG_TS
;
1931 if (count
< data_len
) {
1932 if (!(flags
& MSG_PEEK
)) {
1933 MPTCP_SKB_CB(skb
)->offset
+= count
;
1934 MPTCP_SKB_CB(skb
)->map_seq
+= count
;
1935 msk
->bytes_consumed
+= count
;
1940 if (!(flags
& MSG_PEEK
)) {
1941 /* we will bulk release the skb memory later */
1942 skb
->destructor
= NULL
;
1943 WRITE_ONCE(msk
->rmem_released
, msk
->rmem_released
+ skb
->truesize
);
1944 __skb_unlink(skb
, &msk
->receive_queue
);
1946 msk
->bytes_consumed
+= count
;
1956 /* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
1958 * Only difference: Use highest rtt estimate of the subflows in use.
1960 static void mptcp_rcv_space_adjust(struct mptcp_sock
*msk
, int copied
)
1962 struct mptcp_subflow_context
*subflow
;
1963 struct sock
*sk
= (struct sock
*)msk
;
1964 u8 scaling_ratio
= U8_MAX
;
1965 u32 time
, advmss
= 1;
1968 msk_owned_by_me(msk
);
1973 msk
->rcvq_space
.copied
+= copied
;
1975 mstamp
= div_u64(tcp_clock_ns(), NSEC_PER_USEC
);
1976 time
= tcp_stamp_us_delta(mstamp
, msk
->rcvq_space
.time
);
1978 rtt_us
= msk
->rcvq_space
.rtt_us
;
1979 if (rtt_us
&& time
< (rtt_us
>> 3))
1983 mptcp_for_each_subflow(msk
, subflow
) {
1984 const struct tcp_sock
*tp
;
1988 tp
= tcp_sk(mptcp_subflow_tcp_sock(subflow
));
1990 sf_rtt_us
= READ_ONCE(tp
->rcv_rtt_est
.rtt_us
);
1991 sf_advmss
= READ_ONCE(tp
->advmss
);
1993 rtt_us
= max(sf_rtt_us
, rtt_us
);
1994 advmss
= max(sf_advmss
, advmss
);
1995 scaling_ratio
= min(tp
->scaling_ratio
, scaling_ratio
);
1998 msk
->rcvq_space
.rtt_us
= rtt_us
;
1999 msk
->scaling_ratio
= scaling_ratio
;
2000 if (time
< (rtt_us
>> 3) || rtt_us
== 0)
2003 if (msk
->rcvq_space
.copied
<= msk
->rcvq_space
.space
)
2006 if (READ_ONCE(sock_net(sk
)->ipv4
.sysctl_tcp_moderate_rcvbuf
) &&
2007 !(sk
->sk_userlocks
& SOCK_RCVBUF_LOCK
)) {
2011 rcvwin
= ((u64
)msk
->rcvq_space
.copied
<< 1) + 16 * advmss
;
2013 grow
= rcvwin
* (msk
->rcvq_space
.copied
- msk
->rcvq_space
.space
);
2015 do_div(grow
, msk
->rcvq_space
.space
);
2016 rcvwin
+= (grow
<< 1);
2018 rcvbuf
= min_t(u64
, __tcp_space_from_win(scaling_ratio
, rcvwin
),
2019 READ_ONCE(sock_net(sk
)->ipv4
.sysctl_tcp_rmem
[2]));
2021 if (rcvbuf
> sk
->sk_rcvbuf
) {
2024 window_clamp
= __tcp_win_from_space(scaling_ratio
, rcvbuf
);
2025 WRITE_ONCE(sk
->sk_rcvbuf
, rcvbuf
);
2027 /* Make subflows follow along. If we do not do this, we
2028 * get drops at subflow level if skbs can't be moved to
2029 * the mptcp rx queue fast enough (announced rcv_win can
2030 * exceed ssk->sk_rcvbuf).
2032 mptcp_for_each_subflow(msk
, subflow
) {
2036 ssk
= mptcp_subflow_tcp_sock(subflow
);
2037 slow
= lock_sock_fast(ssk
);
2038 WRITE_ONCE(ssk
->sk_rcvbuf
, rcvbuf
);
2039 tcp_sk(ssk
)->window_clamp
= window_clamp
;
2040 tcp_cleanup_rbuf(ssk
, 1);
2041 unlock_sock_fast(ssk
, slow
);
2046 msk
->rcvq_space
.space
= msk
->rcvq_space
.copied
;
2048 msk
->rcvq_space
.copied
= 0;
2049 msk
->rcvq_space
.time
= mstamp
;
2052 static void __mptcp_update_rmem(struct sock
*sk
)
2054 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2056 if (!msk
->rmem_released
)
2059 atomic_sub(msk
->rmem_released
, &sk
->sk_rmem_alloc
);
2060 mptcp_rmem_uncharge(sk
, msk
->rmem_released
);
2061 WRITE_ONCE(msk
->rmem_released
, 0);
2064 static void __mptcp_splice_receive_queue(struct sock
*sk
)
2066 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2068 skb_queue_splice_tail_init(&sk
->sk_receive_queue
, &msk
->receive_queue
);
2071 static bool __mptcp_move_skbs(struct mptcp_sock
*msk
)
2073 struct sock
*sk
= (struct sock
*)msk
;
2074 unsigned int moved
= 0;
2078 struct sock
*ssk
= mptcp_subflow_recv_lookup(msk
);
2081 /* we can have data pending in the subflows only if the msk
2082 * receive buffer was full at subflow_data_ready() time,
2083 * that is an unlikely slow path.
2088 slowpath
= lock_sock_fast(ssk
);
2089 mptcp_data_lock(sk
);
2090 __mptcp_update_rmem(sk
);
2091 done
= __mptcp_move_skbs_from_subflow(msk
, ssk
, &moved
);
2092 mptcp_data_unlock(sk
);
2094 if (unlikely(ssk
->sk_err
))
2095 __mptcp_error_report(sk
);
2096 unlock_sock_fast(ssk
, slowpath
);
2099 /* acquire the data lock only if some input data is pending */
2101 if (!RB_EMPTY_ROOT(&msk
->out_of_order_queue
) ||
2102 !skb_queue_empty_lockless(&sk
->sk_receive_queue
)) {
2103 mptcp_data_lock(sk
);
2104 __mptcp_update_rmem(sk
);
2105 ret
|= __mptcp_ofo_queue(msk
);
2106 __mptcp_splice_receive_queue(sk
);
2107 mptcp_data_unlock(sk
);
2110 mptcp_check_data_fin((struct sock
*)msk
);
2111 return !skb_queue_empty(&msk
->receive_queue
);
2114 static unsigned int mptcp_inq_hint(const struct sock
*sk
)
2116 const struct mptcp_sock
*msk
= mptcp_sk(sk
);
2117 const struct sk_buff
*skb
;
2119 skb
= skb_peek(&msk
->receive_queue
);
2121 u64 hint_val
= msk
->ack_seq
- MPTCP_SKB_CB(skb
)->map_seq
;
2123 if (hint_val
>= INT_MAX
)
2126 return (unsigned int)hint_val
;
2129 if (sk
->sk_state
== TCP_CLOSE
|| (sk
->sk_shutdown
& RCV_SHUTDOWN
))
2135 static int mptcp_recvmsg(struct sock
*sk
, struct msghdr
*msg
, size_t len
,
2136 int flags
, int *addr_len
)
2138 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2139 struct scm_timestamping_internal tss
;
2140 int copied
= 0, cmsg_flags
= 0;
2144 /* MSG_ERRQUEUE is really a no-op till we support IP_RECVERR */
2145 if (unlikely(flags
& MSG_ERRQUEUE
))
2146 return inet_recv_error(sk
, msg
, len
, addr_len
);
2149 if (unlikely(sk
->sk_state
== TCP_LISTEN
)) {
2154 timeo
= sock_rcvtimeo(sk
, flags
& MSG_DONTWAIT
);
2156 len
= min_t(size_t, len
, INT_MAX
);
2157 target
= sock_rcvlowat(sk
, flags
& MSG_WAITALL
, len
);
2159 if (unlikely(msk
->recvmsg_inq
))
2160 cmsg_flags
= MPTCP_CMSG_INQ
;
2162 while (copied
< len
) {
2165 bytes_read
= __mptcp_recvmsg_mskq(msk
, msg
, len
- copied
, flags
, &tss
, &cmsg_flags
);
2166 if (unlikely(bytes_read
< 0)) {
2168 copied
= bytes_read
;
2172 copied
+= bytes_read
;
2174 /* be sure to advertise window change */
2175 mptcp_cleanup_rbuf(msk
);
2177 if (skb_queue_empty(&msk
->receive_queue
) && __mptcp_move_skbs(msk
))
2180 /* only the master socket status is relevant here. The exit
2181 * conditions mirror closely tcp_recvmsg()
2183 if (copied
>= target
)
2188 sk
->sk_state
== TCP_CLOSE
||
2189 (sk
->sk_shutdown
& RCV_SHUTDOWN
) ||
2191 signal_pending(current
))
2195 copied
= sock_error(sk
);
2199 if (sk
->sk_shutdown
& RCV_SHUTDOWN
) {
2200 /* race breaker: the shutdown could be after the
2201 * previous receive queue check
2203 if (__mptcp_move_skbs(msk
))
2208 if (sk
->sk_state
== TCP_CLOSE
) {
2218 if (signal_pending(current
)) {
2219 copied
= sock_intr_errno(timeo
);
2224 pr_debug("block timeout %ld", timeo
);
2225 sk_wait_data(sk
, &timeo
, NULL
);
2229 if (cmsg_flags
&& copied
>= 0) {
2230 if (cmsg_flags
& MPTCP_CMSG_TS
)
2231 tcp_recv_timestamp(msg
, sk
, &tss
);
2233 if (cmsg_flags
& MPTCP_CMSG_INQ
) {
2234 unsigned int inq
= mptcp_inq_hint(sk
);
2236 put_cmsg(msg
, SOL_TCP
, TCP_CM_INQ
, sizeof(inq
), &inq
);
2240 pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
2241 msk
, skb_queue_empty_lockless(&sk
->sk_receive_queue
),
2242 skb_queue_empty(&msk
->receive_queue
), copied
);
2243 if (!(flags
& MSG_PEEK
))
2244 mptcp_rcv_space_adjust(msk
, copied
);
2250 static void mptcp_retransmit_timer(struct timer_list
*t
)
2252 struct inet_connection_sock
*icsk
= from_timer(icsk
, t
,
2253 icsk_retransmit_timer
);
2254 struct sock
*sk
= &icsk
->icsk_inet
.sk
;
2255 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2258 if (!sock_owned_by_user(sk
)) {
2259 /* we need a process context to retransmit */
2260 if (!test_and_set_bit(MPTCP_WORK_RTX
, &msk
->flags
))
2261 mptcp_schedule_work(sk
);
2263 /* delegate our work to tcp_release_cb() */
2264 __set_bit(MPTCP_RETRANSMIT
, &msk
->cb_flags
);
2270 static void mptcp_tout_timer(struct timer_list
*t
)
2272 struct sock
*sk
= from_timer(sk
, t
, sk_timer
);
2274 mptcp_schedule_work(sk
);
2278 /* Find an idle subflow. Return NULL if there is unacked data at tcp
2281 * A backup subflow is returned only if that is the only kind available.
2283 struct sock
*mptcp_subflow_get_retrans(struct mptcp_sock
*msk
)
2285 struct sock
*backup
= NULL
, *pick
= NULL
;
2286 struct mptcp_subflow_context
*subflow
;
2287 int min_stale_count
= INT_MAX
;
2289 mptcp_for_each_subflow(msk
, subflow
) {
2290 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
2292 if (!__mptcp_subflow_active(subflow
))
2295 /* still data outstanding at TCP level? skip this */
2296 if (!tcp_rtx_and_write_queues_empty(ssk
)) {
2297 mptcp_pm_subflow_chk_stale(msk
, ssk
);
2298 min_stale_count
= min_t(int, min_stale_count
, subflow
->stale_count
);
2302 if (subflow
->backup
) {
2315 /* use backup only if there are no progresses anywhere */
2316 return min_stale_count
> 1 ? backup
: NULL
;
bool __mptcp_retransmit_pending_data(struct sock *sk)
{
	struct mptcp_data_frag *cur, *rtx_head;
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (__mptcp_check_fallback(msk))
		return false;

	if (tcp_rtx_and_write_queues_empty(sk))
		return false;

	/* the closing socket has some data untransmitted and/or unacked:
	 * some data in the mptcp rtx queue has not really xmitted yet.
	 * keep it simple and re-inject the whole mptcp level rtx queue
	 */
	mptcp_data_lock(sk);
	__mptcp_clean_una_wakeup(sk);
	rtx_head = mptcp_rtx_head(sk);
	if (!rtx_head) {
		mptcp_data_unlock(sk);
		return false;
	}

	msk->recovery_snd_nxt = msk->snd_nxt;
	msk->recovery = true;
	mptcp_data_unlock(sk);

	msk->first_pending = rtx_head;
	msk->snd_burst = 0;

	/* be sure to clear the "sent status" on all re-injected fragments */
	list_for_each_entry(cur, &msk->rtx_queue, list) {
		if (!cur->already_sent)
			break;
		cur->already_sent = 0;
	}

	return true;
}
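/* Note on the recovery mark set above: msk->recovery stays set until the
 * MPTCP-level snd_una advances past msk->recovery_snd_nxt (checked in
 * __mptcp_clean_una()), i.e. until every byte that was in flight when the
 * subflow went away has been acked again.
 */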
/* flags for __mptcp_close_ssk() */
#define MPTCP_CF_PUSH		BIT(1)
#define MPTCP_CF_FASTCLOSE	BIT(2)

/* be sure to send a reset only if the caller asked for it, also
 * clean completely the subflow status when the subflow reaches
 * TCP_CLOSE state
 */
static void __mptcp_subflow_disconnect(struct sock *ssk,
				       struct mptcp_subflow_context *subflow,
				       unsigned int flags)
{
	if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    (flags & MPTCP_CF_FASTCLOSE)) {
		/* The MPTCP code never wait on the subflow sockets, TCP-level
		 * disconnect should never fail
		 */
		WARN_ON_ONCE(tcp_disconnect(ssk, 0));
		mptcp_subflow_ctx_reset(subflow);
	} else {
		tcp_shutdown(ssk, SEND_SHUTDOWN);
	}
}
2383 /* subflow sockets can be either outgoing (connect) or incoming
2386 * Outgoing subflows use in-kernel sockets.
2387 * Incoming subflows do not have their own 'struct socket' allocated,
2388 * so we need to use tcp_close() after detaching them from the mptcp
2391 static void __mptcp_close_ssk(struct sock
*sk
, struct sock
*ssk
,
2392 struct mptcp_subflow_context
*subflow
,
2395 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2396 bool dispose_it
, need_push
= false;
2398 /* If the first subflow moved to a close state before accept, e.g. due
2399 * to an incoming reset or listener shutdown, the subflow socket is
2400 * already deleted by inet_child_forget() and the mptcp socket can't
2403 if (msk
->in_accept_queue
&& msk
->first
== ssk
&&
2404 (sock_flag(sk
, SOCK_DEAD
) || sock_flag(ssk
, SOCK_DEAD
))) {
2405 /* ensure later check in mptcp_worker() will dispose the msk */
2406 sock_set_flag(sk
, SOCK_DEAD
);
2407 mptcp_set_close_tout(sk
, tcp_jiffies32
- (mptcp_close_timeout(sk
) + 1));
2408 lock_sock_nested(ssk
, SINGLE_DEPTH_NESTING
);
2409 mptcp_subflow_drop_ctx(ssk
);
2413 dispose_it
= msk
->free_first
|| ssk
!= msk
->first
;
2415 list_del(&subflow
->node
);
2417 lock_sock_nested(ssk
, SINGLE_DEPTH_NESTING
);
2419 if ((flags
& MPTCP_CF_FASTCLOSE
) && !__mptcp_check_fallback(msk
)) {
2420 /* be sure to force the tcp_close path
2421 * to generate the egress reset
2423 ssk
->sk_lingertime
= 0;
2424 sock_set_flag(ssk
, SOCK_LINGER
);
2425 subflow
->send_fastclose
= 1;
2428 need_push
= (flags
& MPTCP_CF_PUSH
) && __mptcp_retransmit_pending_data(sk
);
2430 __mptcp_subflow_disconnect(ssk
, subflow
, flags
);
2436 subflow
->disposable
= 1;
2438 /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
2439 * the ssk has been already destroyed, we just need to release the
2440 * reference owned by msk;
2442 if (!inet_csk(ssk
)->icsk_ulp_ops
) {
2443 WARN_ON_ONCE(!sock_flag(ssk
, SOCK_DEAD
));
2444 kfree_rcu(subflow
, rcu
);
2446 /* otherwise tcp will dispose of the ssk and subflow ctx */
2447 __tcp_close(ssk
, 0);
2449 /* close acquired an extra ref */
2454 __mptcp_subflow_error_report(sk
, ssk
);
2459 if (ssk
== msk
->first
)
2460 WRITE_ONCE(msk
->first
, NULL
);
2463 __mptcp_sync_sndbuf(sk
);
2465 __mptcp_push_pending(sk
, 0);
2467 /* Catch every 'all subflows closed' scenario, including peers silently
2468 * closing them, e.g. due to timeout.
2469 * For established sockets, allow an additional timeout before closing,
2470 * as the protocol can still create more subflows.
2472 if (list_is_singular(&msk
->conn_list
) && msk
->first
&&
2473 inet_sk_state_load(msk
->first
) == TCP_CLOSE
) {
2474 if (sk
->sk_state
!= TCP_ESTABLISHED
||
2475 msk
->in_accept_queue
|| sock_flag(sk
, SOCK_DEAD
)) {
2476 inet_sk_state_store(sk
, TCP_CLOSE
);
2477 mptcp_close_wake_up(sk
);
2479 mptcp_start_tout_timer(sk
);
2484 void mptcp_close_ssk(struct sock
*sk
, struct sock
*ssk
,
2485 struct mptcp_subflow_context
*subflow
)
2487 if (sk
->sk_state
== TCP_ESTABLISHED
)
2488 mptcp_event(MPTCP_EVENT_SUB_CLOSED
, mptcp_sk(sk
), ssk
, GFP_KERNEL
);
2490 /* subflow aborted before reaching the fully_established status
2491 * attempt the creation of the next subflow
2493 mptcp_pm_subflow_check_next(mptcp_sk(sk
), subflow
);
2495 __mptcp_close_ssk(sk
, ssk
, subflow
, MPTCP_CF_PUSH
);
2498 static unsigned int mptcp_sync_mss(struct sock
*sk
, u32 pmtu
)
static void __mptcp_close_subflow(struct sock *sk)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);

	might_sleep();

	mptcp_for_each_subflow_safe(msk, subflow, tmp) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		if (inet_sk_state_load(ssk) != TCP_CLOSE)
			continue;

		/* 'subflow_data_ready' will re-sched once rx queue is empty */
		if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
			continue;

		mptcp_close_ssk(sk, ssk, subflow);
	}
}
static bool mptcp_close_tout_expired(const struct sock *sk)
{
	if (!inet_csk(sk)->icsk_mtup.probe_timestamp ||
	    sk->sk_state == TCP_CLOSE)
		return false;

	return time_after32(tcp_jiffies32,
			    inet_csk(sk)->icsk_mtup.probe_timestamp + mptcp_close_timeout(sk));
}
2535 static void mptcp_check_fastclose(struct mptcp_sock
*msk
)
2537 struct mptcp_subflow_context
*subflow
, *tmp
;
2538 struct sock
*sk
= (struct sock
*)msk
;
2540 if (likely(!READ_ONCE(msk
->rcv_fastclose
)))
2543 mptcp_token_destroy(msk
);
2545 mptcp_for_each_subflow_safe(msk
, subflow
, tmp
) {
2546 struct sock
*tcp_sk
= mptcp_subflow_tcp_sock(subflow
);
2549 slow
= lock_sock_fast(tcp_sk
);
2550 if (tcp_sk
->sk_state
!= TCP_CLOSE
) {
2551 tcp_send_active_reset(tcp_sk
, GFP_ATOMIC
);
2552 tcp_set_state(tcp_sk
, TCP_CLOSE
);
2554 unlock_sock_fast(tcp_sk
, slow
);
2557 /* Mirror the tcp_reset() error propagation */
2558 switch (sk
->sk_state
) {
2560 WRITE_ONCE(sk
->sk_err
, ECONNREFUSED
);
2562 case TCP_CLOSE_WAIT
:
2563 WRITE_ONCE(sk
->sk_err
, EPIPE
);
2568 WRITE_ONCE(sk
->sk_err
, ECONNRESET
);
2571 inet_sk_state_store(sk
, TCP_CLOSE
);
2572 WRITE_ONCE(sk
->sk_shutdown
, SHUTDOWN_MASK
);
2573 smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
2574 set_bit(MPTCP_WORK_CLOSE_SUBFLOW
, &msk
->flags
);
2576 /* the calling mptcp_worker will properly destroy the socket */
2577 if (sock_flag(sk
, SOCK_DEAD
))
2580 sk
->sk_state_change(sk
);
2581 sk_error_report(sk
);
2584 static void __mptcp_retrans(struct sock
*sk
)
2586 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2587 struct mptcp_subflow_context
*subflow
;
2588 struct mptcp_sendmsg_info info
= {};
2589 struct mptcp_data_frag
*dfrag
;
2594 mptcp_clean_una_wakeup(sk
);
2596 /* first check ssk: need to kick "stale" logic */
2597 err
= mptcp_sched_get_retrans(msk
);
2598 dfrag
= mptcp_rtx_head(sk
);
2600 if (mptcp_data_fin_enabled(msk
)) {
2601 struct inet_connection_sock
*icsk
= inet_csk(sk
);
2603 icsk
->icsk_retransmits
++;
2604 mptcp_set_datafin_timeout(sk
);
2605 mptcp_send_ack(msk
);
2610 if (!mptcp_send_head(sk
))
2619 mptcp_for_each_subflow(msk
, subflow
) {
2620 if (READ_ONCE(subflow
->scheduled
)) {
2623 mptcp_subflow_set_scheduled(subflow
, false);
2625 ssk
= mptcp_subflow_tcp_sock(subflow
);
2629 /* limit retransmission to the bytes already sent on some subflows */
2631 info
.limit
= READ_ONCE(msk
->csum_enabled
) ? dfrag
->data_len
:
2632 dfrag
->already_sent
;
2633 while (info
.sent
< info
.limit
) {
2634 ret
= mptcp_sendmsg_frag(sk
, ssk
, dfrag
, &info
);
2638 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_RETRANSSEGS
);
2643 len
= max(copied
, len
);
2644 tcp_push(ssk
, 0, info
.mss_now
, tcp_sk(ssk
)->nonagle
,
2646 WRITE_ONCE(msk
->allow_infinite_fallback
, false);
2653 msk
->bytes_retrans
+= len
;
2654 dfrag
->already_sent
= max(dfrag
->already_sent
, len
);
2657 mptcp_check_and_set_pending(sk
);
2659 if (!mptcp_rtx_timer_pending(sk
))
2660 mptcp_reset_rtx_timer(sk
);
/* schedule the timeout timer for the relevant event: either close timeout
 * or mp_fail timeout. The close timeout takes precedence on the mp_fail one
 */
void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
{
	struct sock *sk = (struct sock *)msk;
	unsigned long timeout, close_timeout;

	if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
		return;

	close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies +
			mptcp_close_timeout(sk);

	/* the close timeout takes precedence on the fail one, and here at least one of
	 * them is active
	 */
	timeout = inet_csk(sk)->icsk_mtup.probe_timestamp ? close_timeout : fail_tout;

	sk_reset_timer(sk, &sk->sk_timer, timeout);
}

static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
{
	struct sock *ssk = msk->first;
	bool slow;

	if (!ssk)
		return;

	pr_debug("MP_FAIL doesn't respond, reset the subflow");

	slow = lock_sock_fast(ssk);
	mptcp_subflow_reset(ssk);
	WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
	unlock_sock_fast(ssk, slow);
}
static void mptcp_do_fastclose(struct sock *sk)
{
	struct mptcp_subflow_context *subflow, *tmp;
	struct mptcp_sock *msk = mptcp_sk(sk);

	inet_sk_state_store(sk, TCP_CLOSE);
	mptcp_for_each_subflow_safe(msk, subflow, tmp)
		__mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow),
				  subflow, MPTCP_CF_FASTCLOSE);
}
static void mptcp_worker(struct work_struct *work)
{
	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
	struct sock *sk = (struct sock *)msk;
	unsigned long fail_tout;
	int state;

	lock_sock(sk);
	state = sk->sk_state;
	if (unlikely((1 << state) & (TCPF_CLOSE | TCPF_LISTEN)))
		goto unlock;

	mptcp_check_fastclose(msk);

	mptcp_pm_nl_work(msk);

	mptcp_check_send_data_fin(sk);
	mptcp_check_data_fin_ack(sk);
	mptcp_check_data_fin(sk);

	if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
		__mptcp_close_subflow(sk);

	if (mptcp_close_tout_expired(sk)) {
		mptcp_do_fastclose(sk);
		mptcp_close_wake_up(sk);
	}

	if (sock_flag(sk, SOCK_DEAD) && sk->sk_state == TCP_CLOSE) {
		__mptcp_destroy_sock(sk);
		goto unlock;
	}

	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
		__mptcp_retrans(sk);

	fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0;
	if (fail_tout && time_after(jiffies, fail_tout))
		mptcp_mp_fail_no_response(msk);

unlock:
	release_sock(sk);
	sock_put(sk);
}
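/* The worker above runs in process context only: timers and softirq paths set
 * the MPTCP_WORK_* bits and call mptcp_schedule_work(), which takes an extra
 * sock reference when it queues msk->work; the final sock_put() in
 * mptcp_worker() releases that reference.
 */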
static void __mptcp_init_sock(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	INIT_LIST_HEAD(&msk->conn_list);
	INIT_LIST_HEAD(&msk->join_list);
	INIT_LIST_HEAD(&msk->rtx_queue);
	INIT_WORK(&msk->work, mptcp_worker);
	__skb_queue_head_init(&msk->receive_queue);
	msk->out_of_order_queue = RB_ROOT;
	msk->first_pending = NULL;
	msk->rmem_fwd_alloc = 0;
	WRITE_ONCE(msk->rmem_released, 0);
	msk->timer_ival = TCP_RTO_MIN;
	msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;

	WRITE_ONCE(msk->first, NULL);
	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
	WRITE_ONCE(msk->allow_infinite_fallback, true);
	msk->recovery = false;
	msk->subflow_id = 1;

	mptcp_pm_data_init(msk);

	/* re-use the csk retrans timer for MPTCP-level retrans */
	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
	timer_setup(&sk->sk_timer, mptcp_tout_timer, 0);
}
static void mptcp_ca_reset(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_assign_congestion_control(sk);
	strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);

	/* no need to keep a reference to the ops, the name will suffice */
	tcp_cleanup_congestion_control(sk);
	icsk->icsk_ca_ops = NULL;
}
2799 static int mptcp_init_sock(struct sock
*sk
)
2801 struct net
*net
= sock_net(sk
);
2804 __mptcp_init_sock(sk
);
2806 if (!mptcp_is_enabled(net
))
2807 return -ENOPROTOOPT
;
2809 if (unlikely(!net
->mib
.mptcp_statistics
) && !mptcp_mib_alloc(net
))
2812 ret
= mptcp_init_sched(mptcp_sk(sk
),
2813 mptcp_sched_find(mptcp_get_scheduler(net
)));
2817 set_bit(SOCK_CUSTOM_SOCKOPT
, &sk
->sk_socket
->flags
);
2819 /* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
2820 * propagate the correct value
2824 sk_sockets_allocated_inc(sk
);
2825 sk
->sk_rcvbuf
= READ_ONCE(net
->ipv4
.sysctl_tcp_rmem
[1]);
2826 sk
->sk_sndbuf
= READ_ONCE(net
->ipv4
.sysctl_tcp_wmem
[1]);
static void __mptcp_clear_xmit(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;

	WRITE_ONCE(msk->first_pending, NULL);
	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
		dfrag_clear(sk, dfrag);
}

void mptcp_cancel_work(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (cancel_work_sync(&msk->work))
		__sock_put(sk);
}
2849 void mptcp_subflow_shutdown(struct sock
*sk
, struct sock
*ssk
, int how
)
2853 switch (ssk
->sk_state
) {
2855 if (!(how
& RCV_SHUTDOWN
))
2859 WARN_ON_ONCE(tcp_disconnect(ssk
, O_NONBLOCK
));
2862 if (__mptcp_check_fallback(mptcp_sk(sk
))) {
2863 pr_debug("Fallback");
2864 ssk
->sk_shutdown
|= how
;
2865 tcp_shutdown(ssk
, how
);
2867 /* simulate the data_fin ack reception to let the state
2868 * machine move forward
2870 WRITE_ONCE(mptcp_sk(sk
)->snd_una
, mptcp_sk(sk
)->snd_nxt
);
2871 mptcp_schedule_work(sk
);
2873 pr_debug("Sending DATA_FIN on subflow %p", ssk
);
2875 if (!mptcp_rtx_timer_pending(sk
))
2876 mptcp_reset_rtx_timer(sk
);
static const unsigned char new_state[16] = {
	/* current state:     new state:      action:	*/
	[0 /* (Invalid) */] = TCP_CLOSE,
	[TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_SYN_SENT] = TCP_CLOSE,
	[TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
	[TCP_FIN_WAIT1] = TCP_FIN_WAIT1,
	[TCP_FIN_WAIT2] = TCP_FIN_WAIT2,
	[TCP_TIME_WAIT] = TCP_CLOSE,	/* should not happen ! */
	[TCP_CLOSE] = TCP_CLOSE,
	[TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN,
	[TCP_LAST_ACK] = TCP_LAST_ACK,
	[TCP_LISTEN] = TCP_CLOSE,
	[TCP_CLOSING] = TCP_CLOSING,
	[TCP_NEW_SYN_RECV] = TCP_CLOSE,	/* should not happen ! */
};

static int mptcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	inet_sk_state_store(sk, ns);

	return next & TCP_ACTION_FIN;
}
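/* Worked example of the encoding above: new_state[TCP_ESTABLISHED] is
 * TCP_FIN_WAIT1 | TCP_ACTION_FIN, so mptcp_close_state() moves an established
 * msk to TCP_FIN_WAIT1 (next & TCP_STATE_MASK) and returns non-zero, telling
 * the caller to emit a DATA_FIN; new_state[TCP_FIN_WAIT2] maps to itself with
 * no action bit, so the state is left as-is and nothing is sent.
 */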
static void mptcp_check_send_data_fin(struct sock *sk)
{
	struct mptcp_subflow_context *subflow;
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
		 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
		 msk->snd_nxt, msk->write_seq);

	/* we still need to enqueue subflows or not really shutting down,
	 * skip this
	 */
	if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
	    mptcp_send_head(sk))
		return;

	WRITE_ONCE(msk->snd_nxt, msk->write_seq);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);

		mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN);
	}
}
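/* Example of the sequence space accounting checked above: if the last data
 * byte ended with write_seq == 1000, __mptcp_wr_shutdown() below bumps
 * write_seq to 1001 so that the DATA_FIN consumes one unit of MPTCP sequence
 * space; the DATA_FIN is then propagated to the subflows only once
 * snd_nxt + 1 == write_seq and no data is left pending.
 */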
static void __mptcp_wr_shutdown(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
		 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
		 !!mptcp_send_head(sk));

	/* will be ignored by fallback sockets */
	WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
	WRITE_ONCE(msk->snd_data_fin_enable, 1);

	mptcp_check_send_data_fin(sk);
}
2951 static void __mptcp_destroy_sock(struct sock
*sk
)
2953 struct mptcp_sock
*msk
= mptcp_sk(sk
);
2955 pr_debug("msk=%p", msk
);
2959 mptcp_stop_rtx_timer(sk
);
2960 sk_stop_timer(sk
, &sk
->sk_timer
);
2962 mptcp_release_sched(msk
);
2964 sk
->sk_prot
->destroy(sk
);
2966 WARN_ON_ONCE(msk
->rmem_fwd_alloc
);
2967 WARN_ON_ONCE(msk
->rmem_released
);
2968 sk_stream_kill_queues(sk
);
2969 xfrm_sk_free_policy(sk
);
2974 void __mptcp_unaccepted_force_close(struct sock
*sk
)
2976 sock_set_flag(sk
, SOCK_DEAD
);
2977 mptcp_do_fastclose(sk
);
2978 __mptcp_destroy_sock(sk
);
static __poll_t mptcp_check_readable(struct sock *sk)
{
	return mptcp_epollin_ready(sk) ? EPOLLIN | EPOLLRDNORM : 0;
}
2986 static void mptcp_check_listen_stop(struct sock
*sk
)
2990 if (inet_sk_state_load(sk
) != TCP_LISTEN
)
2993 sock_prot_inuse_add(sock_net(sk
), sk
->sk_prot
, -1);
2994 ssk
= mptcp_sk(sk
)->first
;
2995 if (WARN_ON_ONCE(!ssk
|| inet_sk_state_load(ssk
) != TCP_LISTEN
))
2998 lock_sock_nested(ssk
, SINGLE_DEPTH_NESTING
);
2999 tcp_set_state(ssk
, TCP_CLOSE
);
3000 mptcp_subflow_queue_clean(sk
, ssk
);
3001 inet_csk_listen_stop(ssk
);
3002 mptcp_event_pm_listener(ssk
, MPTCP_EVENT_LISTENER_CLOSED
);
3006 bool __mptcp_close(struct sock
*sk
, long timeout
)
3008 struct mptcp_subflow_context
*subflow
;
3009 struct mptcp_sock
*msk
= mptcp_sk(sk
);
3010 bool do_cancel_work
= false;
3011 int subflows_alive
= 0;
3013 WRITE_ONCE(sk
->sk_shutdown
, SHUTDOWN_MASK
);
3015 if ((1 << sk
->sk_state
) & (TCPF_LISTEN
| TCPF_CLOSE
)) {
3016 mptcp_check_listen_stop(sk
);
3017 inet_sk_state_store(sk
, TCP_CLOSE
);
3021 if (mptcp_data_avail(msk
) || timeout
< 0) {
3022 /* If the msk has read data, or the caller explicitly ask it,
3023 * do the MPTCP equivalent of TCP reset, aka MPTCP fastclose
3025 mptcp_do_fastclose(sk
);
3027 } else if (mptcp_close_state(sk
)) {
3028 __mptcp_wr_shutdown(sk
);
3031 sk_stream_wait_close(sk
, timeout
);
3034 /* orphan all the subflows */
3035 mptcp_for_each_subflow(msk
, subflow
) {
3036 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
3037 bool slow
= lock_sock_fast_nested(ssk
);
3039 subflows_alive
+= ssk
->sk_state
!= TCP_CLOSE
;
3041 /* since the close timeout takes precedence on the fail one,
3044 if (ssk
== msk
->first
)
3045 subflow
->fail_tout
= 0;
3047 /* detach from the parent socket, but allow data_ready to
3048 * push incoming data into the mptcp stack, to properly ack it
3050 ssk
->sk_socket
= NULL
;
3052 unlock_sock_fast(ssk
, slow
);
3056 /* all the subflows are closed, only timeout can change the msk
3057 * state, let's not keep resources busy for no reasons
3059 if (subflows_alive
== 0)
3060 inet_sk_state_store(sk
, TCP_CLOSE
);
3063 pr_debug("msk=%p state=%d", sk
, sk
->sk_state
);
3065 mptcp_event(MPTCP_EVENT_CLOSED
, msk
, NULL
, GFP_KERNEL
);
3067 if (sk
->sk_state
== TCP_CLOSE
) {
3068 __mptcp_destroy_sock(sk
);
3069 do_cancel_work
= true;
3071 mptcp_start_tout_timer(sk
);
3074 return do_cancel_work
;
3077 static void mptcp_close(struct sock
*sk
, long timeout
)
3079 bool do_cancel_work
;
3083 do_cancel_work
= __mptcp_close(sk
, timeout
);
3086 mptcp_cancel_work(sk
);
3091 static void mptcp_copy_inaddrs(struct sock
*msk
, const struct sock
*ssk
)
3093 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3094 const struct ipv6_pinfo
*ssk6
= inet6_sk(ssk
);
3095 struct ipv6_pinfo
*msk6
= inet6_sk(msk
);
3097 msk
->sk_v6_daddr
= ssk
->sk_v6_daddr
;
3098 msk
->sk_v6_rcv_saddr
= ssk
->sk_v6_rcv_saddr
;
3101 msk6
->saddr
= ssk6
->saddr
;
3102 msk6
->flow_label
= ssk6
->flow_label
;
3106 inet_sk(msk
)->inet_num
= inet_sk(ssk
)->inet_num
;
3107 inet_sk(msk
)->inet_dport
= inet_sk(ssk
)->inet_dport
;
3108 inet_sk(msk
)->inet_sport
= inet_sk(ssk
)->inet_sport
;
3109 inet_sk(msk
)->inet_daddr
= inet_sk(ssk
)->inet_daddr
;
3110 inet_sk(msk
)->inet_saddr
= inet_sk(ssk
)->inet_saddr
;
3111 inet_sk(msk
)->inet_rcv_saddr
= inet_sk(ssk
)->inet_rcv_saddr
;
3114 static int mptcp_disconnect(struct sock
*sk
, int flags
)
3116 struct mptcp_sock
*msk
= mptcp_sk(sk
);
3118 /* We are on the fastopen error path. We can't call straight into the
3119 * subflows cleanup code due to lock nesting (we are already under
3120 * msk->firstsocket lock).
3122 if (msk
->fastopening
)
3125 mptcp_check_listen_stop(sk
);
3126 inet_sk_state_store(sk
, TCP_CLOSE
);
3128 mptcp_stop_rtx_timer(sk
);
3129 mptcp_stop_tout_timer(sk
);
3132 mptcp_event(MPTCP_EVENT_CLOSED
, msk
, NULL
, GFP_KERNEL
);
3134 /* msk->subflow is still intact, the following will not free the first
3137 mptcp_destroy_common(msk
, MPTCP_CF_FASTCLOSE
);
3138 WRITE_ONCE(msk
->flags
, 0);
3140 msk
->push_pending
= 0;
3141 msk
->recovery
= false;
3142 msk
->can_ack
= false;
3143 msk
->fully_established
= false;
3144 msk
->rcv_data_fin
= false;
3145 msk
->snd_data_fin_enable
= false;
3146 msk
->rcv_fastclose
= false;
3147 msk
->use_64bit_ack
= false;
3148 msk
->bytes_consumed
= 0;
3149 WRITE_ONCE(msk
->csum_enabled
, mptcp_is_checksum_enabled(sock_net(sk
)));
3150 mptcp_pm_data_reset(msk
);
3152 msk
->bytes_acked
= 0;
3153 msk
->bytes_received
= 0;
3154 msk
->bytes_sent
= 0;
3155 msk
->bytes_retrans
= 0;
3157 WRITE_ONCE(sk
->sk_shutdown
, 0);
3158 sk_error_report(sk
);
3162 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3163 static struct ipv6_pinfo
*mptcp_inet6_sk(const struct sock
*sk
)
3165 unsigned int offset
= sizeof(struct mptcp6_sock
) - sizeof(struct ipv6_pinfo
);
3167 return (struct ipv6_pinfo
*)(((u8
*)sk
) + offset
);
3171 struct sock
*mptcp_sk_clone_init(const struct sock
*sk
,
3172 const struct mptcp_options_received
*mp_opt
,
3174 struct request_sock
*req
)
3176 struct mptcp_subflow_request_sock
*subflow_req
= mptcp_subflow_rsk(req
);
3177 struct sock
*nsk
= sk_clone_lock(sk
, GFP_ATOMIC
);
3178 struct mptcp_sock
*msk
;
3183 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3184 if (nsk
->sk_family
== AF_INET6
)
3185 inet_sk(nsk
)->pinet6
= mptcp_inet6_sk(nsk
);
3188 __mptcp_init_sock(nsk
);
3190 msk
= mptcp_sk(nsk
);
3191 msk
->local_key
= subflow_req
->local_key
;
3192 msk
->token
= subflow_req
->token
;
3193 msk
->in_accept_queue
= 1;
3194 WRITE_ONCE(msk
->fully_established
, false);
3195 if (mp_opt
->suboptions
& OPTION_MPTCP_CSUMREQD
)
3196 WRITE_ONCE(msk
->csum_enabled
, true);
3198 msk
->write_seq
= subflow_req
->idsn
+ 1;
3199 msk
->snd_nxt
= msk
->write_seq
;
3200 msk
->snd_una
= msk
->write_seq
;
3201 msk
->wnd_end
= msk
->snd_nxt
+ req
->rsk_rcv_wnd
;
3202 msk
->setsockopt_seq
= mptcp_sk(sk
)->setsockopt_seq
;
3203 mptcp_init_sched(msk
, mptcp_sk(sk
)->sched
);
3205 /* passive msk is created after the first/MPC subflow */
3206 msk
->subflow_id
= 2;
3208 sock_reset_flag(nsk
, SOCK_RCU_FREE
);
3209 security_inet_csk_clone(nsk
, req
);
3211 /* this can't race with mptcp_close(), as the msk is
3212 * not yet exposted to user-space
3214 inet_sk_state_store(nsk
, TCP_ESTABLISHED
);
3216 /* The msk maintain a ref to each subflow in the connections list */
3217 WRITE_ONCE(msk
->first
, ssk
);
3218 list_add(&mptcp_subflow_ctx(ssk
)->node
, &msk
->conn_list
);
3221 /* new mpc subflow takes ownership of the newly
3222 * created mptcp socket
3224 mptcp_token_accept(subflow_req
, msk
);
3226 /* set msk addresses early to ensure mptcp_pm_get_local_id()
3227 * uses the correct data
3229 mptcp_copy_inaddrs(nsk
, ssk
);
3230 __mptcp_propagate_sndbuf(nsk
, ssk
);
3232 mptcp_rcv_space_init(msk
, ssk
);
3233 bh_unlock_sock(nsk
);
	/* note: the newly allocated socket refcount is 2 now */
	return nsk;
}

void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
{
	const struct tcp_sock *tp = tcp_sk(ssk);

	msk->rcvq_space.copied = 0;
	msk->rcvq_space.rtt_us = 0;

	msk->rcvq_space.time = tp->tcp_mstamp;

	/* initial rcv_space offering made to peer */
	msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
				      TCP_INIT_CWND * tp->advmss);
	if (msk->rcvq_space.space == 0)
		msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;

	WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd);
}
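/* Example of the initial offer above: with the common advmss of 1460 bytes it
 * is min(rcv_wnd, TCP_INIT_CWND * 1460) = min(rcv_wnd, 14600) bytes; the
 * TCP_INIT_CWND * TCP_MSS_DEFAULT fallback (10 * 536 = 5360 bytes) is only
 * used when the computed value would otherwise be zero.
 */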
3257 static struct sock
*mptcp_accept(struct sock
*ssk
, int flags
, int *err
,
3262 pr_debug("ssk=%p, listener=%p", ssk
, mptcp_subflow_ctx(ssk
));
3263 newsk
= inet_csk_accept(ssk
, flags
, err
, kern
);
3267 pr_debug("newsk=%p, subflow is mptcp=%d", newsk
, sk_is_mptcp(newsk
));
3268 if (sk_is_mptcp(newsk
)) {
3269 struct mptcp_subflow_context
*subflow
;
3270 struct sock
*new_mptcp_sock
;
3272 subflow
= mptcp_subflow_ctx(newsk
);
3273 new_mptcp_sock
= subflow
->conn
;
3275 /* is_mptcp should be false if subflow->conn is missing, see
3276 * subflow_syn_recv_sock()
3278 if (WARN_ON_ONCE(!new_mptcp_sock
)) {
3279 tcp_sk(newsk
)->is_mptcp
= 0;
3283 newsk
= new_mptcp_sock
;
3284 MPTCP_INC_STATS(sock_net(ssk
), MPTCP_MIB_MPCAPABLEPASSIVEACK
);
3286 MPTCP_INC_STATS(sock_net(ssk
),
3287 MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK
);
3291 newsk
->sk_kern_sock
= kern
;
3295 void mptcp_destroy_common(struct mptcp_sock
*msk
, unsigned int flags
)
3297 struct mptcp_subflow_context
*subflow
, *tmp
;
3298 struct sock
*sk
= (struct sock
*)msk
;
3300 __mptcp_clear_xmit(sk
);
3302 /* join list will be eventually flushed (with rst) at sock lock release time */
3303 mptcp_for_each_subflow_safe(msk
, subflow
, tmp
)
3304 __mptcp_close_ssk(sk
, mptcp_subflow_tcp_sock(subflow
), subflow
, flags
);
3306 /* move to sk_receive_queue, sk_stream_kill_queues will purge it */
3307 mptcp_data_lock(sk
);
3308 skb_queue_splice_tail_init(&msk
->receive_queue
, &sk
->sk_receive_queue
);
3309 __skb_queue_purge(&sk
->sk_receive_queue
);
3310 skb_rbtree_purge(&msk
->out_of_order_queue
);
3311 mptcp_data_unlock(sk
);
3313 /* move all the rx fwd alloc into the sk_mem_reclaim_final in
3314 * inet_sock_destruct() will dispose it
3316 sk_forward_alloc_add(sk
, msk
->rmem_fwd_alloc
);
3317 WRITE_ONCE(msk
->rmem_fwd_alloc
, 0);
3318 mptcp_token_destroy(msk
);
3319 mptcp_pm_free_anno_list(msk
);
3320 mptcp_free_local_addr_list(msk
);
3323 static void mptcp_destroy(struct sock
*sk
)
3325 struct mptcp_sock
*msk
= mptcp_sk(sk
);
3327 /* allow the following to close even the initial subflow */
3328 msk
->free_first
= 1;
3329 mptcp_destroy_common(msk
, 0);
3330 sk_sockets_allocated_dec(sk
);
3333 void __mptcp_data_acked(struct sock
*sk
)
3335 if (!sock_owned_by_user(sk
))
3336 __mptcp_clean_una(sk
);
3338 __set_bit(MPTCP_CLEAN_UNA
, &mptcp_sk(sk
)->cb_flags
);
3340 if (mptcp_pending_data_fin_ack(sk
))
3341 mptcp_schedule_work(sk
);
3344 void __mptcp_check_push(struct sock
*sk
, struct sock
*ssk
)
3346 if (!mptcp_send_head(sk
))
3349 if (!sock_owned_by_user(sk
))
3350 __mptcp_subflow_push_pending(sk
, ssk
, false);
3352 __set_bit(MPTCP_PUSH_PENDING
, &mptcp_sk(sk
)->cb_flags
);
3355 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
3356 BIT(MPTCP_RETRANSMIT) | \
3357 BIT(MPTCP_FLUSH_JOIN_LIST))
3359 /* processes deferred events and flush wmem */
3360 static void mptcp_release_cb(struct sock
*sk
)
3361 __must_hold(&sk
->sk_lock
.slock
)
3363 struct mptcp_sock
*msk
= mptcp_sk(sk
);
3366 unsigned long flags
= (msk
->cb_flags
& MPTCP_FLAGS_PROCESS_CTX_NEED
) |
3368 struct list_head join_list
;
3373 INIT_LIST_HEAD(&join_list
);
3374 list_splice_init(&msk
->join_list
, &join_list
);
3376 /* the following actions acquire the subflow socket lock
3378 * 1) can't be invoked in atomic scope
3379 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
3380 * datapath acquires the msk socket spinlock while helding
3381 * the subflow socket lock
3383 msk
->push_pending
= 0;
3384 msk
->cb_flags
&= ~flags
;
3385 spin_unlock_bh(&sk
->sk_lock
.slock
);
3387 if (flags
& BIT(MPTCP_FLUSH_JOIN_LIST
))
3388 __mptcp_flush_join_list(sk
, &join_list
);
3389 if (flags
& BIT(MPTCP_PUSH_PENDING
))
3390 __mptcp_push_pending(sk
, 0);
3391 if (flags
& BIT(MPTCP_RETRANSMIT
))
3392 __mptcp_retrans(sk
);
3395 spin_lock_bh(&sk
->sk_lock
.slock
);
3398 if (__test_and_clear_bit(MPTCP_CLEAN_UNA
, &msk
->cb_flags
))
3399 __mptcp_clean_una_wakeup(sk
);
3400 if (unlikely(msk
->cb_flags
)) {
3401 /* be sure to set the current sk state before tacking actions
3402 * depending on sk_state, that is processing MPTCP_ERROR_REPORT
3404 if (__test_and_clear_bit(MPTCP_CONNECTED
, &msk
->cb_flags
))
3405 __mptcp_set_connected(sk
);
3406 if (__test_and_clear_bit(MPTCP_ERROR_REPORT
, &msk
->cb_flags
))
3407 __mptcp_error_report(sk
);
3408 if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF
, &msk
->cb_flags
))
3409 __mptcp_sync_sndbuf(sk
);
3412 __mptcp_update_rmem(sk
);
3415 /* MP_JOIN client subflow must wait for 4th ack before sending any data:
3416 * TCP can't schedule delack timer before the subflow is fully established.
3417 * MPTCP uses the delack timer to do 3rd ack retransmissions
3419 static void schedule_3rdack_retransmission(struct sock
*ssk
)
3421 struct inet_connection_sock
*icsk
= inet_csk(ssk
);
3422 struct tcp_sock
*tp
= tcp_sk(ssk
);
3423 unsigned long timeout
;
3425 if (mptcp_subflow_ctx(ssk
)->fully_established
)
3428 /* reschedule with a timeout above RTT, as we must look only for drop */
3430 timeout
= usecs_to_jiffies(tp
->srtt_us
>> (3 - 1));
3432 timeout
= TCP_TIMEOUT_INIT
;
3435 WARN_ON_ONCE(icsk
->icsk_ack
.pending
& ICSK_ACK_TIMER
);
3436 icsk
->icsk_ack
.pending
|= ICSK_ACK_SCHED
| ICSK_ACK_TIMER
;
3437 icsk
->icsk_ack
.timeout
= timeout
;
3438 sk_reset_timer(ssk
, &icsk
->icsk_delack_timer
, timeout
);
3441 void mptcp_subflow_process_delegated(struct sock
*ssk
, long status
)
3443 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
3444 struct sock
*sk
= subflow
->conn
;
3446 if (status
& BIT(MPTCP_DELEGATE_SEND
)) {
3447 mptcp_data_lock(sk
);
3448 if (!sock_owned_by_user(sk
))
3449 __mptcp_subflow_push_pending(sk
, ssk
, true);
3451 __set_bit(MPTCP_PUSH_PENDING
, &mptcp_sk(sk
)->cb_flags
);
3452 mptcp_data_unlock(sk
);
3454 if (status
& BIT(MPTCP_DELEGATE_SNDBUF
)) {
3455 mptcp_data_lock(sk
);
3456 if (!sock_owned_by_user(sk
))
3457 __mptcp_sync_sndbuf(sk
);
3459 __set_bit(MPTCP_SYNC_SNDBUF
, &mptcp_sk(sk
)->cb_flags
);
3460 mptcp_data_unlock(sk
);
3462 if (status
& BIT(MPTCP_DELEGATE_ACK
))
3463 schedule_3rdack_retransmission(ssk
);
3466 static int mptcp_hash(struct sock
*sk
)
3468 /* should never be called,
3469 * we hash the TCP subflows not the master socket
3475 static void mptcp_unhash(struct sock
*sk
)
3477 /* called from sk_common_release(), but nothing to do here */
3480 static int mptcp_get_port(struct sock
*sk
, unsigned short snum
)
3482 struct mptcp_sock
*msk
= mptcp_sk(sk
);
3484 pr_debug("msk=%p, ssk=%p", msk
, msk
->first
);
3485 if (WARN_ON_ONCE(!msk
->first
))
3488 return inet_csk_get_port(msk
->first
, snum
);
3491 void mptcp_finish_connect(struct sock
*ssk
)
3493 struct mptcp_subflow_context
*subflow
;
3494 struct mptcp_sock
*msk
;
3497 subflow
= mptcp_subflow_ctx(ssk
);
3501 pr_debug("msk=%p, token=%u", sk
, subflow
->token
);
3503 subflow
->map_seq
= subflow
->iasn
;
3504 subflow
->map_subflow_seq
= 1;
3506 /* the socket is not connected yet, no msk/subflow ops can access/race
3507 * accessing the field below
3509 WRITE_ONCE(msk
->local_key
, subflow
->local_key
);
3510 WRITE_ONCE(msk
->write_seq
, subflow
->idsn
+ 1);
3511 WRITE_ONCE(msk
->snd_nxt
, msk
->write_seq
);
3512 WRITE_ONCE(msk
->snd_una
, msk
->write_seq
);
3514 mptcp_pm_new_connection(msk
, ssk
, 0);
3516 mptcp_rcv_space_init(msk
, ssk
);
void mptcp_sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	rcu_assign_pointer(sk->sk_wq, &parent->wq);
	sk_set_socket(sk, parent);
	sk->sk_uid = SOCK_INODE(parent)->i_uid;
	write_unlock_bh(&sk->sk_callback_lock);
}
3528 bool mptcp_finish_join(struct sock
*ssk
)
3530 struct mptcp_subflow_context
*subflow
= mptcp_subflow_ctx(ssk
);
3531 struct mptcp_sock
*msk
= mptcp_sk(subflow
->conn
);
3532 struct sock
*parent
= (void *)msk
;
3535 pr_debug("msk=%p, subflow=%p", msk
, subflow
);
3537 /* mptcp socket already closing? */
3538 if (!mptcp_is_fully_established(parent
)) {
3539 subflow
->reset_reason
= MPTCP_RST_EMPTCP
;
3543 /* active subflow, already present inside the conn_list */
3544 if (!list_empty(&subflow
->node
)) {
3545 mptcp_subflow_joined(msk
, ssk
);
3546 mptcp_propagate_sndbuf(parent
, ssk
);
3550 if (!mptcp_pm_allow_new_subflow(msk
))
3551 goto err_prohibited
;
3553 /* If we can't acquire msk socket lock here, let the release callback
3556 mptcp_data_lock(parent
);
3557 if (!sock_owned_by_user(parent
)) {
3558 ret
= __mptcp_finish_join(msk
, ssk
);
3561 list_add_tail(&subflow
->node
, &msk
->conn_list
);
3565 list_add_tail(&subflow
->node
, &msk
->join_list
);
3566 __set_bit(MPTCP_FLUSH_JOIN_LIST
, &msk
->cb_flags
);
3568 mptcp_data_unlock(parent
);
3572 subflow
->reset_reason
= MPTCP_RST_EPROHIBIT
;
static void mptcp_shutdown(struct sock *sk, int how)
{
	pr_debug("sk=%p, how=%d", sk, how);

	if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk))
		__mptcp_wr_shutdown(sk);
}

static int mptcp_forward_alloc_get(const struct sock *sk)
{
	return READ_ONCE(sk->sk_forward_alloc) +
	       READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc);
}
static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
{
	const struct sock *sk = (void *)msk;
	u64 delta;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
		return 0;

	delta = msk->write_seq - v;
	if (__mptcp_check_fallback(msk) && msk->first) {
		struct tcp_sock *tp = tcp_sk(msk->first);

		/* the first subflow is disconnected after close - see
		 * __mptcp_close_ssk(). tcp_disconnect() moves the write_seq
		 * so ignore that status, too.
		 */
		if (!((1 << msk->first->sk_state) &
		      (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
			delta += READ_ONCE(tp->write_seq) - tp->snd_una;
	}
	if (delta > INT_MAX)
		delta = INT_MAX;

	return (int)delta;
}

static int mptcp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		__mptcp_move_skbs(msk);
		*karg = mptcp_inq_hint(sk);
		release_sock(sk);
		break;
	case SIOCOUTQ:
		slow = lock_sock_fast(sk);
		*karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una));
		unlock_sock_fast(sk, slow);
		break;
	case SIOCOUTQNSD:
		slow = lock_sock_fast(sk);
		*karg = mptcp_ioctl_outq(msk, msk->snd_nxt);
		unlock_sock_fast(sk, slow);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
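/* A minimal user-space sketch (not part of this file) exercising the handlers
 * above; MPTCP reuses the plain TCP ioctls:
 *
 *	int inq = 0, outq = 0, notsent = 0;
 *
 *	ioctl(fd, FIONREAD, &inq);        // readable bytes, see mptcp_inq_hint()
 *	ioctl(fd, SIOCOUTQ, &outq);       // not yet acked at the MPTCP level
 *	ioctl(fd, SIOCOUTQNSD, &notsent); // not yet pushed to any subflow
 */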
3654 static void mptcp_subflow_early_fallback(struct mptcp_sock
*msk
,
3655 struct mptcp_subflow_context
*subflow
)
3657 subflow
->request_mptcp
= 0;
3658 __mptcp_do_fallback(msk
);
3661 static int mptcp_connect(struct sock
*sk
, struct sockaddr
*uaddr
, int addr_len
)
3663 struct mptcp_subflow_context
*subflow
;
3664 struct mptcp_sock
*msk
= mptcp_sk(sk
);
3668 ssk
= __mptcp_nmpc_sk(msk
);
3670 return PTR_ERR(ssk
);
3672 inet_sk_state_store(sk
, TCP_SYN_SENT
);
3673 subflow
= mptcp_subflow_ctx(ssk
);
3674 #ifdef CONFIG_TCP_MD5SIG
3675 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
3678 if (rcu_access_pointer(tcp_sk(ssk
)->md5sig_info
))
3679 mptcp_subflow_early_fallback(msk
, subflow
);
3681 if (subflow
->request_mptcp
&& mptcp_token_new_connect(ssk
)) {
3682 MPTCP_INC_STATS(sock_net(ssk
), MPTCP_MIB_TOKENFALLBACKINIT
);
3683 mptcp_subflow_early_fallback(msk
, subflow
);
3685 if (likely(!__mptcp_check_fallback(msk
)))
3686 MPTCP_INC_STATS(sock_net(sk
), MPTCP_MIB_MPCAPABLEACTIVE
);
3688 /* if reaching here via the fastopen/sendmsg path, the caller already
3689 * acquired the subflow socket lock, too.
3691 if (!msk
->fastopening
)
3694 /* the following mirrors closely a very small chunk of code from
3695 * __inet_stream_connect()
3697 if (ssk
->sk_state
!= TCP_CLOSE
)
3700 if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk
)) {
3701 err
= ssk
->sk_prot
->pre_connect(ssk
, uaddr
, addr_len
);
3706 err
= ssk
->sk_prot
->connect(ssk
, uaddr
, addr_len
);
3710 inet_assign_bit(DEFER_CONNECT
, sk
, inet_test_bit(DEFER_CONNECT
, ssk
));
3713 if (!msk
->fastopening
)
3716 /* on successful connect, the msk state will be moved to established by
3717 * subflow_finish_connect()
3719 if (unlikely(err
)) {
3720 /* avoid leaving a dangling token in an unconnected socket */
3721 mptcp_token_destroy(msk
);
3722 inet_sk_state_store(sk
, TCP_CLOSE
);
3726 mptcp_copy_inaddrs(sk
, ssk
);
3730 static struct proto mptcp_prot
= {
3732 .owner
= THIS_MODULE
,
3733 .init
= mptcp_init_sock
,
3734 .connect
= mptcp_connect
,
3735 .disconnect
= mptcp_disconnect
,
3736 .close
= mptcp_close
,
3737 .accept
= mptcp_accept
,
3738 .setsockopt
= mptcp_setsockopt
,
3739 .getsockopt
= mptcp_getsockopt
,
3740 .shutdown
= mptcp_shutdown
,
3741 .destroy
= mptcp_destroy
,
3742 .sendmsg
= mptcp_sendmsg
,
3743 .ioctl
= mptcp_ioctl
,
3744 .recvmsg
= mptcp_recvmsg
,
3745 .release_cb
= mptcp_release_cb
,
3747 .unhash
= mptcp_unhash
,
3748 .get_port
= mptcp_get_port
,
3749 .forward_alloc_get
= mptcp_forward_alloc_get
,
3750 .sockets_allocated
= &mptcp_sockets_allocated
,
3752 .memory_allocated
= &tcp_memory_allocated
,
3753 .per_cpu_fw_alloc
= &tcp_memory_per_cpu_fw_alloc
,
3755 .memory_pressure
= &tcp_memory_pressure
,
3756 .sysctl_wmem_offset
= offsetof(struct net
, ipv4
.sysctl_tcp_wmem
),
3757 .sysctl_rmem_offset
= offsetof(struct net
, ipv4
.sysctl_tcp_rmem
),
3758 .sysctl_mem
= sysctl_tcp_mem
,
3759 .obj_size
= sizeof(struct mptcp_sock
),
3760 .slab_flags
= SLAB_TYPESAFE_BY_RCU
,
3761 .no_autobind
= true,
3764 static int mptcp_bind(struct socket
*sock
, struct sockaddr
*uaddr
, int addr_len
)
3766 struct mptcp_sock
*msk
= mptcp_sk(sock
->sk
);
3767 struct sock
*ssk
, *sk
= sock
->sk
;
3771 ssk
= __mptcp_nmpc_sk(msk
);
3777 if (sk
->sk_family
== AF_INET
)
3778 err
= inet_bind_sk(ssk
, uaddr
, addr_len
);
3779 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
3780 else if (sk
->sk_family
== AF_INET6
)
3781 err
= inet6_bind_sk(ssk
, uaddr
, addr_len
);
3784 mptcp_copy_inaddrs(sk
, ssk
);
static int mptcp_listen(struct socket *sock, int backlog)
{
	struct mptcp_sock *msk = mptcp_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sock *ssk;
	int err;

	pr_debug("msk=%p", msk);

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
		goto unlock;

	ssk = __mptcp_nmpc_sk(msk);
	if (IS_ERR(ssk)) {
		err = PTR_ERR(ssk);
		goto unlock;
	}

	inet_sk_state_store(sk, TCP_LISTEN);
	sock_set_flag(sk, SOCK_RCU_FREE);

	lock_sock(ssk);
	err = __inet_listen_sk(ssk, backlog);
	release_sock(ssk);
	inet_sk_state_store(sk, inet_sk_state_load(ssk));

	if (!err) {
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
		mptcp_copy_inaddrs(sk, ssk);
		mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
	}

unlock:
	release_sock(sk);
	return err;
}
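/* For reference, the user-space side of the listener path above only differs
 * from plain TCP in the protocol argument (a minimal sketch, error handling
 * omitted):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *	struct sockaddr_in a = { .sin_family = AF_INET,
 *				 .sin_port = htons(8080),
 *				 .sin_addr.s_addr = htonl(INADDR_ANY) };
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *	listen(fd, 128);
 *	accept(fd, NULL, NULL);
 *
 * The accepted socket is served by mptcp_stream_accept() below and falls back
 * to plain TCP transparently when the peer is not MP-capable.
 */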
3831 static int mptcp_stream_accept(struct socket
*sock
, struct socket
*newsock
,
3832 int flags
, bool kern
)
3834 struct mptcp_sock
*msk
= mptcp_sk(sock
->sk
);
3835 struct sock
*ssk
, *newsk
;
3838 pr_debug("msk=%p", msk
);
3840 /* Buggy applications can call accept on socket states other then LISTEN
3841 * but no need to allocate the first subflow just to error out.
3843 ssk
= READ_ONCE(msk
->first
);
3847 newsk
= mptcp_accept(ssk
, flags
, &err
, kern
);
3853 __inet_accept(sock
, newsock
, newsk
);
3854 if (!mptcp_is_tcpsk(newsock
->sk
)) {
3855 struct mptcp_sock
*msk
= mptcp_sk(newsk
);
3856 struct mptcp_subflow_context
*subflow
;
3858 set_bit(SOCK_CUSTOM_SOCKOPT
, &newsock
->flags
);
3859 msk
->in_accept_queue
= 0;
3861 /* set ssk->sk_socket of accept()ed flows to mptcp socket.
3862 * This is needed so NOSPACE flag can be set from tcp stack.
3864 mptcp_for_each_subflow(msk
, subflow
) {
3865 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
3867 if (!ssk
->sk_socket
)
3868 mptcp_sock_graft(ssk
, newsock
);
3871 /* Do late cleanup for the first subflow as necessary. Also
3872 * deal with bad peers not doing a complete shutdown.
3874 if (unlikely(inet_sk_state_load(msk
->first
) == TCP_CLOSE
)) {
3875 __mptcp_close_ssk(newsk
, msk
->first
,
3876 mptcp_subflow_ctx(msk
->first
), 0);
3877 if (unlikely(list_is_singular(&msk
->conn_list
)))
3878 inet_sk_state_store(newsk
, TCP_CLOSE
);
3881 release_sock(newsk
);
3886 static __poll_t
mptcp_check_writeable(struct mptcp_sock
*msk
)
3888 struct sock
*sk
= (struct sock
*)msk
;
3890 if (sk_stream_is_writeable(sk
))
3891 return EPOLLOUT
| EPOLLWRNORM
;
3893 mptcp_set_nospace(sk
);
3894 smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
3895 if (sk_stream_is_writeable(sk
))
3896 return EPOLLOUT
| EPOLLWRNORM
;
3901 static __poll_t
mptcp_poll(struct file
*file
, struct socket
*sock
,
3902 struct poll_table_struct
*wait
)
3904 struct sock
*sk
= sock
->sk
;
3905 struct mptcp_sock
*msk
;
3911 sock_poll_wait(file
, sock
, wait
);
3913 state
= inet_sk_state_load(sk
);
3914 pr_debug("msk=%p state=%d flags=%lx", msk
, state
, msk
->flags
);
3915 if (state
== TCP_LISTEN
) {
3916 struct sock
*ssk
= READ_ONCE(msk
->first
);
3918 if (WARN_ON_ONCE(!ssk
))
3921 return inet_csk_listen_poll(ssk
);
3924 shutdown
= READ_ONCE(sk
->sk_shutdown
);
3925 if (shutdown
== SHUTDOWN_MASK
|| state
== TCP_CLOSE
)
3927 if (shutdown
& RCV_SHUTDOWN
)
3928 mask
|= EPOLLIN
| EPOLLRDNORM
| EPOLLRDHUP
;
3930 if (state
!= TCP_SYN_SENT
&& state
!= TCP_SYN_RECV
) {
3931 mask
|= mptcp_check_readable(sk
);
3932 if (shutdown
& SEND_SHUTDOWN
)
3933 mask
|= EPOLLOUT
| EPOLLWRNORM
;
3935 mask
|= mptcp_check_writeable(msk
);
3936 } else if (state
== TCP_SYN_SENT
&&
3937 inet_test_bit(DEFER_CONNECT
, sk
)) {
3938 /* cf tcp_poll() note about TFO */
3939 mask
|= EPOLLOUT
| EPOLLWRNORM
;
3942 /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
3944 if (READ_ONCE(sk
->sk_err
))
3950 static const struct proto_ops mptcp_stream_ops
= {
3952 .owner
= THIS_MODULE
,
3953 .release
= inet_release
,
3955 .connect
= inet_stream_connect
,
3956 .socketpair
= sock_no_socketpair
,
3957 .accept
= mptcp_stream_accept
,
3958 .getname
= inet_getname
,
3960 .ioctl
= inet_ioctl
,
3961 .gettstamp
= sock_gettstamp
,
3962 .listen
= mptcp_listen
,
3963 .shutdown
= inet_shutdown
,
3964 .setsockopt
= sock_common_setsockopt
,
3965 .getsockopt
= sock_common_getsockopt
,
3966 .sendmsg
= inet_sendmsg
,
3967 .recvmsg
= inet_recvmsg
,
3968 .mmap
= sock_no_mmap
,
3969 .set_rcvlowat
= mptcp_set_rcvlowat
,
3972 static struct inet_protosw mptcp_protosw
= {
3973 .type
= SOCK_STREAM
,
3974 .protocol
= IPPROTO_MPTCP
,
3975 .prot
= &mptcp_prot
,
3976 .ops
= &mptcp_stream_ops
,
3977 .flags
= INET_PROTOSW_ICSK
,
3980 static int mptcp_napi_poll(struct napi_struct
*napi
, int budget
)
3982 struct mptcp_delegated_action
*delegated
;
3983 struct mptcp_subflow_context
*subflow
;
3986 delegated
= container_of(napi
, struct mptcp_delegated_action
, napi
);
3987 while ((subflow
= mptcp_subflow_delegated_next(delegated
)) != NULL
) {
3988 struct sock
*ssk
= mptcp_subflow_tcp_sock(subflow
);
3990 bh_lock_sock_nested(ssk
);
3991 if (!sock_owned_by_user(ssk
)) {
3992 mptcp_subflow_process_delegated(ssk
, xchg(&subflow
->delegated_status
, 0));
3994 /* tcp_release_cb_override already processed
3995 * the action or will do at next release_sock().
3996 * In both case must dequeue the subflow here - on the same
3997 * CPU that scheduled it.
4000 clear_bit(MPTCP_DELEGATE_SCHEDULED
, &subflow
->delegated_status
);
4002 bh_unlock_sock(ssk
);
4005 if (++work_done
== budget
)
4009 /* always provide a 0 'work_done' argument, so that napi_complete_done
4010 * will not try accessing the NULL napi->dev ptr
4012 napi_complete_done(napi
, 0);
4016 void __init
mptcp_proto_init(void)
4018 struct mptcp_delegated_action
*delegated
;
4021 mptcp_prot
.h
.hashinfo
= tcp_prot
.h
.hashinfo
;
4023 if (percpu_counter_init(&mptcp_sockets_allocated
, 0, GFP_KERNEL
))
4024 panic("Failed to allocate MPTCP pcpu counter\n");
4026 init_dummy_netdev(&mptcp_napi_dev
);
4027 for_each_possible_cpu(cpu
) {
4028 delegated
= per_cpu_ptr(&mptcp_delegated_actions
, cpu
);
4029 INIT_LIST_HEAD(&delegated
->head
);
4030 netif_napi_add_tx(&mptcp_napi_dev
, &delegated
->napi
,
4032 napi_enable(&delegated
->napi
);
4035 mptcp_subflow_init();
4040 if (proto_register(&mptcp_prot
, 1) != 0)
4041 panic("Failed to register MPTCP proto.\n");
4043 inet_register_protosw(&mptcp_protosw
);
4045 BUILD_BUG_ON(sizeof(struct mptcp_skb_cb
) > sizeof_field(struct sk_buff
, cb
));
4048 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
4049 static const struct proto_ops mptcp_v6_stream_ops
= {
4051 .owner
= THIS_MODULE
,
4052 .release
= inet6_release
,
4054 .connect
= inet_stream_connect
,
4055 .socketpair
= sock_no_socketpair
,
4056 .accept
= mptcp_stream_accept
,
4057 .getname
= inet6_getname
,
4059 .ioctl
= inet6_ioctl
,
4060 .gettstamp
= sock_gettstamp
,
4061 .listen
= mptcp_listen
,
4062 .shutdown
= inet_shutdown
,
4063 .setsockopt
= sock_common_setsockopt
,
4064 .getsockopt
= sock_common_getsockopt
,
4065 .sendmsg
= inet6_sendmsg
,
4066 .recvmsg
= inet6_recvmsg
,
4067 .mmap
= sock_no_mmap
,
4068 #ifdef CONFIG_COMPAT
4069 .compat_ioctl
= inet6_compat_ioctl
,
4071 .set_rcvlowat
= mptcp_set_rcvlowat
,
4074 static struct proto mptcp_v6_prot
;
4076 static struct inet_protosw mptcp_v6_protosw
= {
4077 .type
= SOCK_STREAM
,
4078 .protocol
= IPPROTO_MPTCP
,
4079 .prot
= &mptcp_v6_prot
,
4080 .ops
= &mptcp_v6_stream_ops
,
4081 .flags
= INET_PROTOSW_ICSK
,
4084 int __init
mptcp_proto_v6_init(void)
4088 mptcp_v6_prot
= mptcp_prot
;
4089 strcpy(mptcp_v6_prot
.name
, "MPTCPv6");
4090 mptcp_v6_prot
.slab
= NULL
;
4091 mptcp_v6_prot
.obj_size
= sizeof(struct mptcp6_sock
);
4092 mptcp_v6_prot
.ipv6_pinfo_offset
= offsetof(struct mptcp6_sock
, np
);
4094 err
= proto_register(&mptcp_v6_prot
, 1);
4098 err
= inet6_register_protosw(&mptcp_v6_protosw
);
4100 proto_unregister(&mptcp_v6_prot
);