return copied;
}
+static void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
+{
+ const struct tcp_sock *tp = tcp_sk(ssk);
+
+ msk->rcvspace_init = 1;
+ msk->rcvq_space.copied = 0;
+ msk->rcvq_space.rtt_us = 0;
+
+ /* initial rcv_space offering made to peer */
+ msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
+                               TCP_INIT_CWND * tp->advmss);
+ if (msk->rcvq_space.space == 0)
+         msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
+}
+
/* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
*
* Only difference: Use highest rtt estimate of the subflows in use.
return nsk;
}
-void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
-{
- const struct tcp_sock *tp = tcp_sk(ssk);
-
- msk->rcvspace_init = 1;
- msk->rcvq_space.copied = 0;
- msk->rcvq_space.rtt_us = 0;
-
- /* initial rcv_space offering made to peer */
- msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
-                               TCP_INIT_CWND * tp->advmss);
- if (msk->rcvq_space.space == 0)
-         msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
-}
-
static void mptcp_destroy(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}
-void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk);
void mptcp_data_ready(struct sock *sk, struct sock *ssk);
bool mptcp_finish_join(struct sock *sk);
bool mptcp_schedule_work(struct sock *sk);
subflow = mptcp_subflow_ctx(ssk);
__mptcp_propagate_sndbuf(sk, ssk);
- if (!msk->rcvspace_init)
-         mptcp_rcv_space_init(msk, ssk);
if (sk->sk_state == TCP_SYN_SENT) {
        /* subflow->idsn is always available in TCP_SYN_SENT state,