return moved;
}
+static void mptcp_rcv_rtt_update(struct mptcp_sock *msk,
+ struct mptcp_subflow_context *subflow)
+{
+ const struct tcp_sock *tp = tcp_sk(subflow->tcp_sock);
+ u32 rtt_us = tp->rcv_rtt_est.rtt_us;
+ int id;
+
+ /* Update once per subflow per rcvwnd to avoid touching the msk
+ * too often: tp->rcv_rtt_est.seq only moves forward once per
+ * receive window.
+ */
+ if (!rtt_us || tp->rcv_rtt_est.seq == subflow->prev_rtt_seq)
+ return;
+
+ subflow->prev_rtt_seq = tp->rcv_rtt_est.seq;
+
+ /* Pairs with READ_ONCE() in mptcp_rtt_us_est(). */
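+ /* next_sample updates are serialized by the msk data lock (see
+ * mptcp_data_ready()); only the samples[] slots need WRITE_ONCE()
+ * for the lockless readers.
+ */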
+ id = msk->rcv_rtt_est.next_sample;
+ WRITE_ONCE(msk->rcv_rtt_est.samples[id], rtt_us);
+ if (++msk->rcv_rtt_est.next_sample == MPTCP_RTT_SAMPLES)
+ msk->rcv_rtt_est.next_sample = 0;
+
+ /* EWMA among the incoming subflows: new = (7 * old + sample) >> 3,
+ * i.e. the most recent subflow sample gets a 1/8 weight.
+ */
+ msk->scaling_ratio = ((msk->scaling_ratio << 3) - msk->scaling_ratio +
+ tp->scaling_ratio) >> 3;
+}
+
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
return;
mptcp_data_lock(sk);
+ mptcp_rcv_rtt_update(msk, subflow);
if (!sock_owned_by_user(sk)) {
/* Wake-up the reader only for in-sequence data */
if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
msk->rcvspace_init = 1;
msk->rcvq_space.copied = 0;
- msk->rcvq_space.rtt_us = 0;
/* initial rcv_space offering made to peer */
msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
/* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
*
- * Only difference: Use highest rtt estimate of the subflows in use.
+ * Only difference: Use lowest rtt estimate of the subflows in use, see
+ * mptcp_rcv_rtt_update() and mptcp_rtt_us_est().
*/
static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
{
struct mptcp_subflow_context *subflow;
struct sock *sk = (struct sock *)msk;
- u8 scaling_ratio = U8_MAX;
- u32 time, advmss = 1;
- u64 rtt_us, mstamp;
+ u32 time, rtt_us;
+ u64 mstamp;
msk_owned_by_me(msk);
mstamp = mptcp_stamp();
time = tcp_stamp_us_delta(mstamp, READ_ONCE(msk->rcvq_space.time));
- rtt_us = msk->rcvq_space.rtt_us;
- if (rtt_us && time < (rtt_us >> 3))
- return;
-
- rtt_us = 0;
- mptcp_for_each_subflow(msk, subflow) {
- const struct tcp_sock *tp;
- u64 sf_rtt_us;
- u32 sf_advmss;
-
- tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
-
- sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
- sf_advmss = READ_ONCE(tp->advmss);
-
- rtt_us = max(sf_rtt_us, rtt_us);
- advmss = max(sf_advmss, advmss);
- scaling_ratio = min(tp->scaling_ratio, scaling_ratio);
- }
-
- msk->rcvq_space.rtt_us = rtt_us;
- msk->scaling_ratio = scaling_ratio;
- if (time < (rtt_us >> 3) || rtt_us == 0)
+ rtt_us = mptcp_rtt_us_est(msk);
+ if (rtt_us == U32_MAX || time < (rtt_us >> 3))
return;
if (msk->rcvq_space.copied <= msk->rcvq_space.space)
msk->timer_ival = TCP_RTO_MIN;
msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
msk->backlog_len = 0;
+ mptcp_init_rtt_est(msk);
WRITE_ONCE(msk->first, NULL);
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
msk->bytes_retrans = 0;
msk->rcvspace_init = 0;
msk->fastclosing = 0;
+ mptcp_init_rtt_est(msk);
/* for fallback's sake */
WRITE_ONCE(msk->ack_seq, 0);
struct page *page;
};
+/* Arbitrary compromise: as low as possible to react in a timely manner to
+ * subflow close events, and as big as possible to avoid being fooled by
+ * biased large samples, e.g. when the peer sends data on a different
+ * subflow than the one carrying the incoming ack.
+ */
+#define MPTCP_RTT_SAMPLES 5
+
/* MPTCP connection sock */
struct mptcp_sock {
/* inet_connection_sock must be the first member */
*/
struct mptcp_pm_data pm;
struct mptcp_sched_ops *sched;
+
+ /* Most recent rtt_us samples observed on the in-use incoming subflows. */
+ struct {
+ u32 samples[MPTCP_RTT_SAMPLES];
+ u32 next_sample; /* index of the slot to overwrite next */
+ } rcv_rtt_est;
+
struct {
int space; /* bytes copied in last measurement window */
int copied; /* bytes copied in this measurement window */
u64 time; /* start time of measurement window */
- u64 rtt_us; /* last maximum rtt of subflows */
} rcvq_space;
u8 scaling_ratio;
bool allow_subflows;
return msk->first_pending;
}
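+
+/* Mark every slot as "no sample yet": U32_MAX entries never win the min()
+ * in mptcp_rtt_us_est(), which in turn keeps returning U32_MAX until at
+ * least one real sample has been collected.
+ */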
+static inline void mptcp_init_rtt_est(struct mptcp_sock *msk)
+{
+ int i;
+
+ for (i = 0; i < MPTCP_RTT_SAMPLES; ++i)
+ msk->rcv_rtt_est.samples[i] = U32_MAX;
+ msk->rcv_rtt_est.next_sample = 0;
+ msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
+}
+
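+/* Lowest recently observed RTT among the in-use subflows, or U32_MAX when
+ * no samples are available yet.
+ */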
+static inline u32 mptcp_rtt_us_est(const struct mptcp_sock *msk)
+{
+ u32 rtt_us = READ_ONCE(msk->rcv_rtt_est.samples[0]);
+ int i;
+
+ /* Lockless access of collected samples. */
+ for (i = 1; i < MPTCP_RTT_SAMPLES; ++i)
+ rtt_us = min(rtt_us, READ_ONCE(msk->rcv_rtt_est.samples[i]));
+ return rtt_us;
+}
+
static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
u32 map_data_len;
__wsum map_data_csum;
u32 map_csum_len;
+ u32 prev_rtt_seq; /* last rcv_rtt_est.seq fed to the msk estimator */
u32 request_mptcp : 1, /* send MP_CAPABLE */
request_join : 1, /* send MP_JOIN */
request_bkup : 1,