{
struct mptcp_sock *msk = mptcp_sk(sk);
struct sk_buff *tail = NULL;
+ /* Cache the subflow socket now: the coalesce path below may free
+  * the skb (kfree_skb_partial()), but ssk is still needed for the
+  * CG-accounting check at the bottom of the function.
+  */
+ struct sock *ssk = skb->sk;
bool fragstolen;
int delta;
+ /* NOTE(review): list_last_entry() on an empty list does not return
+  * NULL, so the "tail &&" test below cannot stand on its own;
+  * presumably an elided !list_empty() guard assigns tail — confirm
+  * against the full source.
+  */
tail = list_last_entry(&msk->backlog_list, struct sk_buff, list);
if (tail && MPTCP_SKB_CB(skb)->map_seq == MPTCP_SKB_CB(tail)->end_seq &&
- skb->sk == tail->sk &&
+ ssk == tail->sk &&
__mptcp_try_coalesce(sk, tail, skb, &fragstolen, &delta)) {
skb->truesize -= delta;
kfree_skb_partial(skb, fragstolen);
__mptcp_subflow_lend_fwdmem(subflow, delta);
- WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta);
- return;
+ /* Fall through to the shared accounting below: delta already
+  * holds the coalesced amount.
+  */
+ goto account;
}
list_add_tail(&skb->list, &msk->backlog_list);
mptcp_subflow_lend_fwdmem(subflow, skb);
- WRITE_ONCE(msk->backlog_len, msk->backlog_len + skb->truesize);
+ /* Whole skb enqueued: account its full truesize. */
+ delta = skb->truesize;
+
+account:
+ WRITE_ONCE(msk->backlog_len, msk->backlog_len + delta);
+
+ /* Possibly not accept()ed yet, keep track of memory not CG
+ * accounted, mptcp_graft_subflows() will handle it.
+ */
+ if (!mem_cgroup_from_sk(ssk))
+ msk->backlog_unaccounted += delta;
}
+ /* own_msk: true when the caller effectively owns the msk (e.g. the
+  * socket is not user-owned while the data lock is held — the exact
+  * guard is elided from this hunk, confirm in full source), so skbs
+  * may feed the receive/ofo queues directly; false when skbs must be
+  * diverted to the backlog instead.
+  */
static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
- struct sock *ssk)
+ struct sock *ssk, bool own_msk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = (struct sock *)msk;
struct sk_buff *skb;
bool fin;
- if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
- break;
-
+ /* The early bail-out on a full msk rcvbuf is gone: data is no
+  * longer left sitting in the subflow — each skb is either moved
+  * directly or parked in the backlog (own_msk branch below).
+  */
/* try to move as much data as available */
map_remaining = subflow->map_data_len -
mptcp_subflow_get_map_offset(subflow);
mptcp_init_skb(ssk, skb, offset, len);
+ /* Direct enqueue only when owning the msk and rcvbuf has room;
+  * otherwise the skb goes to the backlog (elided else branch).
+  */
- if (true) {
+ if (own_msk && sk_rmem_alloc_get(sk) < sk->sk_rcvbuf) {
mptcp_subflow_lend_fwdmem(subflow, skb);
ret |= __mptcp_move_skb(sk, skb);
} else {
struct sock *sk = (struct sock *)msk;
bool moved;
- moved = __mptcp_move_skbs_from_subflow(msk, ssk);
+ /* Fast path: msk usable right now, own_msk == true. */
+ moved = __mptcp_move_skbs_from_subflow(msk, ssk, true);
__mptcp_ofo_queue(msk);
if (unlikely(ssk->sk_err))
__mptcp_subflow_error_report(sk, ssk);
if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk))
sk->sk_data_ready(sk);
} else {
- __set_bit(MPTCP_DEQUEUE, &mptcp_sk(sk)->cb_flags);
+ /* msk not available: backlog the skbs now instead of
+  * deferring a MPTCP_DEQUEUE pass to the release callback.
+  */
+ __mptcp_move_skbs_from_subflow(msk, ssk, false);
}
mptcp_data_unlock(sk);
}
msk->rcvq_space.time = mstamp;
}
-static struct mptcp_subflow_context *
-__mptcp_first_ready_from(struct mptcp_sock *msk,
- struct mptcp_subflow_context *subflow)
+ /* Spool a batch of backlog skbs, already detached from the backlog
+  * list by the caller, into the msk receive path.
+  * @skbs: list of skbs to process; the caller guarantees it is
+  *        non-empty (see mptcp_can_spool_backlog()), as the initial
+  *        list_first_entry() below is unconditional.
+  * @delta: out param, total truesize consumed from @skbs, so the
+  *         caller can adjust backlog_len accordingly.
+  * Returns true if at least one skb reached the receive/ofo queues.
+  */
+static bool __mptcp_move_skbs(struct sock *sk, struct list_head *skbs, u32 *delta)
{
- struct mptcp_subflow_context *start_subflow = subflow;
+ struct sk_buff *skb = list_first_entry(skbs, struct sk_buff, list);
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ bool moved = false;
+
+ *delta = 0;
+ while (1) {
+ /* If the msk recvbuf is full stop, don't drop */
+ if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
+ break;
- while (!READ_ONCE(subflow->data_avail)) {
- subflow = mptcp_next_subflow(msk, subflow);
- if (subflow == start_subflow)
- return NULL;
+ prefetch(skb->next);
+ list_del(&skb->list);
+ /* Capture truesize before __mptcp_move_skb() may coalesce or
+  * free the skb.
+  */
+ *delta += skb->truesize;
+
+ moved |= __mptcp_move_skb(sk, skb);
+ if (list_empty(skbs))
+ break;
+
+ skb = list_first_entry(skbs, struct sk_buff, list);
}
- return subflow;
+
+ __mptcp_ofo_queue(msk);
+ if (moved)
+ mptcp_check_data_fin((struct sock *)msk);
+ return moved;
}
-static bool __mptcp_move_skbs(struct sock *sk)
+ /* Called under the msk data lock: detach the whole backlog for
+  * spooling. Returns false (leaving @skbs untouched) when the
+  * backlog is empty or the msk rcvbuf is already full; otherwise
+  * splices every backlogged skb onto @skbs and returns true.
+  */
+static bool mptcp_can_spool_backlog(struct sock *sk, struct list_head *skbs)
{
- struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
- bool ret = false;
- if (list_empty(&msk->conn_list))
+ /* After CG initialization, subflows should never add skb before
+ * gaining the CG themself.
+ */
+ DEBUG_NET_WARN_ON_ONCE(msk->backlog_unaccounted && sk->sk_socket &&
+ mem_cgroup_from_sk(sk));
+
+ /* Don't spool the backlog if the rcvbuf is full. */
+ if (list_empty(&msk->backlog_list) ||
+ sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
return false;
- subflow = list_first_entry(&msk->conn_list,
- struct mptcp_subflow_context, node);
- for (;;) {
- struct sock *ssk;
- bool slowpath;
+ INIT_LIST_HEAD(skbs);
+ list_splice_init(&msk->backlog_list, skbs);
+ return true;
+}
- /*
- * As an optimization avoid traversing the subflows list
- * and ev. acquiring the subflow socket lock before baling out
- */
- if (sk_rmem_alloc_get(sk) > sk->sk_rcvbuf)
- break;
+ /* Called under the msk data lock after a spooling pass: subtract
+  * the consumed truesize from the backlog accounting and return any
+  * unprocessed skbs to the head of the backlog list (list_splice(),
+  * not _tail, so original ordering is preserved).
+  */
+static void mptcp_backlog_spooled(struct sock *sk, u32 moved,
+ struct list_head *skbs)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
- subflow = __mptcp_first_ready_from(msk, subflow);
- if (!subflow)
- break;
+ WRITE_ONCE(msk->backlog_len, msk->backlog_len - moved);
+ list_splice(skbs, &msk->backlog_list);
+}
- ssk = mptcp_subflow_tcp_sock(subflow);
- slowpath = lock_sock_fast(ssk);
- ret = __mptcp_move_skbs_from_subflow(msk, ssk) || ret;
- if (unlikely(ssk->sk_err))
- __mptcp_error_report(sk);
- unlock_sock_fast(ssk, slowpath);
+ /* Drain the backlog into the msk receive path.
+  * Each batch is detached under the data lock, processed with the
+  * lock dropped, then re-accounted (leftovers re-queued) under the
+  * lock again. Loops until the backlog empties or the rcvbuf fills.
+  * Returns true if any skb reached the receive/ofo queues.
+  */
+static bool mptcp_move_skbs(struct sock *sk)
+{
+ struct list_head skbs;
+ bool enqueued = false;
+ u32 moved;
- subflow = mptcp_next_subflow(msk, subflow);
- }
+ mptcp_data_lock(sk);
+ while (mptcp_can_spool_backlog(sk, &skbs)) {
+ mptcp_data_unlock(sk);
+ enqueued |= __mptcp_move_skbs(sk, &skbs, &moved);
- __mptcp_ofo_queue(msk);
- if (ret)
- mptcp_check_data_fin((struct sock *)msk);
- return ret;
+ mptcp_data_lock(sk);
+ mptcp_backlog_spooled(sk, moved, &skbs);
+ }
+ mptcp_data_unlock(sk);
+ return enqueued;
}
static unsigned int mptcp_inq_hint(const struct sock *sk)
copied += bytes_read;
- if (skb_queue_empty(&sk->sk_receive_queue) && __mptcp_move_skbs(sk))
+ /* NOTE(review): lockless peek at backlog_list; presumably a benign
+  * race, as mptcp_move_skbs() re-checks under the data lock —
+  * confirm whether a READ_ONCE()-style annotation is expected here.
+  */
+ if (!list_empty(&msk->backlog_list) && mptcp_move_skbs(sk))
continue;
/* only the MPTCP socket status is relevant here. The exit
+ /* MPTCP_DEQUEUE is gone from the "needs process context" mask:
+  * backlog spooling is now evaluated directly in the release cb.
+  */
#define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
BIT(MPTCP_RETRANSMIT) | \
- BIT(MPTCP_FLUSH_JOIN_LIST) | \
- BIT(MPTCP_DEQUEUE))
+ BIT(MPTCP_FLUSH_JOIN_LIST))
/* processes deferred events and flush wmem */
static void mptcp_release_cb(struct sock *sk)
for (;;) {
unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
- struct list_head join_list;
+ struct list_head join_list, skbs;
+ bool spool_bl;
+ u32 moved;
- if (!flags)
+ /* Detach the backlog batch while still holding the socket spin
+  * lock; it is processed after the unlock (elided from this hunk)
+  * and restored below once the lock is re-taken.
+  */
+ spool_bl = mptcp_can_spool_backlog(sk, &skbs);
+ if (!flags && !spool_bl)
break;
INIT_LIST_HEAD(&join_list);
__mptcp_push_pending(sk, 0);
if (flags & BIT(MPTCP_RETRANSMIT))
__mptcp_retrans(sk);
- if ((flags & BIT(MPTCP_DEQUEUE)) && __mptcp_move_skbs(sk)) {
+ if (spool_bl && __mptcp_move_skbs(sk, &skbs, &moved)) {
/* notify ack seq update */
mptcp_cleanup_rbuf(msk, 0);
sk->sk_data_ready(sk);
cond_resched();
spin_lock_bh(&sk->sk_lock.slock);
+ /* Spin lock re-acquired: put back unprocessed skbs and fix the
+  * backlog accounting for the batch detached above.
+  */
+ if (spool_bl)
+ mptcp_backlog_spooled(sk, moved, &skbs);
}
if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
return -EINVAL;
lock_sock(sk);
- if (__mptcp_move_skbs(sk))
+ if (mptcp_move_skbs(sk))
mptcp_cleanup_rbuf(msk, 0);
*karg = mptcp_inq_hint(sk);
release_sock(sk);
struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
+ if (mem_cgroup_sockets_enabled) {
+ LIST_HEAD(join_list);
+
+ /* Subflows joining after __inet_accept() will get the
+ * mem CG properly initialized at mptcp_finish_join() time,
+ * but subflows pending in join_list need explicit
+ * initialization before flushing `backlog_unaccounted`
+ * or MPTCP can later unexpectedly observe unaccounted memory.
+ */
+ mptcp_data_lock(sk);
+ list_splice_init(&msk->join_list, &join_list);
+ mptcp_data_unlock(sk);
+
+ __mptcp_flush_join_list(sk, &join_list);
+ }
+
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
if (!ssk->sk_socket)
mptcp_sock_graft(ssk, sk->sk_socket);
+ /* No memcg accounting for this socket: nothing to inherit.
+  * (The matching lock_sock(ssk) for the release below is elided
+  * from this hunk.)
+  */
+ if (!mem_cgroup_sk_enabled(sk))
+ goto unlock;
+
__mptcp_inherit_cgrp_data(sk, ssk);
__mptcp_inherit_memcg(sk, ssk, GFP_KERNEL);
+
+unlock:
release_sock(ssk);
}
+
+ if (mem_cgroup_sk_enabled(sk)) {
+ gfp_t gfp = GFP_KERNEL | __GFP_NOFAIL;
+ int amt;
+
+ /* Account the backlog memory; prior accept() is aware of
+ * fwd and rmem only.
+ */
+ mptcp_data_lock(sk);
+ /* Charge only the extra pages the unaccounted backlog adds on
+  * top of what fwd_alloc + rmem already cover (difference of the
+  * two sk_mem_pages() roundings).
+  */
+ amt = sk_mem_pages(sk->sk_forward_alloc +
+ msk->backlog_unaccounted +
+ atomic_read(&sk->sk_rmem_alloc)) -
+ sk_mem_pages(sk->sk_forward_alloc +
+ atomic_read(&sk->sk_rmem_alloc));
+ msk->backlog_unaccounted = 0;
+ mptcp_data_unlock(sk);
+
+ if (amt)
+ /* NOTE(review): __GFP_NOFAIL — presumably a charge failure
+  * cannot be unwound at accept() time; confirm against memcg
+  * charge semantics.
+  */
+ mem_cgroup_sk_charge(sk, amt, gfp);
+ }
}
static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,