git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
tcp: drop secpath at the same time as we currently drop dst
author: Sabrina Dubroca <sd@queasysnail.net>
Mon, 17 Feb 2025 10:23:35 +0000 (11:23 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 7 Mar 2025 15:56:36 +0000 (16:56 +0100)
[ Upstream commit 9b6412e6979f6f9e0632075f8f008937b5cd4efd ]

Xiumei reported hitting the WARN in xfrm6_tunnel_net_exit while
running tests that boil down to:
 - create a pair of netns
 - run a basic TCP test over ipcomp6
 - delete the pair of netns

The xfrm_state found on spi_byaddr was not deleted at the time we
delete the netns, because we still have a reference on it. This
lingering reference comes from a secpath (which holds a ref on the
xfrm_state), which is still attached to an skb. This skb is not
leaked, it ends up on sk_receive_queue and then gets defer-free'd by
skb_attempt_defer_free.

The problem happens when we defer freeing an skb (push it on one CPU's
defer_list), and don't flush that list before the netns is deleted. In
that case, we still have a reference on the xfrm_state that we don't
expect at this point.

We already drop the skb's dst in the TCP receive path when it's no
longer needed, so let's also drop the secpath. At this point,
tcp_filter has already called into the LSM hooks that may require the
secpath, so it should not be needed anymore. However, in some of those
places, the MPTCP extension has just been attached to the skb, so we
cannot simply drop all extensions.

Fixes: 68822bdf76f1 ("net: generalize skb freeing deferral to per-cpu lists")
Reported-by: Xiumei Mu <xmu@redhat.com>
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/5055ba8f8f72bdcb602faa299faca73c280b7735.1739743613.git.sd@queasysnail.net
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
include/net/tcp.h
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c

index a770210fda9bc1b3ca9f477ce525f50e46e14e7f..14a00cdd31f42a0fafd8798f4b6a2898fc378c95 100644 (file)
@@ -40,6 +40,7 @@
 #include <net/inet_ecn.h>
 #include <net/dst.h>
 #include <net/mptcp.h>
+#include <net/xfrm.h>
 
 #include <linux/seq_file.h>
 #include <linux/memcontrol.h>
@@ -640,6 +641,19 @@ void tcp_fin(struct sock *sk);
 void tcp_check_space(struct sock *sk);
 void tcp_sack_compress_send_ack(struct sock *sk);
 
+static inline void tcp_cleanup_skb(struct sk_buff *skb)
+{
+       skb_dst_drop(skb);
+       secpath_reset(skb);
+}
+
+static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
+{
+       DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
+       DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
+       __skb_queue_tail(&sk->sk_receive_queue, skb);
+}
+
 /* tcp_timer.c */
 void tcp_init_xmit_timers(struct sock *);
 static inline void tcp_clear_xmit_timers(struct sock *sk)
index d0b7ded591bd463bae725c08989ec08c551602b5..cb01c770d8cf515a7fe06f48b9ea6a952990a175 100644 (file)
@@ -178,7 +178,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
        if (!skb)
                return;
 
-       skb_dst_drop(skb);
+       tcp_cleanup_skb(skb);
        /* segs_in has been initialized to 1 in tcp_create_openreq_child().
         * Hence, reset segs_in to 0 before calling tcp_segs_in()
         * to avoid double counting.  Also, tcp_segs_in() expects
@@ -195,7 +195,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
 
        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-       __skb_queue_tail(&sk->sk_receive_queue, skb);
+       tcp_add_receive_queue(sk, skb);
        tp->syn_data_acked = 1;
 
        /* u64_stats_update_begin(&tp->syncp) not needed here,
index 2379ee5511645ff139f07a30a8bbdf5dba873081..3b81f6df829ffbb1e3f0530f5cd4c9798b35786b 100644 (file)
@@ -4836,7 +4836,7 @@ static void tcp_ofo_queue(struct sock *sk)
                tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
                fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
                if (!eaten)
-                       __skb_queue_tail(&sk->sk_receive_queue, skb);
+                       tcp_add_receive_queue(sk, skb);
                else
                        kfree_skb_partial(skb, fragstolen);
 
@@ -5027,7 +5027,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
                                  skb, fragstolen)) ? 1 : 0;
        tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
        if (!eaten) {
-               __skb_queue_tail(&sk->sk_receive_queue, skb);
+               tcp_add_receive_queue(sk, skb);
                skb_set_owner_r(skb, sk);
        }
        return eaten;
@@ -5110,7 +5110,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
                __kfree_skb(skb);
                return;
        }
-       skb_dst_drop(skb);
+       tcp_cleanup_skb(skb);
        __skb_pull(skb, tcp_hdr(skb)->doff * 4);
 
        reason = SKB_DROP_REASON_NOT_SPECIFIED;
@@ -6041,7 +6041,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
                        /* Bulk data transfer: receiver */
-                       skb_dst_drop(skb);
+                       tcp_cleanup_skb(skb);
                        __skb_pull(skb, tcp_header_len);
                        eaten = tcp_queue_rcv(sk, skb, &fragstolen);
 
index 805b1a9eca1c5613cfda8d256ae78c7b4c9d13e2..7647f1ec0584e3242583cf8298c9123d9dfb4ddf 100644 (file)
@@ -1791,7 +1791,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
         */
        skb_condense(skb);
 
-       skb_dst_drop(skb);
+       tcp_cleanup_skb(skb);
 
        if (unlikely(tcp_checksum_complete(skb))) {
                bh_unlock_sock(sk);