From: Greg Kroah-Hartman
Date: Wed, 7 Jun 2023 19:55:54 +0000 (+0200)
Subject: drop some unneeded 5.10 and 5.15 network patches
X-Git-Tag: v4.14.317~5
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=695c4b774dea3dc0a3fc13f392e89bc80fc745fb;p=thirdparty%2Fkernel%2Fstable-queue.git

drop some unneeded 5.10 and 5.15 network patches
---

diff --git a/queue-5.10/series b/queue-5.10/series
index 82409a8e789..1123173ab97 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -24,9 +24,6 @@ net-sched-reserve-tc_h_ingress-tc_h_clsact-for-ingre.patch
 net-sched-prohibit-regrafting-ingress-or-clsact-qdis.patch
 net-sched-fix-null-pointer-dereference-in-mq_attach.patch
 net-netlink-fix-netlink_list_memberships-length-repo.patch
-sock-expose-so_timestamp-options-for-mptcp.patch
-sock-expose-so_timestamping-options-for-mptcp.patch
-tcp-remove-sk_-tr-x_skb_cache.patch
 udp6-fix-race-condition-in-udp6_sendmsg-connect.patch
 net-mlx5-read-embedded-cpu-after-init-bit-cleared.patch
 net-sched-flower-fix-possible-oob-write-in-fl_set_ge.patch
diff --git a/queue-5.10/sock-expose-so_timestamp-options-for-mptcp.patch b/queue-5.10/sock-expose-so_timestamp-options-for-mptcp.patch
deleted file mode 100644
index 8387c2ff389..00000000000
--- a/queue-5.10/sock-expose-so_timestamp-options-for-mptcp.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-From 96be4aecebc993609750ce66ca31fc6396932e5d Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Thu, 3 Jun 2021 16:24:27 -0700
-Subject: sock: expose so_timestamp options for mptcp
-
-From: Florian Westphal
-
-[ Upstream commit 371087aa476ab0ac0072303ac94a3bba2d7b0a1d ]
-
-This exports SO_TIMESTAMP_* function for re-use by MPTCP.
-
-Without this there is too much copy & paste needed to support
-this from mptcp setsockopt path.
-
-Acked-by: Paolo Abeni
-Signed-off-by: Florian Westphal
-Signed-off-by: Mat Martineau
-Signed-off-by: David S. Miller
-Stable-dep-of: 448a5ce1120c ("udp6: Fix race condition in udp6_sendmsg & connect")
-Signed-off-by: Sasha Levin
----
- include/net/sock.h | 1 +
- net/core/sock.c | 26 +++++++++++++++++++-------
- 2 files changed, 20 insertions(+), 7 deletions(-)
-
-diff --git a/include/net/sock.h b/include/net/sock.h
-index 3da0601b573ed..09cd879d0dda1 100644
---- a/include/net/sock.h
-+++ b/include/net/sock.h
-@@ -2775,6 +2775,7 @@ static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
- void sock_def_readable(struct sock *sk);
-
- int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
-+void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
- void sock_enable_timestamps(struct sock *sk);
- void sock_no_linger(struct sock *sk);
- void sock_set_keepalive(struct sock *sk);
-diff --git a/net/core/sock.c b/net/core/sock.c
-index c5ae520d4a69c..7d421b0f863f9 100644
---- a/net/core/sock.c
-+++ b/net/core/sock.c
-@@ -768,6 +768,24 @@ void sock_enable_timestamps(struct sock *sk)
- }
- EXPORT_SYMBOL(sock_enable_timestamps);
-
-+void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
-+{
-+ switch (optname) {
-+ case SO_TIMESTAMP_OLD:
-+ __sock_set_timestamps(sk, valbool, false, false);
-+ break;
-+ case SO_TIMESTAMP_NEW:
-+ __sock_set_timestamps(sk, valbool, true, false);
-+ break;
-+ case SO_TIMESTAMPNS_OLD:
-+ __sock_set_timestamps(sk, valbool, false, true);
-+ break;
-+ case SO_TIMESTAMPNS_NEW:
-+ __sock_set_timestamps(sk, valbool, true, true);
-+ break;
-+ }
-+}
-+
- void sock_set_keepalive(struct sock *sk)
- {
- lock_sock(sk);
-@@ -989,16 +1007,10 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
- break;
-
- case SO_TIMESTAMP_OLD:
-- __sock_set_timestamps(sk, valbool, false, false);
-- break;
- case SO_TIMESTAMP_NEW:
-- __sock_set_timestamps(sk, valbool, true, false);
-- break;
- case SO_TIMESTAMPNS_OLD:
-- __sock_set_timestamps(sk, valbool, false, true);
-- break;
- case SO_TIMESTAMPNS_NEW:
-- __sock_set_timestamps(sk, valbool, true, true);
-+ sock_set_timestamp(sk, valbool, optname);
- break;
- case SO_TIMESTAMPING_NEW:
- case SO_TIMESTAMPING_OLD:
---
-2.39.2
-
diff --git a/queue-5.10/sock-expose-so_timestamping-options-for-mptcp.patch b/queue-5.10/sock-expose-so_timestamping-options-for-mptcp.patch
deleted file mode 100644
index 0f246c5b840..00000000000
--- a/queue-5.10/sock-expose-so_timestamping-options-for-mptcp.patch
+++ /dev/null
@@ -1,130 +0,0 @@
-From 1022e0aacca2382b0b688397a993c595fa9a3a71 Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Thu, 3 Jun 2021 16:24:28 -0700
-Subject: sock: expose so_timestamping options for mptcp
-
-From: Florian Westphal
-
-[ Upstream commit ced122d90f52eb6ff37272e32941845d46ac64c6 ]
-
-Similar to previous patch: expose SO_TIMESTAMPING helper so we do not
-have to copy & paste this into the mptcp core.
-
-Acked-by: Paolo Abeni
-Signed-off-by: Florian Westphal
-Signed-off-by: Mat Martineau
-Signed-off-by: David S. Miller
-Stable-dep-of: 448a5ce1120c ("udp6: Fix race condition in udp6_sendmsg & connect")
-Signed-off-by: Sasha Levin
----
- include/net/sock.h | 2 ++
- net/core/sock.c | 71 +++++++++++++++++++++++-----------------------
- 2 files changed, 38 insertions(+), 35 deletions(-)
-
-diff --git a/include/net/sock.h b/include/net/sock.h
-index 09cd879d0dda1..71bd4f6741f1e 100644
---- a/include/net/sock.h
-+++ b/include/net/sock.h
-@@ -2776,6 +2776,8 @@ void sock_def_readable(struct sock *sk);
-
- int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk);
- void sock_set_timestamp(struct sock *sk, int optname, bool valbool);
-+int sock_set_timestamping(struct sock *sk, int optname, int val);
-+
- void sock_enable_timestamps(struct sock *sk);
- void sock_no_linger(struct sock *sk);
- void sock_set_keepalive(struct sock *sk);
-diff --git a/net/core/sock.c b/net/core/sock.c
-index 7d421b0f863f9..b8f8252d36819 100644
---- a/net/core/sock.c
-+++ b/net/core/sock.c
-@@ -786,6 +786,40 @@ void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
- }
- }
-
-+int sock_set_timestamping(struct sock *sk, int optname, int val)
-+{
-+ if (val & ~SOF_TIMESTAMPING_MASK)
-+ return -EINVAL;
-+
-+ if (val & SOF_TIMESTAMPING_OPT_ID &&
-+ !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
-+ if (sk->sk_protocol == IPPROTO_TCP &&
-+ sk->sk_type == SOCK_STREAM) {
-+ if ((1 << sk->sk_state) &
-+ (TCPF_CLOSE | TCPF_LISTEN))
-+ return -EINVAL;
-+ sk->sk_tskey = tcp_sk(sk)->snd_una;
-+ } else {
-+ sk->sk_tskey = 0;
-+ }
-+ }
-+
-+ if (val & SOF_TIMESTAMPING_OPT_STATS &&
-+ !(val & SOF_TIMESTAMPING_OPT_TSONLY))
-+ return -EINVAL;
-+
-+ sk->sk_tsflags = val;
-+ sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
-+
-+ if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
-+ sock_enable_timestamp(sk,
-+ SOCK_TIMESTAMPING_RX_SOFTWARE);
-+ else
-+ sock_disable_timestamp(sk,
-+ (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
-+ return 0;
-+}
-+
- void sock_set_keepalive(struct sock *sk)
- {
- lock_sock(sk);
-@@ -1012,43 +1046,10 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
- case SO_TIMESTAMPNS_NEW:
- sock_set_timestamp(sk, valbool, optname);
- break;
-+
- case SO_TIMESTAMPING_NEW:
- case SO_TIMESTAMPING_OLD:
-- if (val & ~SOF_TIMESTAMPING_MASK) {
-- ret = -EINVAL;
-- break;
-- }
--
-- if (val & SOF_TIMESTAMPING_OPT_ID &&
-- !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
-- if (sk->sk_protocol == IPPROTO_TCP &&
-- sk->sk_type == SOCK_STREAM) {
-- if ((1 << sk->sk_state) &
-- (TCPF_CLOSE | TCPF_LISTEN)) {
-- ret = -EINVAL;
-- break;
-- }
-- sk->sk_tskey = tcp_sk(sk)->snd_una;
-- } else {
-- sk->sk_tskey = 0;
-- }
-- }
--
-- if (val & SOF_TIMESTAMPING_OPT_STATS &&
-- !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
-- ret = -EINVAL;
-- break;
-- }
--
-- sk->sk_tsflags = val;
-- sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
--
-- if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
-- sock_enable_timestamp(sk,
-- SOCK_TIMESTAMPING_RX_SOFTWARE);
-- else
-- sock_disable_timestamp(sk,
-- (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
-+ ret = sock_set_timestamping(sk, optname, val);
- break;
-
- case SO_RCVLOWAT:
---
-2.39.2
-
diff --git a/queue-5.10/tcp-remove-sk_-tr-x_skb_cache.patch b/queue-5.10/tcp-remove-sk_-tr-x_skb_cache.patch
deleted file mode 100644
index e4f6555ec6c..00000000000
--- a/queue-5.10/tcp-remove-sk_-tr-x_skb_cache.patch
+++ /dev/null
@@ -1,294 +0,0 @@
-From 9057e4395d8c2432bef26bad16c64a0a376298b9 Mon Sep 17 00:00:00 2001
-From: Sasha Levin
-Date: Wed, 22 Sep 2021 19:26:43 +0200
-Subject: tcp: remove sk_{tr}x_skb_cache - -From: Eric Dumazet - -[ Upstream commit d8b81175e412c7abebdb5b37d8a84d5fd19b1aad ] - -This reverts the following patches : - -- commit 2e05fcae83c4 ("tcp: fix compile error if !CONFIG_SYSCTL") -- commit 4f661542a402 ("tcp: fix zerocopy and notsent_lowat issues") -- commit 472c2e07eef0 ("tcp: add one skb cache for tx") -- commit 8b27dae5a2e8 ("tcp: add one skb cache for rx") - -Having a cache of one skb (in each direction) per TCP socket is fragile, -since it can cause a significant increase of memory needs, -and not good enough for high speed flows anyway where more than one skb -is needed. - -We want instead to add a generic infrastructure, with more flexible -per-cpu caches, for alien NUMA nodes. - -Acked-by: Paolo Abeni -Acked-by: Mat Martineau -Signed-off-by: Eric Dumazet -Signed-off-by: David S. Miller -Stable-dep-of: 448a5ce1120c ("udp6: Fix race condition in udp6_sendmsg & connect") -Signed-off-by: Sasha Levin ---- - Documentation/networking/ip-sysctl.rst | 8 -------- - include/net/sock.h | 19 ------------------- - net/ipv4/af_inet.c | 4 ---- - net/ipv4/sysctl_net_ipv4.c | 12 ------------ - net/ipv4/tcp.c | 26 -------------------------- - net/ipv4/tcp_ipv4.c | 6 ------ - net/ipv6/tcp_ipv6.c | 6 ------ - 7 files changed, 81 deletions(-) - -diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst -index df26cf4110ef5..7a58e8c8edb24 100644 ---- a/Documentation/networking/ip-sysctl.rst -+++ b/Documentation/networking/ip-sysctl.rst -@@ -916,14 +916,6 @@ tcp_challenge_ack_limit - INTEGER - in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks) - Default: 1000 - --tcp_rx_skb_cache - BOOLEAN -- Controls a per TCP socket cache of one skb, that might help -- performance of some workloads. This might be dangerous -- on systems with a lot of TCP sockets, since it increases -- memory usage. 
-- -- Default: 0 (disabled) -- - UDP variables - ============= - -diff --git a/include/net/sock.h b/include/net/sock.h -index 71bd4f6741f1e..eb96f39a19f35 100644 ---- a/include/net/sock.h -+++ b/include/net/sock.h -@@ -258,7 +258,6 @@ struct bpf_local_storage; - * @sk_dst_cache: destination cache - * @sk_dst_pending_confirm: need to confirm neighbour - * @sk_policy: flow policy -- * @sk_rx_skb_cache: cache copy of recently accessed RX skb - * @sk_receive_queue: incoming packets - * @sk_wmem_alloc: transmit queue bytes committed - * @sk_tsq_flags: TCP Small Queues flags -@@ -320,7 +319,6 @@ struct bpf_local_storage; - * @sk_peek_off: current peek_offset value - * @sk_send_head: front of stuff to transmit - * @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head] -- * @sk_tx_skb_cache: cache copy of recently accessed TX skb - * @sk_security: used by security modules - * @sk_mark: generic packet mark - * @sk_cgrp_data: cgroup data for this cgroup -@@ -386,7 +384,6 @@ struct sock { - atomic_t sk_drops; - int sk_rcvlowat; - struct sk_buff_head sk_error_queue; -- struct sk_buff *sk_rx_skb_cache; - struct sk_buff_head sk_receive_queue; - /* - * The backlog queue is special, it is always used with -@@ -436,7 +433,6 @@ struct sock { - struct sk_buff *sk_send_head; - struct rb_root tcp_rtx_queue; - }; -- struct sk_buff *sk_tx_skb_cache; - struct sk_buff_head sk_write_queue; - __s32 sk_peek_off; - int sk_write_pending; -@@ -1557,18 +1553,10 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) - __sk_mem_reclaim(sk, 1 << 20); - } - --DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); - static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) - { - sk_wmem_queued_add(sk, -skb->truesize); - sk_mem_uncharge(sk, skb->truesize); -- if (static_branch_unlikely(&tcp_tx_skb_cache_key) && -- !sk->sk_tx_skb_cache && !skb_cloned(skb)) { -- skb_ext_reset(skb); -- skb_zcopy_clear(skb, true); -- sk->sk_tx_skb_cache = skb; -- return; -- } - __kfree_skb(skb); - } - -@@ -2579,7 +2567,6 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags) - &skb_shinfo(skb)->tskey); - } - --DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); - /** - * sk_eat_skb - Release a skb if it is no longer needed - * @sk: socket to eat this skb from -@@ -2591,12 +2578,6 @@ DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); - static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) - { - __skb_unlink(skb, &sk->sk_receive_queue); -- if (static_branch_unlikely(&tcp_rx_skb_cache_key) && -- !sk->sk_rx_skb_cache) { -- sk->sk_rx_skb_cache = skb; -- skb_orphan(skb); -- return; -- } - __kfree_skb(skb); - } - -diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c -index acb4887351daf..6a8d17c891e5e 100644 ---- a/net/ipv4/af_inet.c -+++ b/net/ipv4/af_inet.c -@@ -133,10 +133,6 @@ void inet_sock_destruct(struct sock *sk) - struct inet_sock *inet = inet_sk(sk); - - __skb_queue_purge(&sk->sk_receive_queue); -- if (sk->sk_rx_skb_cache) { -- __kfree_skb(sk->sk_rx_skb_cache); -- sk->sk_rx_skb_cache = NULL; -- } - __skb_queue_purge(&sk->sk_error_queue); - - sk_mem_reclaim(sk); -diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c -index 3a34e9768bff0..f9bdba88269aa 100644 ---- a/net/ipv4/sysctl_net_ipv4.c -+++ b/net/ipv4/sysctl_net_ipv4.c -@@ -522,18 +522,6 @@ static struct ctl_table ipv4_table[] = { - .extra1 = &sysctl_fib_sync_mem_min, - .extra2 = &sysctl_fib_sync_mem_max, - }, -- { -- .procname = "tcp_rx_skb_cache", -- .data = &tcp_rx_skb_cache_key.key, -- .mode = 0644, -- 
.proc_handler = proc_do_static_key, -- }, -- { -- .procname = "tcp_tx_skb_cache", -- .data = &tcp_tx_skb_cache_key.key, -- .mode = 0644, -- .proc_handler = proc_do_static_key, -- }, - { } - }; - -diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c -index 82abbf1929851..2eb044c55855f 100644 ---- a/net/ipv4/tcp.c -+++ b/net/ipv4/tcp.c -@@ -318,11 +318,6 @@ struct tcp_splice_state { - unsigned long tcp_memory_pressure __read_mostly; - EXPORT_SYMBOL_GPL(tcp_memory_pressure); - --DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); --EXPORT_SYMBOL(tcp_rx_skb_cache_key); -- --DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); -- - void tcp_enter_memory_pressure(struct sock *sk) - { - unsigned long val; -@@ -870,18 +865,6 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, - { - struct sk_buff *skb; - -- if (likely(!size)) { -- skb = sk->sk_tx_skb_cache; -- if (skb) { -- skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); -- sk->sk_tx_skb_cache = NULL; -- pskb_trim(skb, 0); -- INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); -- skb_shinfo(skb)->tx_flags = 0; -- memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb)); -- return skb; -- } -- } - /* The TCP header must be at least 32-bit aligned. */ - size = ALIGN(size, 4); - -@@ -2728,11 +2711,6 @@ void tcp_write_queue_purge(struct sock *sk) - sk_wmem_free_skb(sk, skb); - } - tcp_rtx_queue_purge(sk); -- skb = sk->sk_tx_skb_cache; -- if (skb) { -- __kfree_skb(skb); -- sk->sk_tx_skb_cache = NULL; -- } - INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); - sk_mem_reclaim(sk); - tcp_clear_all_retrans_hints(tcp_sk(sk)); -@@ -2775,10 +2753,6 @@ int tcp_disconnect(struct sock *sk, int flags) - - tcp_clear_xmit_timers(sk); - __skb_queue_purge(&sk->sk_receive_queue); -- if (sk->sk_rx_skb_cache) { -- __kfree_skb(sk->sk_rx_skb_cache); -- sk->sk_rx_skb_cache = NULL; -- } - WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); - tp->urg_data = 0; - tcp_write_queue_purge(sk); -diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c -index 270b20e0907c2..2a0878179b5ec 100644 ---- a/net/ipv4/tcp_ipv4.c -+++ b/net/ipv4/tcp_ipv4.c -@@ -1944,7 +1944,6 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph, - int tcp_v4_rcv(struct sk_buff *skb) - { - struct net *net = dev_net(skb->dev); -- struct sk_buff *skb_to_free; - int sdif = inet_sdif(skb); - int dif = inet_iif(skb); - const struct iphdr *iph; -@@ -2079,17 +2078,12 @@ int tcp_v4_rcv(struct sk_buff *skb) - tcp_segs_in(tcp_sk(sk), skb); - ret = 0; - if (!sock_owned_by_user(sk)) { -- skb_to_free = sk->sk_rx_skb_cache; -- sk->sk_rx_skb_cache = NULL; - ret = tcp_v4_do_rcv(sk, skb); - } else { - if (tcp_add_backlog(sk, skb)) - goto discard_and_relse; -- skb_to_free = NULL; - } - bh_unlock_sock(sk); -- if (skb_to_free) -- __kfree_skb(skb_to_free); - - put_and_return: - if (refcounted) -diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c -index fe29bc66aeac7..1d118a953abe6 100644 ---- a/net/ipv6/tcp_ipv6.c -+++ b/net/ipv6/tcp_ipv6.c -@@ -1602,7 +1602,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, - - INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) - { -- struct sk_buff *skb_to_free; - int sdif = inet6_sdif(skb); - int dif = inet6_iif(skb); - const struct tcphdr *th; -@@ -1730,17 +1729,12 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) - tcp_segs_in(tcp_sk(sk), skb); - ret = 0; - if (!sock_owned_by_user(sk)) { -- skb_to_free = sk->sk_rx_skb_cache; -- sk->sk_rx_skb_cache = NULL; - ret = tcp_v6_do_rcv(sk, skb); - } else { - if (tcp_add_backlog(sk, skb)) - 
goto discard_and_relse; -- skb_to_free = NULL; - } - bh_unlock_sock(sk); -- if (skb_to_free) -- __kfree_skb(skb_to_free); - put_and_return: - if (refcounted) - sock_put(sk); --- -2.39.2 - diff --git a/queue-5.10/udp6-fix-race-condition-in-udp6_sendmsg-connect.patch b/queue-5.10/udp6-fix-race-condition-in-udp6_sendmsg-connect.patch index 1040330c0cf..d2b2446e21b 100644 --- a/queue-5.10/udp6-fix-race-condition-in-udp6_sendmsg-connect.patch +++ b/queue-5.10/udp6-fix-race-condition-in-udp6_sendmsg-connect.patch @@ -35,14 +35,12 @@ Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- - net/core/sock.c | 2 +- + net/core/sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -diff --git a/net/core/sock.c b/net/core/sock.c -index b8f8252d36819..2fba329e8c7a5 100644 --- a/net/core/sock.c +++ b/net/core/sock.c -@@ -2029,7 +2029,6 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) +@@ -2016,7 +2016,6 @@ void sk_setup_caps(struct sock *sk, stru { u32 max_segs = 1; @@ -50,7 +48,7 @@ index b8f8252d36819..2fba329e8c7a5 100644 sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps; if (sk->sk_route_caps & NETIF_F_GSO) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; -@@ -2044,6 +2043,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) +@@ -2031,6 +2030,7 @@ void sk_setup_caps(struct sock *sk, stru } } sk->sk_gso_max_segs = max_segs; @@ -58,6 +56,3 @@ index b8f8252d36819..2fba329e8c7a5 100644 } EXPORT_SYMBOL_GPL(sk_setup_caps); --- -2.39.2 - diff --git a/queue-5.15/bpf-sockmap-use-stricter-sk-state-checks-in-sk_looku.patch b/queue-5.15/bpf-sockmap-use-stricter-sk-state-checks-in-sk_looku.patch deleted file mode 100644 index 63887d18a3e..00000000000 --- a/queue-5.15/bpf-sockmap-use-stricter-sk-state-checks-in-sk_looku.patch +++ /dev/null @@ -1,93 +0,0 @@ -From 73f99197b24d6a978a0abe975f0239f3c20c13ca Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Wed, 3 Nov 2021 13:47:32 -0700 -Subject: bpf, sockmap: Use stricter sk state checks in sk_lookup_assign - -From: John Fastabend - -[ Upstream commit 40a34121ac1dc52ed9cd34a8f4e48e32517a52fd ] - -In order to fix an issue with sockets in TCP sockmap redirect cases we plan -to allow CLOSE state sockets to exist in the sockmap. However, the check in -bpf_sk_lookup_assign() currently only invalidates sockets in the -TCP_ESTABLISHED case relying on the checks on sockmap insert to ensure we -never SOCK_CLOSE state sockets in the map. - -To prepare for this change we flip the logic in bpf_sk_lookup_assign() to -explicitly test for the accepted cases. Namely, a tcp socket in TCP_LISTEN -or a udp socket in TCP_CLOSE state. This also makes the code more resilent -to future changes. 
- -Suggested-by: Jakub Sitnicki -Signed-off-by: John Fastabend -Signed-off-by: Daniel Borkmann -Reviewed-by: Jakub Sitnicki -Link: https://lore.kernel.org/bpf/20211103204736.248403-2-john.fastabend@gmail.com -Stable-dep-of: 448a5ce1120c ("udp6: Fix race condition in udp6_sendmsg & connect") -Signed-off-by: Sasha Levin ---- - include/linux/skmsg.h | 12 ++++++++++++ - net/core/filter.c | 6 ++++-- - net/core/sock_map.c | 6 ------ - 3 files changed, 16 insertions(+), 8 deletions(-) - -diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h -index 6e18ca234f812..fe2fbf1685494 100644 ---- a/include/linux/skmsg.h -+++ b/include/linux/skmsg.h -@@ -505,6 +505,18 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock) - return !!psock->saved_data_ready; - } - -+static inline bool sk_is_tcp(const struct sock *sk) -+{ -+ return sk->sk_type == SOCK_STREAM && -+ sk->sk_protocol == IPPROTO_TCP; -+} -+ -+static inline bool sk_is_udp(const struct sock *sk) -+{ -+ return sk->sk_type == SOCK_DGRAM && -+ sk->sk_protocol == IPPROTO_UDP; -+} -+ - #if IS_ENABLED(CONFIG_NET_SOCK_MSG) - - #define BPF_F_STRPARSER (1UL << 1) -diff --git a/net/core/filter.c b/net/core/filter.c -index 519315a1acf3a..a28c51f4ab232 100644 ---- a/net/core/filter.c -+++ b/net/core/filter.c -@@ -10508,8 +10508,10 @@ BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx, - return -EINVAL; - if (unlikely(sk && sk_is_refcounted(sk))) - return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */ -- if (unlikely(sk && sk->sk_state == TCP_ESTABLISHED)) -- return -ESOCKTNOSUPPORT; /* reject connected sockets */ -+ if (unlikely(sk && sk_is_tcp(sk) && sk->sk_state != TCP_LISTEN)) -+ return -ESOCKTNOSUPPORT; /* only accept TCP socket in LISTEN */ -+ if (unlikely(sk && sk_is_udp(sk) && sk->sk_state != TCP_CLOSE)) -+ return -ESOCKTNOSUPPORT; /* only accept UDP socket in CLOSE */ - - /* Check if socket is suitable for packet L3/L4 protocol */ - if (sk && sk->sk_protocol != ctx->protocol) -diff --git a/net/core/sock_map.c b/net/core/sock_map.c -index 86b4e8909ad1e..60774b79f2e3a 100644 ---- a/net/core/sock_map.c -+++ b/net/core/sock_map.c -@@ -523,12 +523,6 @@ static bool sock_map_op_okay(const struct bpf_sock_ops_kern *ops) - ops->op == BPF_SOCK_OPS_TCP_LISTEN_CB; - } - --static bool sk_is_tcp(const struct sock *sk) --{ -- return sk->sk_type == SOCK_STREAM && -- sk->sk_protocol == IPPROTO_TCP; --} -- - static bool sock_map_redirect_allowed(const struct sock *sk) - { - if (sk_is_tcp(sk)) --- -2.39.2 - diff --git a/queue-5.15/series b/queue-5.15/series index 7219532bd8b..2435efdfc8c 100644 --- a/queue-5.15/series +++ b/queue-5.15/series @@ -29,8 +29,6 @@ net-sched-reserve-tc_h_ingress-tc_h_clsact-for-ingre.patch net-sched-prohibit-regrafting-ingress-or-clsact-qdis.patch net-sched-fix-null-pointer-dereference-in-mq_attach.patch net-netlink-fix-netlink_list_memberships-length-repo.patch -tcp-remove-sk_-tr-x_skb_cache.patch -bpf-sockmap-use-stricter-sk-state-checks-in-sk_looku.patch udp6-fix-race-condition-in-udp6_sendmsg-connect.patch net-mlx5e-fix-error-handling-in-mlx5e_refresh_tirs.patch net-mlx5-read-embedded-cpu-after-init-bit-cleared.patch diff --git a/queue-5.15/tcp-remove-sk_-tr-x_skb_cache.patch b/queue-5.15/tcp-remove-sk_-tr-x_skb_cache.patch deleted file mode 100644 index c25ec997c31..00000000000 --- a/queue-5.15/tcp-remove-sk_-tr-x_skb_cache.patch +++ /dev/null @@ -1,294 +0,0 @@ -From a838f15e78adc8752ce13a6661ccef7216e20478 Mon Sep 17 00:00:00 2001 -From: Sasha Levin -Date: Wed, 22 Sep 2021 19:26:43 +0200 -Subject: 
tcp: remove sk_{tr}x_skb_cache - -From: Eric Dumazet - -[ Upstream commit d8b81175e412c7abebdb5b37d8a84d5fd19b1aad ] - -This reverts the following patches : - -- commit 2e05fcae83c4 ("tcp: fix compile error if !CONFIG_SYSCTL") -- commit 4f661542a402 ("tcp: fix zerocopy and notsent_lowat issues") -- commit 472c2e07eef0 ("tcp: add one skb cache for tx") -- commit 8b27dae5a2e8 ("tcp: add one skb cache for rx") - -Having a cache of one skb (in each direction) per TCP socket is fragile, -since it can cause a significant increase of memory needs, -and not good enough for high speed flows anyway where more than one skb -is needed. - -We want instead to add a generic infrastructure, with more flexible -per-cpu caches, for alien NUMA nodes. - -Acked-by: Paolo Abeni -Acked-by: Mat Martineau -Signed-off-by: Eric Dumazet -Signed-off-by: David S. Miller -Stable-dep-of: 448a5ce1120c ("udp6: Fix race condition in udp6_sendmsg & connect") -Signed-off-by: Sasha Levin ---- - Documentation/networking/ip-sysctl.rst | 8 -------- - include/net/sock.h | 19 ------------------- - net/ipv4/af_inet.c | 4 ---- - net/ipv4/sysctl_net_ipv4.c | 12 ------------ - net/ipv4/tcp.c | 26 -------------------------- - net/ipv4/tcp_ipv4.c | 6 ------ - net/ipv6/tcp_ipv6.c | 6 ------ - 7 files changed, 81 deletions(-) - -diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst -index 7890b395e629b..a4d0082bd3b90 100644 ---- a/Documentation/networking/ip-sysctl.rst -+++ b/Documentation/networking/ip-sysctl.rst -@@ -991,14 +991,6 @@ tcp_challenge_ack_limit - INTEGER - in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks) - Default: 1000 - --tcp_rx_skb_cache - BOOLEAN -- Controls a per TCP socket cache of one skb, that might help -- performance of some workloads. This might be dangerous -- on systems with a lot of TCP sockets, since it increases -- memory usage. 
-- -- Default: 0 (disabled) -- - UDP variables - ============= - -diff --git a/include/net/sock.h b/include/net/sock.h -index 104d80d850e41..d0683b3c57456 100644 ---- a/include/net/sock.h -+++ b/include/net/sock.h -@@ -261,7 +261,6 @@ struct bpf_local_storage; - * @sk_dst_cache: destination cache - * @sk_dst_pending_confirm: need to confirm neighbour - * @sk_policy: flow policy -- * @sk_rx_skb_cache: cache copy of recently accessed RX skb - * @sk_receive_queue: incoming packets - * @sk_wmem_alloc: transmit queue bytes committed - * @sk_tsq_flags: TCP Small Queues flags -@@ -328,7 +327,6 @@ struct bpf_local_storage; - * @sk_peek_off: current peek_offset value - * @sk_send_head: front of stuff to transmit - * @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head] -- * @sk_tx_skb_cache: cache copy of recently accessed TX skb - * @sk_security: used by security modules - * @sk_mark: generic packet mark - * @sk_cgrp_data: cgroup data for this cgroup -@@ -394,7 +392,6 @@ struct sock { - atomic_t sk_drops; - int sk_rcvlowat; - struct sk_buff_head sk_error_queue; -- struct sk_buff *sk_rx_skb_cache; - struct sk_buff_head sk_receive_queue; - /* - * The backlog queue is special, it is always used with -@@ -447,7 +444,6 @@ struct sock { - struct sk_buff *sk_send_head; - struct rb_root tcp_rtx_queue; - }; -- struct sk_buff *sk_tx_skb_cache; - struct sk_buff_head sk_write_queue; - __s32 sk_peek_off; - int sk_write_pending; -@@ -1596,18 +1592,10 @@ static inline void sk_mem_uncharge(struct sock *sk, int size) - __sk_mem_reclaim(sk, 1 << 20); - } - --DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); - static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) - { - sk_wmem_queued_add(sk, -skb->truesize); - sk_mem_uncharge(sk, skb->truesize); -- if (static_branch_unlikely(&tcp_tx_skb_cache_key) && -- !sk->sk_tx_skb_cache && !skb_cloned(skb)) { -- skb_ext_reset(skb); -- skb_zcopy_clear(skb, true); -- sk->sk_tx_skb_cache = skb; -- return; -- } - __kfree_skb(skb); - } - -@@ -2667,7 +2655,6 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags) - &skb_shinfo(skb)->tskey); - } - --DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); - /** - * sk_eat_skb - Release a skb if it is no longer needed - * @sk: socket to eat this skb from -@@ -2679,12 +2666,6 @@ DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); - static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb) - { - __skb_unlink(skb, &sk->sk_receive_queue); -- if (static_branch_unlikely(&tcp_rx_skb_cache_key) && -- !sk->sk_rx_skb_cache) { -- sk->sk_rx_skb_cache = skb; -- skb_orphan(skb); -- return; -- } - __kfree_skb(skb); - } - -diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c -index e46b11507edc2..4d1c55723ad56 100644 ---- a/net/ipv4/af_inet.c -+++ b/net/ipv4/af_inet.c -@@ -133,10 +133,6 @@ void inet_sock_destruct(struct sock *sk) - struct inet_sock *inet = inet_sk(sk); - - __skb_queue_purge(&sk->sk_receive_queue); -- if (sk->sk_rx_skb_cache) { -- __kfree_skb(sk->sk_rx_skb_cache); -- sk->sk_rx_skb_cache = NULL; -- } - __skb_queue_purge(&sk->sk_error_queue); - - sk_mem_reclaim(sk); -diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c -index 1f22e72074fdc..1aa3ecaf456c7 100644 ---- a/net/ipv4/sysctl_net_ipv4.c -+++ b/net/ipv4/sysctl_net_ipv4.c -@@ -540,18 +540,6 @@ static struct ctl_table ipv4_table[] = { - .extra1 = &sysctl_fib_sync_mem_min, - .extra2 = &sysctl_fib_sync_mem_max, - }, -- { -- .procname = "tcp_rx_skb_cache", -- .data = &tcp_rx_skb_cache_key.key, -- .mode = 0644, -- 
.proc_handler = proc_do_static_key, -- }, -- { -- .procname = "tcp_tx_skb_cache", -- .data = &tcp_tx_skb_cache_key.key, -- .mode = 0644, -- .proc_handler = proc_do_static_key, -- }, - { } - }; - -diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c -index fc0fa1f2ca9b1..bd7930b2be278 100644 ---- a/net/ipv4/tcp.c -+++ b/net/ipv4/tcp.c -@@ -325,11 +325,6 @@ struct tcp_splice_state { - unsigned long tcp_memory_pressure __read_mostly; - EXPORT_SYMBOL_GPL(tcp_memory_pressure); - --DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key); --EXPORT_SYMBOL(tcp_rx_skb_cache_key); -- --DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key); -- - void tcp_enter_memory_pressure(struct sock *sk) - { - unsigned long val; -@@ -866,18 +861,6 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, - { - struct sk_buff *skb; - -- if (likely(!size)) { -- skb = sk->sk_tx_skb_cache; -- if (skb) { -- skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); -- sk->sk_tx_skb_cache = NULL; -- pskb_trim(skb, 0); -- INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); -- skb_shinfo(skb)->tx_flags = 0; -- memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb)); -- return skb; -- } -- } - /* The TCP header must be at least 32-bit aligned. */ - size = ALIGN(size, 4); - -@@ -2952,11 +2935,6 @@ void tcp_write_queue_purge(struct sock *sk) - sk_wmem_free_skb(sk, skb); - } - tcp_rtx_queue_purge(sk); -- skb = sk->sk_tx_skb_cache; -- if (skb) { -- __kfree_skb(skb); -- sk->sk_tx_skb_cache = NULL; -- } - INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); - sk_mem_reclaim(sk); - tcp_clear_all_retrans_hints(tcp_sk(sk)); -@@ -2999,10 +2977,6 @@ int tcp_disconnect(struct sock *sk, int flags) - - tcp_clear_xmit_timers(sk); - __skb_queue_purge(&sk->sk_receive_queue); -- if (sk->sk_rx_skb_cache) { -- __kfree_skb(sk->sk_rx_skb_cache); -- sk->sk_rx_skb_cache = NULL; -- } - WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); - tp->urg_data = 0; - tcp_write_queue_purge(sk); -diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c -index db05ab4287e30..45d12484552fc 100644 ---- a/net/ipv4/tcp_ipv4.c -+++ b/net/ipv4/tcp_ipv4.c -@@ -1975,7 +1975,6 @@ static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph, - int tcp_v4_rcv(struct sk_buff *skb) - { - struct net *net = dev_net(skb->dev); -- struct sk_buff *skb_to_free; - int sdif = inet_sdif(skb); - int dif = inet_iif(skb); - const struct iphdr *iph; -@@ -2124,17 +2123,12 @@ int tcp_v4_rcv(struct sk_buff *skb) - tcp_segs_in(tcp_sk(sk), skb); - ret = 0; - if (!sock_owned_by_user(sk)) { -- skb_to_free = sk->sk_rx_skb_cache; -- sk->sk_rx_skb_cache = NULL; - ret = tcp_v4_do_rcv(sk, skb); - } else { - if (tcp_add_backlog(sk, skb)) - goto discard_and_relse; -- skb_to_free = NULL; - } - bh_unlock_sock(sk); -- if (skb_to_free) -- __kfree_skb(skb_to_free); - - put_and_return: - if (refcounted) -diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c -index b6f5a4474d8bc..7b81ab2b57011 100644 ---- a/net/ipv6/tcp_ipv6.c -+++ b/net/ipv6/tcp_ipv6.c -@@ -1626,7 +1626,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, - - INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) - { -- struct sk_buff *skb_to_free; - int sdif = inet6_sdif(skb); - int dif = inet6_iif(skb); - const struct tcphdr *th; -@@ -1762,17 +1761,12 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) - tcp_segs_in(tcp_sk(sk), skb); - ret = 0; - if (!sock_owned_by_user(sk)) { -- skb_to_free = sk->sk_rx_skb_cache; -- sk->sk_rx_skb_cache = NULL; - ret = tcp_v6_do_rcv(sk, skb); - } else { - if (tcp_add_backlog(sk, skb)) - 
goto discard_and_relse; -- skb_to_free = NULL; - } - bh_unlock_sock(sk); -- if (skb_to_free) -- __kfree_skb(skb_to_free); - put_and_return: - if (refcounted) - sock_put(sk); --- -2.39.2 - diff --git a/queue-5.15/udp6-fix-race-condition-in-udp6_sendmsg-connect.patch b/queue-5.15/udp6-fix-race-condition-in-udp6_sendmsg-connect.patch index 73d8b056fd8..7d9a5f944d1 100644 --- a/queue-5.15/udp6-fix-race-condition-in-udp6_sendmsg-connect.patch +++ b/queue-5.15/udp6-fix-race-condition-in-udp6_sendmsg-connect.patch @@ -35,14 +35,12 @@ Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- - net/core/sock.c | 2 +- + net/core/sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -diff --git a/net/core/sock.c b/net/core/sock.c -index ff7e8fc80731d..93fb3d64f48ee 100644 --- a/net/core/sock.c +++ b/net/core/sock.c -@@ -2165,7 +2165,6 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) +@@ -2165,7 +2165,6 @@ void sk_setup_caps(struct sock *sk, stru { u32 max_segs = 1; @@ -50,7 +48,7 @@ index ff7e8fc80731d..93fb3d64f48ee 100644 sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps; if (sk->sk_route_caps & NETIF_F_GSO) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; -@@ -2180,6 +2179,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) +@@ -2180,6 +2179,7 @@ void sk_setup_caps(struct sock *sk, stru } } sk->sk_gso_max_segs = max_segs; @@ -58,6 +56,3 @@ index ff7e8fc80731d..93fb3d64f48ee 100644 } EXPORT_SYMBOL_GPL(sk_setup_caps); --- -2.39.2 -