From: Greg Kroah-Hartman
Date: Sat, 5 Mar 2022 12:15:55 +0000 (+0100)
Subject: 4.9-stable patches
X-Git-Tag: v4.9.305~76
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=78c873003b9f470148792465635423f2b20ec0a7;p=thirdparty%2Fkernel%2Fstable-queue.git

4.9-stable patches

added patches:
	netfilter-nf_queue-don-t-assume-sk-is-full-socket.patch
	netfilter-nf_queue-fix-possible-use-after-free.patch
	xfrm-fix-mtu-regression.patch
---

diff --git a/queue-4.9/netfilter-nf_queue-don-t-assume-sk-is-full-socket.patch b/queue-4.9/netfilter-nf_queue-don-t-assume-sk-is-full-socket.patch
new file mode 100644
index 00000000000..3fae8b8963c
--- /dev/null
+++ b/queue-4.9/netfilter-nf_queue-don-t-assume-sk-is-full-socket.patch
@@ -0,0 +1,52 @@
+From 747670fd9a2d1b7774030dba65ca022ba442ce71 Mon Sep 17 00:00:00 2001
+From: Florian Westphal
+Date: Fri, 25 Feb 2022 14:02:41 +0100
+Subject: netfilter: nf_queue: don't assume sk is full socket
+
+From: Florian Westphal
+
+commit 747670fd9a2d1b7774030dba65ca022ba442ce71 upstream.
+
+There is no guarantee that state->sk refers to a full socket.
+
+If refcount transitions to 0, sock_put calls sk_free which then ends up
+with garbage fields.
+
+I'd like to thank Oleksandr Natalenko and Jiri Benc for considerable
+debug work and pointing out state->sk oddities.
+
+Fixes: ca6fb0651883 ("tcp: attach SYNACK messages to request sockets instead of listener")
+Tested-by: Oleksandr Natalenko
+Signed-off-by: Florian Westphal
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/netfilter/nf_queue.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -44,6 +44,15 @@ void nf_unregister_queue_handler(struct
+ }
+ EXPORT_SYMBOL(nf_unregister_queue_handler);
+ 
++static void nf_queue_sock_put(struct sock *sk)
++{
++#ifdef CONFIG_INET
++	sock_gen_put(sk);
++#else
++	sock_put(sk);
++#endif
++}
++
+ void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
+ {
+ 	struct nf_hook_state *state = &entry->state;
+@@ -54,7 +63,7 @@ void nf_queue_entry_release_refs(struct
+ 	if (state->out)
+ 		dev_put(state->out);
+ 	if (state->sk)
+-		sock_put(state->sk);
++		nf_queue_sock_put(state->sk);
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ 	if (entry->skb->nf_bridge) {
+ 		struct net_device *physdev;
diff --git a/queue-4.9/netfilter-nf_queue-fix-possible-use-after-free.patch b/queue-4.9/netfilter-nf_queue-fix-possible-use-after-free.patch
new file mode 100644
index 00000000000..d5c459e3b17
--- /dev/null
+++ b/queue-4.9/netfilter-nf_queue-fix-possible-use-after-free.patch
@@ -0,0 +1,99 @@
+From c3873070247d9e3c7a6b0cf9bf9b45e8018427b1 Mon Sep 17 00:00:00 2001
+From: Florian Westphal
+Date: Mon, 28 Feb 2022 06:22:22 +0100
+Subject: netfilter: nf_queue: fix possible use-after-free
+
+From: Florian Westphal
+
+commit c3873070247d9e3c7a6b0cf9bf9b45e8018427b1 upstream.
+
+Eric Dumazet says:
+ The sock_hold() side seems suspect, because there is no guarantee
+ that sk_refcnt is not already 0.
+
+On failure, we cannot queue the packet and need to indicate an
+error. The packet will be dropped by the caller.
+
+v2: split skb prefetch hunk into separate change
+
+Fixes: 271b72c7fa82c ("udp: RCU handling for Unicast packets.")
+Reported-by: Eric Dumazet
+Reviewed-by: Eric Dumazet
+Signed-off-by: Florian Westphal
+Signed-off-by: Greg Kroah-Hartman
+---
+ include/net/netfilter/nf_queue.h | 2 +-
+ net/netfilter/nf_queue.c | 12 ++++++++++--
+ net/netfilter/nfnetlink_queue.c | 12 +++++++++---
+ 3 files changed, 20 insertions(+), 6 deletions(-)
+
+--- a/include/net/netfilter/nf_queue.h
++++ b/include/net/netfilter/nf_queue.h
+@@ -31,7 +31,7 @@ void nf_register_queue_handler(struct ne
+ void nf_unregister_queue_handler(struct net *net);
+ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict);
+ 
+-void nf_queue_entry_get_refs(struct nf_queue_entry *entry);
++bool nf_queue_entry_get_refs(struct nf_queue_entry *entry);
+ void nf_queue_entry_release_refs(struct nf_queue_entry *entry);
+ 
+ static inline void init_hashrandom(u32 *jhash_initval)
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -80,10 +80,13 @@ void nf_queue_entry_release_refs(struct
+ EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
+ 
+ /* Bump dev refs so they don't vanish while packet is out */
+-void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
++bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
+ {
+ 	struct nf_hook_state *state = &entry->state;
+ 
++	if (state->sk && !atomic_inc_not_zero(&state->sk->sk_refcnt))
++		return false;
++
+ 	if (state->in)
+ 		dev_hold(state->in);
+ 	if (state->out)
+@@ -102,6 +105,7 @@ void nf_queue_entry_get_refs(struct nf_q
+ 			dev_hold(physdev);
+ 	}
+ #endif
++	return true;
+ }
+ EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
+ 
+@@ -148,7 +152,11 @@ static int __nf_queue(struct sk_buff *sk
+ 		.size = sizeof(*entry) + afinfo->route_key_size,
+ 	};
+ 
+-	nf_queue_entry_get_refs(entry);
++	if (!nf_queue_entry_get_refs(entry)) {
++		kfree(entry);
++		return -ENOTCONN;
++	}
++
+ 	skb_dst_force(skb);
+ 	afinfo->saveroute(skb, entry);
+ 	status = qh->outfn(entry, queuenum);
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -673,9 +673,15 @@ static struct nf_queue_entry *
+ nf_queue_entry_dup(struct nf_queue_entry *e)
+ {
+ 	struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
+-	if (entry)
+-		nf_queue_entry_get_refs(entry);
+-	return entry;
++
++	if (!entry)
++		return NULL;
++
++	if (nf_queue_entry_get_refs(entry))
++		return entry;
++
++	kfree(entry);
++	return NULL;
+ }
+ 
+ #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
diff --git a/queue-4.9/series b/queue-4.9/series
index f59b87b4172..ec1d1f47832 100644
--- a/queue-4.9/series
+++ b/queue-4.9/series
@@ -10,3 +10,6 @@ usb-gadget-don-t-release-an-existing-dev-buf.patch
 usb-gadget-clear-related-members-when-goto-fail.patch
 ata-pata_hpt37x-fix-pci-clock-detection.patch
 asoc-ops-shift-tested-values-in-snd_soc_put_volsw-by-min.patch
+xfrm-fix-mtu-regression.patch
+netfilter-nf_queue-don-t-assume-sk-is-full-socket.patch
+netfilter-nf_queue-fix-possible-use-after-free.patch
diff --git a/queue-4.9/xfrm-fix-mtu-regression.patch b/queue-4.9/xfrm-fix-mtu-regression.patch
new file mode 100644
index 00000000000..33fffba837e
--- /dev/null
+++ b/queue-4.9/xfrm-fix-mtu-regression.patch
@@ -0,0 +1,79 @@
+From 6596a0229541270fb8d38d989f91b78838e5e9da Mon Sep 17 00:00:00 2001
+From: Jiri Bohac
+Date: Wed, 19 Jan 2022 10:22:53 +0100
+Subject: xfrm: fix MTU regression
+
+From: Jiri Bohac
+
+commit 6596a0229541270fb8d38d989f91b78838e5e9da upstream.
+
+Commit 749439bfac6e1a2932c582e2699f91d329658196 ("ipv6: fix udpv6
+sendmsg crash caused by too small MTU") breaks PMTU for xfrm.
+
+A Packet Too Big ICMPv6 message received in response to an ESP
+packet will prevent all further communication through the tunnel
+if the reported MTU minus the ESP overhead is smaller than 1280.
+
+E.g. in a case of a tunnel-mode ESP with sha256/aes the overhead
+is 92 bytes. Receiving a PTB with MTU of 1371 or less will result
+in all further packets in the tunnel dropped. A ping through the
+tunnel fails with "ping: sendmsg: Invalid argument".
+
+Apparently the MTU on the xfrm route is smaller than 1280 and
+fails the check inside ip6_setup_cork() added by 749439bf.
+
+We found this by debugging USGv6/ipv6ready failures. Failing
+tests are: "Phase-2 Interoperability Test Scenario IPsec" /
+5.3.11 and 5.4.11 (Tunnel Mode: Fragmentation).
+
+Commit b515d2637276a3810d6595e10ab02c13bfd0b63a ("xfrm:
+xfrm_state_mtu should return at least 1280 for ipv6") attempted
+to fix this but caused another regression in TCP MSS calculations
+and had to be reverted.
+
+The patch below fixes the situation by dropping the MTU
+check and instead checking for the underflows described in the
+749439bf commit message.
+
+Signed-off-by: Jiri Bohac
+Fixes: 749439bfac6e ("ipv6: fix udpv6 sendmsg crash caused by too small MTU")
+Signed-off-by: Steffen Klassert
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/ipv6/ip6_output.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1274,8 +1274,6 @@ static int ip6_setup_cork(struct sock *s
+ 		if (np->frag_size)
+ 			mtu = np->frag_size;
+ 	}
+-	if (mtu < IPV6_MIN_MTU)
+-		return -EINVAL;
+ 	cork->base.fragsize = mtu;
+ 	if (dst_allfrag(rt->dst.path))
+ 		cork->base.flags |= IPCORK_ALLFRAG;
+@@ -1324,8 +1322,6 @@ static int __ip6_append_data(struct sock
+ 
+ 	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
+ 			(opt ? opt->opt_nflen : 0);
+-	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
+-		     sizeof(struct frag_hdr);
+ 
+ 	headersize = sizeof(struct ipv6hdr) +
+ 		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
+@@ -1333,6 +1329,13 @@ static int __ip6_append_data(struct sock
+ 		      sizeof(struct frag_hdr) : 0) +
+ 		     rt->rt6i_nfheader_len;
+ 
++	if (mtu < fragheaderlen ||
++	    ((mtu - fragheaderlen) & ~7) + fragheaderlen < sizeof(struct frag_hdr))
++		goto emsgsize;
++
++	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
++		     sizeof(struct frag_hdr);
++
+ 	/* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
+ 	 * the first fragment
+ 	 */
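
For context on the two nf_queue patches above: both hinge on taking a socket
reference only while sk_refcnt is still non-zero (atomic_inc_not_zero() in
nf_queue_entry_get_refs()); when that fails, __nf_queue() frees the entry and
returns -ENOTCONN and nf_queue_entry_dup() returns NULL, so the packet is
dropped instead of being queued with a dangling socket. Below is a minimal
user-space sketch of that "increment only if not already zero" pattern, using
C11 atomics as a stand-in for the kernel's atomic_t API; struct obj, obj_get()
and obj_put() are invented names for illustration only, not kernel interfaces.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
	/* payload would live here */
};

/*
 * Take a reference only while refcnt is still non-zero, analogous to
 * atomic_inc_not_zero(): once the count has dropped to zero the object
 * is already being torn down and must not be revived.
 */
static bool obj_get(struct obj *o)
{
	int old = atomic_load(&o->refcnt);

	while (old != 0) {
		/* on failure, the CAS reloads the current value into old */
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;
	}
	return false;
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);	/* last reference gone */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->refcnt, 1);

	if (obj_get(o))		/* succeeds: count goes 1 -> 2 */
		printf("reference taken\n");

	obj_put(o);		/* 2 -> 1 */
	obj_put(o);		/* 1 -> 0, object freed */
	return 0;
}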