git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 25 Feb 2022 15:55:12 +0000 (16:55 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Fri, 25 Feb 2022 15:55:12 +0000 (16:55 +0100)
added patches:
bpf-do-not-try-bpf_msg_push_data-with-len-0.patch
drm-edid-always-set-rgb444.patch
gso-do-not-skip-outer-ip-header-in-case-of-ipip-and-net_failover.patch
net-__pskb_pull_tail-pskb_carve_frag_list-drop_monitor-friends.patch
net-force-inlining-of-checksum-functions-in-net-checksum.h.patch
net-ll_temac-check-the-return-value-of-devm_kmalloc.patch
net-mlx5e-fix-wrong-return-value-on-ioctl-eeprom-query-failure.patch
netfilter-nf_tables-fix-memory-leak-during-stateful-obj-update.patch
nfp-flower-fix-a-potential-leak-in-nfp_tunnel_add_shared_mac.patch
openvswitch-fix-setting-ipv6-fields-causing-hw-csum-failure.patch
perf-data-fix-double-free-in-perf_session__delete.patch
ping-remove-pr_err-from-ping_lookup.patch
tipc-fix-end-of-loop-tests-for-list_for_each_entry.patch

14 files changed:
queue-5.4/bpf-do-not-try-bpf_msg_push_data-with-len-0.patch [new file with mode: 0644]
queue-5.4/drm-edid-always-set-rgb444.patch [new file with mode: 0644]
queue-5.4/gso-do-not-skip-outer-ip-header-in-case-of-ipip-and-net_failover.patch [new file with mode: 0644]
queue-5.4/net-__pskb_pull_tail-pskb_carve_frag_list-drop_monitor-friends.patch [new file with mode: 0644]
queue-5.4/net-force-inlining-of-checksum-functions-in-net-checksum.h.patch [new file with mode: 0644]
queue-5.4/net-ll_temac-check-the-return-value-of-devm_kmalloc.patch [new file with mode: 0644]
queue-5.4/net-mlx5e-fix-wrong-return-value-on-ioctl-eeprom-query-failure.patch [new file with mode: 0644]
queue-5.4/netfilter-nf_tables-fix-memory-leak-during-stateful-obj-update.patch [new file with mode: 0644]
queue-5.4/nfp-flower-fix-a-potential-leak-in-nfp_tunnel_add_shared_mac.patch [new file with mode: 0644]
queue-5.4/openvswitch-fix-setting-ipv6-fields-causing-hw-csum-failure.patch [new file with mode: 0644]
queue-5.4/perf-data-fix-double-free-in-perf_session__delete.patch [new file with mode: 0644]
queue-5.4/ping-remove-pr_err-from-ping_lookup.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/tipc-fix-end-of-loop-tests-for-list_for_each_entry.patch [new file with mode: 0644]

diff --git a/queue-5.4/bpf-do-not-try-bpf_msg_push_data-with-len-0.patch b/queue-5.4/bpf-do-not-try-bpf_msg_push_data-with-len-0.patch
new file mode 100644 (file)
index 0000000..5522f52
--- /dev/null
@@ -0,0 +1,43 @@
+From 4a11678f683814df82fca9018d964771e02d7e6d Mon Sep 17 00:00:00 2001
+From: Felix Maurer <fmaurer@redhat.com>
+Date: Wed, 9 Feb 2022 16:55:26 +0100
+Subject: bpf: Do not try bpf_msg_push_data with len 0
+
+From: Felix Maurer <fmaurer@redhat.com>
+
+commit 4a11678f683814df82fca9018d964771e02d7e6d upstream.
+
+If bpf_msg_push_data() is called with len 0 (as it happens during
+selftests/bpf/test_sockmap), we do not need to do anything and can
+return early.
+
+Calling bpf_msg_push_data() with len 0 previously led to a wrong ENOMEM
+error: we later called get_order(copy + len); if len was 0, copy + len
+was also often 0 and get_order() returned some undefined value (at the
+moment 52). alloc_pages() caught that and failed, but then bpf_msg_push_data()
+returned ENOMEM. This was wrong because we are most probably not out of
+memory and actually do not need any additional memory.
+
+Fixes: 6fff607e2f14b ("bpf: sk_msg program helper bpf_msg_push_data")
+Signed-off-by: Felix Maurer <fmaurer@redhat.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Yonghong Song <yhs@fb.com>
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Link: https://lore.kernel.org/bpf/df69012695c7094ccb1943ca02b4920db3537466.1644421921.git.fmaurer@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/filter.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2516,6 +2516,9 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_m
+       if (unlikely(flags))
+               return -EINVAL;
++      if (unlikely(len == 0))
++              return 0;
++
+       /* First find the starting scatterlist element */
+       i = msg->sg.start;
+       do {
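
A rough, stand-alone illustration of the idea the patch implements -- bail out
before any allocation-size arithmetic when there is nothing to push. The
push_data() helper below is a hypothetical user-space stand-in, not the kernel
code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a "push len bytes into the buffer" helper. */
static int push_data(char **buf, size_t cur, size_t len)
{
	if (len == 0)			/* mirror of the fix: nothing to do */
		return 0;

	char *tmp = realloc(*buf, cur + len);	/* grow only when actually needed */
	if (!tmp)
		return -ENOMEM;
	*buf = tmp;
	return 0;
}

int main(void)
{
	char *buf = NULL;

	printf("len 0 -> %d\n", push_data(&buf, 0, 0));	/* 0, not -ENOMEM */
	printf("len 8 -> %d\n", push_data(&buf, 0, 8));
	free(buf);
	return 0;
}
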
diff --git a/queue-5.4/drm-edid-always-set-rgb444.patch b/queue-5.4/drm-edid-always-set-rgb444.patch
new file mode 100644 (file)
index 0000000..c44d318
--- /dev/null
@@ -0,0 +1,65 @@
+From ecbd4912a693b862e25cba0a6990a8c95b00721e Mon Sep 17 00:00:00 2001
+From: Maxime Ripard <maxime@cerno.tech>
+Date: Thu, 3 Feb 2022 12:54:16 +0100
+Subject: drm/edid: Always set RGB444
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maxime Ripard <maxime@cerno.tech>
+
+commit ecbd4912a693b862e25cba0a6990a8c95b00721e upstream.
+
+In order to fill the drm_display_info structure each time an EDID is
+read, the code currently will call drm_add_display_info with the parsed
+EDID.
+
+drm_add_display_info will then call drm_reset_display_info to reset all
+the fields to 0, and then set them to the proper value depending on the
+EDID.
+
+In the color_formats case, we will thus report that we don't support any
+color format, and then fill it back with RGB444 plus the additional
+formats described in the EDID Feature Support byte.
+
+However, since that byte only contains format-related bits since the 1.4
+specification, this doesn't happen if the EDID is following an earlier
+specification. In turn, it means that for one of these EDID, we end up
+with color_formats set to 0.
+
+The EDID 1.3 specification never really specifies what it means by RGB
+exactly, but since both HDMI and DVI will use RGB444, it's fairly safe
+to assume it's supposed to be RGB444.
+
+Let's move the addition of RGB444 to color_formats earlier in
+drm_add_display_info() so that it's always set for a digital display.
+
+Fixes: da05a5a71ad8 ("drm: parse color format support for digital displays")
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Reported-by: Matthias Reichl <hias@horus.com>
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220203115416.1137308-1-maxime@cerno.tech
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/drm_edid.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -4659,6 +4659,7 @@ u32 drm_add_display_info(struct drm_conn
+       if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
+               return quirks;
++      info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+       drm_parse_cea_ext(connector, edid);
+       /*
+@@ -4707,7 +4708,6 @@ u32 drm_add_display_info(struct drm_conn
+       DRM_DEBUG("%s: Assigning EDID-1.4 digital sink color depth as %d bpc.\n",
+                         connector->name, info->bpc);
+-      info->color_formats |= DRM_COLOR_FORMAT_RGB444;
+       if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
+               info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
+       if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
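
A simplified sketch of the ordering issue described above (hypothetical
structure and flag names, not the DRM code): the capability mask is reset on
every parse, so the baseline format has to be set for every digital sink
before the EDID-1.4-only feature bits are consulted.

#include <stdio.h>

#define FMT_RGB444   0x1
#define FMT_YCRCB444 0x2

struct display_info { unsigned int color_formats; };

static void parse_edid(struct display_info *info, int edid_rev, unsigned int features)
{
	info->color_formats = 0;		/* reset on every parse */
	info->color_formats |= FMT_RGB444;	/* the fix: always valid for digital sinks */

	if (edid_rev >= 4 && (features & 0x1))	/* format bits only defined since EDID 1.4 */
		info->color_formats |= FMT_YCRCB444;
}

int main(void)
{
	struct display_info info;

	parse_edid(&info, 3, 0);	/* an EDID 1.3 sink */
	printf("color_formats: %#x\n", info.color_formats);	/* RGB444 instead of 0 */
	return 0;
}
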
diff --git a/queue-5.4/gso-do-not-skip-outer-ip-header-in-case-of-ipip-and-net_failover.patch b/queue-5.4/gso-do-not-skip-outer-ip-header-in-case-of-ipip-and-net_failover.patch
new file mode 100644 (file)
index 0000000..fa66e28
--- /dev/null
@@ -0,0 +1,103 @@
+From cc20cced0598d9a5ff91ae4ab147b3b5e99ee819 Mon Sep 17 00:00:00 2001
+From: Tao Liu <thomas.liu@ucloud.cn>
+Date: Fri, 18 Feb 2022 22:35:24 +0800
+Subject: gso: do not skip outer ip header in case of ipip and net_failover
+
+From: Tao Liu <thomas.liu@ucloud.cn>
+
+commit cc20cced0598d9a5ff91ae4ab147b3b5e99ee819 upstream.
+
+We encountered a tcp drop issue in our cloud environment. A packet GROed
+on the host is forwarded to a VM virtio_net nic with net_failover enabled.
+The VM acts as an IPVS LB with ipip encapsulation. The full path is:
+host gro -> vm virtio_net rx -> net_failover rx -> ipvs fullnat
+ -> ipip encap -> net_failover tx -> virtio_net tx
+
+When net_failover transmits an ipip pkt (gso_type = 0x0103, which means
+SKB_GSO_TCPV4, SKB_GSO_DODGY and SKB_GSO_IPXIP4), no gso is performed
+because the device supports TSO and GSO_IPXIP4, but network_header still
+points to the inner ip header.
+
+Call Trace:
+ tcp4_gso_segment        ------> return NULL
+ inet_gso_segment        ------> inner iph, network_header points to
+ ipip_gso_segment
+ inet_gso_segment        ------> outer iph
+ skb_mac_gso_segment
+
+Afterwards virtio_net transmits the pkt; only the inner ip header is
+modified while the outer one remains unchanged, so the pkt will be
+dropped on the remote host.
+
+Call Trace:
+ inet_gso_segment        ------> inner iph, outer iph is skipped
+ skb_mac_gso_segment
+ __skb_gso_segment
+ validate_xmit_skb
+ validate_xmit_skb_list
+ sch_direct_xmit
+ __qdisc_run
+ __dev_queue_xmit        ------> virtio_net
+ dev_hard_start_xmit
+ __dev_queue_xmit        ------> net_failover
+ ip_finish_output2
+ ip_output
+ iptunnel_xmit
+ ip_tunnel_xmit
+ ipip_tunnel_xmit        ------> ipip
+ dev_hard_start_xmit
+ __dev_queue_xmit
+ ip_finish_output2
+ ip_output
+ ip_forward
+ ip_rcv
+ __netif_receive_skb_one_core
+ netif_receive_skb_internal
+ napi_gro_receive
+ receive_buf
+ virtnet_poll
+ net_rx_action
+
+The root cause of this issue is specific to the rare combination of
+SKB_GSO_DODGY and a tunnel device that adds an SKB_GSO_ tunnel option.
+SKB_GSO_DODGY is set from the external virtio_net. We need to reset the
+network header when callbacks.gso_segment() returns NULL.
+
+This patch also includes ipv6_gso_segment(), considering SIT, etc.
+
+Fixes: cb32f511a70b ("ipip: add GSO/TSO support")
+Signed-off-by: Tao Liu <thomas.liu@ucloud.cn>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/af_inet.c     |    5 ++++-
+ net/ipv6/ip6_offload.c |    2 ++
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1344,8 +1344,11 @@ struct sk_buff *inet_gso_segment(struct
+       }
+       ops = rcu_dereference(inet_offloads[proto]);
+-      if (likely(ops && ops->callbacks.gso_segment))
++      if (likely(ops && ops->callbacks.gso_segment)) {
+               segs = ops->callbacks.gso_segment(skb, features);
++              if (!segs)
++                      skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
++      }
+       if (IS_ERR_OR_NULL(segs))
+               goto out;
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -111,6 +111,8 @@ static struct sk_buff *ipv6_gso_segment(
+       if (likely(ops && ops->callbacks.gso_segment)) {
+               skb_reset_transport_header(skb);
+               segs = ops->callbacks.gso_segment(skb, features);
++              if (!segs)
++                      skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
+       }
+       if (IS_ERR_OR_NULL(segs))
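
A stand-alone sketch of the recovery step the patch adds (hypothetical packet
structure, not the skb API): remember the outer header offset and fall back to
it when the inner handler declines to segment, so later processing still
points at the outer header.

#include <stddef.h>
#include <stdio.h>

struct pkt {
	size_t network_off;	/* offset of the header currently "in focus" */
};

static int inner_segment(struct pkt *p)
{
	(void)p;
	return 0;		/* 0 = "nothing segmented", like a NULL return */
}

static void outer_segment(struct pkt *p, size_t outer_off, size_t inner_off)
{
	p->network_off = inner_off;		/* hand the inner header to the callback */
	if (!inner_segment(p))
		p->network_off = outer_off;	/* the fix: restore the outer header offset */
	printf("network_off now %zu\n", p->network_off);
}

int main(void)
{
	struct pkt p = { .network_off = 0 };

	outer_segment(&p, 14, 34);	/* e.g. outer IP header at 14, inner at 34 */
	return 0;
}
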
diff --git a/queue-5.4/net-__pskb_pull_tail-pskb_carve_frag_list-drop_monitor-friends.patch b/queue-5.4/net-__pskb_pull_tail-pskb_carve_frag_list-drop_monitor-friends.patch
new file mode 100644 (file)
index 0000000..c10bca7
--- /dev/null
@@ -0,0 +1,42 @@
+From ef527f968ae05c6717c39f49c8709a7e2c19183a Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 20 Feb 2022 07:40:52 -0800
+Subject: net: __pskb_pull_tail() & pskb_carve_frag_list() drop_monitor friends
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit ef527f968ae05c6717c39f49c8709a7e2c19183a upstream.
+
+Whenever one of these functions pull all data from an skb in a frag_list,
+use consume_skb() instead of kfree_skb() to avoid polluting drop
+monitoring.
+
+Fixes: 6fa01ccd8830 ("skbuff: Add pskb_extract() helper function")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20220220154052.1308469-1-eric.dumazet@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2139,7 +2139,7 @@ void *__pskb_pull_tail(struct sk_buff *s
+               /* Free pulled out fragments. */
+               while ((list = skb_shinfo(skb)->frag_list) != insp) {
+                       skb_shinfo(skb)->frag_list = list->next;
+-                      kfree_skb(list);
++                      consume_skb(list);
+               }
+               /* And insert new clone at head. */
+               if (clone) {
+@@ -5846,7 +5846,7 @@ static int pskb_carve_frag_list(struct s
+       /* Free pulled out fragments. */
+       while ((list = shinfo->frag_list) != insp) {
+               shinfo->frag_list = list->next;
+-              kfree_skb(list);
++              consume_skb(list);
+       }
+       /* And insert new clone at head. */
+       if (clone) {
diff --git a/queue-5.4/net-force-inlining-of-checksum-functions-in-net-checksum.h.patch b/queue-5.4/net-force-inlining-of-checksum-functions-in-net-checksum.h.patch
new file mode 100644 (file)
index 0000000..98eded4
--- /dev/null
@@ -0,0 +1,232 @@
+From 5486f5bf790b5c664913076c3194b8f916a5c7ad Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+Date: Thu, 17 Feb 2022 14:35:49 +0100
+Subject: net: Force inlining of checksum functions in net/checksum.h
+
+From: Christophe Leroy <christophe.leroy@csgroup.eu>
+
+commit 5486f5bf790b5c664913076c3194b8f916a5c7ad upstream.
+
+All functions defined as static inline in net/checksum.h are
+meant to be inlined for performance reason.
+
+But since commit ac7c3e4ff401 ("compiler: enable
+CONFIG_OPTIMIZE_INLINING forcibly") the compiler is allowed to
+uninline functions when it wants.
+
+Fair enough in the general case, but for tiny performance critical
+checksum helpers that's counter-productive.
+
+The problem mainly arises when CONFIG_CC_OPTIMISE_FOR_SIZE is selected:
+because those helpers are 'static inline' in header files, you suddenly
+find them duplicated many times in the resulting vmlinux.
+
+Here is a typical example when building powerpc pmac32_defconfig
+with CONFIG_CC_OPTIMISE_FOR_SIZE. csum_sub() appears 4 times:
+
+       c04a23cc <csum_sub>:
+       c04a23cc:       7c 84 20 f8     not     r4,r4
+       c04a23d0:       7c 63 20 14     addc    r3,r3,r4
+       c04a23d4:       7c 63 01 94     addze   r3,r3
+       c04a23d8:       4e 80 00 20     blr
+               ...
+       c04a2ce8:       4b ff f6 e5     bl      c04a23cc <csum_sub>
+               ...
+       c04a2d2c:       4b ff f6 a1     bl      c04a23cc <csum_sub>
+               ...
+       c04a2d54:       4b ff f6 79     bl      c04a23cc <csum_sub>
+               ...
+       c04a754c <csum_sub>:
+       c04a754c:       7c 84 20 f8     not     r4,r4
+       c04a7550:       7c 63 20 14     addc    r3,r3,r4
+       c04a7554:       7c 63 01 94     addze   r3,r3
+       c04a7558:       4e 80 00 20     blr
+               ...
+       c04ac930:       4b ff ac 1d     bl      c04a754c <csum_sub>
+               ...
+       c04ad264:       4b ff a2 e9     bl      c04a754c <csum_sub>
+               ...
+       c04e3b08 <csum_sub>:
+       c04e3b08:       7c 84 20 f8     not     r4,r4
+       c04e3b0c:       7c 63 20 14     addc    r3,r3,r4
+       c04e3b10:       7c 63 01 94     addze   r3,r3
+       c04e3b14:       4e 80 00 20     blr
+               ...
+       c04e5788:       4b ff e3 81     bl      c04e3b08 <csum_sub>
+               ...
+       c04e65c8:       4b ff d5 41     bl      c04e3b08 <csum_sub>
+               ...
+       c0512d34 <csum_sub>:
+       c0512d34:       7c 84 20 f8     not     r4,r4
+       c0512d38:       7c 63 20 14     addc    r3,r3,r4
+       c0512d3c:       7c 63 01 94     addze   r3,r3
+       c0512d40:       4e 80 00 20     blr
+               ...
+       c0512dfc:       4b ff ff 39     bl      c0512d34 <csum_sub>
+               ...
+       c05138bc:       4b ff f4 79     bl      c0512d34 <csum_sub>
+               ...
+
+Restore the expected behaviour by using __always_inline for all
+functions defined in net/checksum.h
+
+vmlinux size is even reduced by 256 bytes with this patch:
+
+          text    data     bss     dec     hex filename
+       6980022 2515362  194384 9689768  93daa8 vmlinux.before
+       6979862 2515266  194384 9689512  93d9a8 vmlinux.now
+
+Fixes: ac7c3e4ff401 ("compiler: enable CONFIG_OPTIMIZE_INLINING forcibly")
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/checksum.h |   41 +++++++++++++++++++++--------------------
+ 1 file changed, 21 insertions(+), 20 deletions(-)
+
+--- a/include/net/checksum.h
++++ b/include/net/checksum.h
+@@ -22,7 +22,7 @@
+ #include <asm/checksum.h>
+ #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+-static inline
++static __always_inline
+ __wsum csum_and_copy_from_user (const void __user *src, void *dst,
+                                     int len, __wsum sum, int *err_ptr)
+ {
+@@ -37,7 +37,7 @@ __wsum csum_and_copy_from_user (const vo
+ #endif
+ #ifndef HAVE_CSUM_COPY_USER
+-static __inline__ __wsum csum_and_copy_to_user
++static __always_inline __wsum csum_and_copy_to_user
+ (const void *src, void __user *dst, int len, __wsum sum, int *err_ptr)
+ {
+       sum = csum_partial(src, len, sum);
+@@ -54,7 +54,7 @@ static __inline__ __wsum csum_and_copy_t
+ #endif
+ #ifndef HAVE_ARCH_CSUM_ADD
+-static inline __wsum csum_add(__wsum csum, __wsum addend)
++static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
+ {
+       u32 res = (__force u32)csum;
+       res += (__force u32)addend;
+@@ -62,12 +62,12 @@ static inline __wsum csum_add(__wsum csu
+ }
+ #endif
+-static inline __wsum csum_sub(__wsum csum, __wsum addend)
++static __always_inline __wsum csum_sub(__wsum csum, __wsum addend)
+ {
+       return csum_add(csum, ~addend);
+ }
+-static inline __sum16 csum16_add(__sum16 csum, __be16 addend)
++static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend)
+ {
+       u16 res = (__force u16)csum;
+@@ -75,12 +75,12 @@ static inline __sum16 csum16_add(__sum16
+       return (__force __sum16)(res + (res < (__force u16)addend));
+ }
+-static inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
++static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend)
+ {
+       return csum16_add(csum, ~addend);
+ }
+-static inline __wsum
++static __always_inline __wsum
+ csum_block_add(__wsum csum, __wsum csum2, int offset)
+ {
+       u32 sum = (__force u32)csum2;
+@@ -92,36 +92,37 @@ csum_block_add(__wsum csum, __wsum csum2
+       return csum_add(csum, (__force __wsum)sum);
+ }
+-static inline __wsum
++static __always_inline __wsum
+ csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len)
+ {
+       return csum_block_add(csum, csum2, offset);
+ }
+-static inline __wsum
++static __always_inline __wsum
+ csum_block_sub(__wsum csum, __wsum csum2, int offset)
+ {
+       return csum_block_add(csum, ~csum2, offset);
+ }
+-static inline __wsum csum_unfold(__sum16 n)
++static __always_inline __wsum csum_unfold(__sum16 n)
+ {
+       return (__force __wsum)n;
+ }
+-static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum)
++static __always_inline
++__wsum csum_partial_ext(const void *buff, int len, __wsum sum)
+ {
+       return csum_partial(buff, len, sum);
+ }
+ #define CSUM_MANGLED_0 ((__force __sum16)0xffff)
+-static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
++static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff)
+ {
+       *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum)));
+ }
+-static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
++static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to)
+ {
+       __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from);
+@@ -134,7 +135,7 @@ static inline void csum_replace4(__sum16
+  *  m : old value of a 16bit field
+  *  m' : new value of a 16bit field
+  */
+-static inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
++static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new)
+ {
+       *sum = ~csum16_add(csum16_sub(~(*sum), old), new);
+ }
+@@ -153,16 +154,16 @@ void inet_proto_csum_replace16(__sum16 *
+ void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
+                                    __wsum diff, bool pseudohdr);
+-static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
+-                                          __be16 from, __be16 to,
+-                                          bool pseudohdr)
++static __always_inline
++void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
++                            __be16 from, __be16 to, bool pseudohdr)
+ {
+       inet_proto_csum_replace4(sum, skb, (__force __be32)from,
+                                (__force __be32)to, pseudohdr);
+ }
+-static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
+-                                  int start, int offset)
++static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum,
++                                           int start, int offset)
+ {
+       __sum16 *psum = (__sum16 *)(ptr + offset);
+       __wsum delta;
+@@ -178,7 +179,7 @@ static inline __wsum remcsum_adjust(void
+       return delta;
+ }
+-static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
++static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
+ {
+       *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
+ }
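
For reference, a minimal user-space example of the attribute this patch
switches to (plain C, not the kernel header): under -Os a plain 'static
inline' is only a hint, while the always_inline attribute forces the helper to
be inlined at every call site.

#include <stdint.h>
#include <stdio.h>

#define __always_inline inline __attribute__((__always_inline__))

static __always_inline uint32_t csum_add32(uint32_t csum, uint32_t addend)
{
	uint32_t res = csum + addend;

	return res + (res < addend);	/* fold the end-around carry back in */
}

int main(void)
{
	/* With -Os and a plain "static inline", the compiler may emit this as an
	 * out-of-line function; __always_inline keeps it inlined here. */
	printf("%#x\n", csum_add32(0xffff0000u, 0x00010000u));
	return 0;
}
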
diff --git a/queue-5.4/net-ll_temac-check-the-return-value-of-devm_kmalloc.patch b/queue-5.4/net-ll_temac-check-the-return-value-of-devm_kmalloc.patch
new file mode 100644 (file)
index 0000000..4063a6e
--- /dev/null
@@ -0,0 +1,33 @@
+From b352c3465bb808ab700d03f5bac2f7a6f37c5350 Mon Sep 17 00:00:00 2001
+From: Xiaoke Wang <xkernel.wang@foxmail.com>
+Date: Fri, 18 Feb 2022 10:19:39 +0800
+Subject: net: ll_temac: check the return value of devm_kmalloc()
+
+From: Xiaoke Wang <xkernel.wang@foxmail.com>
+
+commit b352c3465bb808ab700d03f5bac2f7a6f37c5350 upstream.
+
+devm_kmalloc() returns a pointer to allocated memory on success, NULL
+on failure, but lp->indirect_lock is allocated by devm_kmalloc()
+without a proper check. It is better to check the returned value to
+prevent a potential invalid memory access.
+
+Fixes: f14f5c11f051 ("net: ll_temac: Support indirect_mutex share within TEMAC IP")
+Signed-off-by: Xiaoke Wang <xkernel.wang@foxmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/xilinx/ll_temac_main.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
+@@ -1345,6 +1345,8 @@ static int temac_probe(struct platform_d
+               lp->indirect_lock = devm_kmalloc(&pdev->dev,
+                                                sizeof(*lp->indirect_lock),
+                                                GFP_KERNEL);
++              if (!lp->indirect_lock)
++                      return -ENOMEM;
+               spin_lock_init(lp->indirect_lock);
+       }
diff --git a/queue-5.4/net-mlx5e-fix-wrong-return-value-on-ioctl-eeprom-query-failure.patch b/queue-5.4/net-mlx5e-fix-wrong-return-value-on-ioctl-eeprom-query-failure.patch
new file mode 100644 (file)
index 0000000..e50cacf
--- /dev/null
@@ -0,0 +1,32 @@
+From 0b89429722353d112f8b8b29ca397e95fa994d27 Mon Sep 17 00:00:00 2001
+From: Gal Pressman <gal@nvidia.com>
+Date: Wed, 2 Feb 2022 16:07:21 +0200
+Subject: net/mlx5e: Fix wrong return value on ioctl EEPROM query failure
+
+From: Gal Pressman <gal@nvidia.com>
+
+commit 0b89429722353d112f8b8b29ca397e95fa994d27 upstream.
+
+The ioctl EEPROM query wrongly returns success on read failures, fix
+that by returning the appropriate error code.
+
+Fixes: bb64143eee8c ("net/mlx5e: Add ethtool support for dump module EEPROM")
+Signed-off-by: Gal Pressman <gal@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+@@ -1683,7 +1683,7 @@ static int mlx5e_get_module_eeprom(struc
+               if (size_read < 0) {
+                       netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
+                                  __func__, size_read);
+-                      return 0;
++                      return size_read;
+               }
+               i += size_read;
diff --git a/queue-5.4/netfilter-nf_tables-fix-memory-leak-during-stateful-obj-update.patch b/queue-5.4/netfilter-nf_tables-fix-memory-leak-during-stateful-obj-update.patch
new file mode 100644 (file)
index 0000000..7cee00f
--- /dev/null
@@ -0,0 +1,77 @@
+From dad3bdeef45f81a6e90204bcc85360bb76eccec7 Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Mon, 21 Feb 2022 13:31:49 +0100
+Subject: netfilter: nf_tables: fix memory leak during stateful obj update
+
+From: Florian Westphal <fw@strlen.de>
+
+commit dad3bdeef45f81a6e90204bcc85360bb76eccec7 upstream.
+
+Stateful objects can be updated from the control plane.
+The transaction logic allocates a temporary object for this purpose.
+
+The ->init function was called for this object, so a plain kfree() leaks
+resources. We must call the object's ->destroy function instead.
+
+nft_obj_destroy() does this, but it also decrements the module refcount,
+which the update path doesn't increment.
+
+To avoid special-casing the update object release, do module_get for
+the update case too and release it via nft_obj_destroy().
+
+Fixes: d62d0ba97b58 ("netfilter: nf_tables: Introduce stateful object update operation")
+Cc: Fernando Fernandez Mancera <ffmancera@riseup.net>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netfilter/nf_tables_api.c |   13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5184,12 +5184,15 @@ static int nf_tables_updobj(const struct
+ {
+       struct nft_object *newobj;
+       struct nft_trans *trans;
+-      int err;
++      int err = -ENOMEM;
++
++      if (!try_module_get(type->owner))
++              return -ENOENT;
+       trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
+                               sizeof(struct nft_trans_obj));
+       if (!trans)
+-              return -ENOMEM;
++              goto err_trans;
+       newobj = nft_obj_init(ctx, type, attr);
+       if (IS_ERR(newobj)) {
+@@ -5206,6 +5209,8 @@ static int nf_tables_updobj(const struct
+ err_free_trans:
+       kfree(trans);
++err_trans:
++      module_put(type->owner);
+       return err;
+ }
+@@ -6544,7 +6549,7 @@ static void nft_obj_commit_update(struct
+       if (obj->ops->update)
+               obj->ops->update(obj, newobj);
+-      kfree(newobj);
++      nft_obj_destroy(&trans->ctx, newobj);
+ }
+ static void nft_commit_release(struct nft_trans *trans)
+@@ -7109,7 +7114,7 @@ static int __nf_tables_abort(struct net
+                       break;
+               case NFT_MSG_NEWOBJ:
+                       if (nft_trans_obj_update(trans)) {
+-                              kfree(nft_trans_obj_newobj(trans));
++                              nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans));
+                               nft_trans_destroy(trans);
+                       } else {
+                               trans->ctx.table->use--;
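
A user-space sketch of the ownership rule the patch restores (hypothetical
helpers, not nf_tables): take the module reference the destructor is going to
drop, and release the temporary object through the destructor rather than a
bare free, so nothing set up by ->init leaks.

#include <stdlib.h>

static int module_refs;

struct obj { void *priv; };

static struct obj *obj_init(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		o->priv = malloc(32);	/* resource a bare free(o) would leak */
	return o;
}

static void obj_destroy(struct obj *o)
{
	free(o->priv);			/* ->destroy-style teardown */
	free(o);
	module_refs--;			/* destroy also drops the module reference */
}

static int update_obj(void)
{
	struct obj *tmp;

	module_refs++;			/* the fix: match what obj_destroy() drops */
	tmp = obj_init();
	if (!tmp) {
		module_refs--;
		return -1;
	}
	obj_destroy(tmp);		/* release via destroy, not a bare free() */
	return 0;			/* module_refs is balanced at 0 again */
}

int main(void)
{
	return update_obj();
}
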
diff --git a/queue-5.4/nfp-flower-fix-a-potential-leak-in-nfp_tunnel_add_shared_mac.patch b/queue-5.4/nfp-flower-fix-a-potential-leak-in-nfp_tunnel_add_shared_mac.patch
new file mode 100644 (file)
index 0000000..800d88f
--- /dev/null
@@ -0,0 +1,50 @@
+From 3a14d0888eb4b0045884126acc69abfb7b87814d Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Fri, 18 Feb 2022 14:15:35 +0100
+Subject: nfp: flower: Fix a potential leak in nfp_tunnel_add_shared_mac()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+commit 3a14d0888eb4b0045884126acc69abfb7b87814d upstream.
+
+ida_simple_get() returns an id between min (0) and max (NFP_MAX_MAC_INDEX)
+inclusive.
+So NFP_MAX_MAC_INDEX (0xff) is a valid id.
+
+In order for the error handling path to work correctly, the 'invalid'
+value for 'ida_idx' should not be in the 0..NFP_MAX_MAC_INDEX range,
+inclusive.
+
+So set it to -1.
+
+Fixes: 20cce8865098 ("nfp: flower: enable MAC address sharing for offloadable devs")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20220218131535.100258-1-simon.horman@corigine.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+@@ -588,8 +588,8 @@ nfp_tunnel_add_shared_mac(struct nfp_app
+                         int port, bool mod)
+ {
+       struct nfp_flower_priv *priv = app->priv;
+-      int ida_idx = NFP_MAX_MAC_INDEX, err;
+       struct nfp_tun_offloaded_mac *entry;
++      int ida_idx = -1, err;
+       u16 nfp_mac_idx = 0;
+       entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
+@@ -663,7 +663,7 @@ err_remove_hash:
+ err_free_entry:
+       kfree(entry);
+ err_free_ida:
+-      if (ida_idx != NFP_MAX_MAC_INDEX)
++      if (ida_idx != -1)
+               ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+       return err;
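
The sentinel problem itself is easy to reproduce in isolation; a tiny sketch
(hypothetical allocator, not the driver code): when an id allocator may return
every value from 0 to MAX inclusive, MAX cannot double as the "nothing
allocated yet" marker, while -1 sits safely outside the range.

#include <stdio.h>

#define MAX_INDEX 0xff

static int alloc_id(void)
{
	return MAX_INDEX;	/* a perfectly valid id in 0..MAX_INDEX */
}

int main(void)
{
	int id = -1;		/* sentinel outside the valid id range */

	id = alloc_id();
	if (id != -1)		/* correctly recognised as "allocated" */
		printf("would release id %#x on the error path\n", id);
	return 0;
}
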
diff --git a/queue-5.4/openvswitch-fix-setting-ipv6-fields-causing-hw-csum-failure.patch b/queue-5.4/openvswitch-fix-setting-ipv6-fields-causing-hw-csum-failure.patch
new file mode 100644 (file)
index 0000000..6e5d946
--- /dev/null
@@ -0,0 +1,149 @@
+From d9b5ae5c1b241b91480aa30408be12fe91af834a Mon Sep 17 00:00:00 2001
+From: Paul Blakey <paulb@nvidia.com>
+Date: Wed, 23 Feb 2022 18:34:16 +0200
+Subject: openvswitch: Fix setting ipv6 fields causing hw csum failure
+
+From: Paul Blakey <paulb@nvidia.com>
+
+commit d9b5ae5c1b241b91480aa30408be12fe91af834a upstream.
+
+Ipv6 ttl, label and tos fields are modified without first
+pulling/pushing the ipv6 header, which would have updated
+the hw csum (if available). This might cause csum validation failures
+when sending the packet to the stack, as can be seen in
+the trace below.
+
+Fix this by updating skb->csum if available.
+
+Trace resulted by ipv6 ttl dec and then sending packet
+to conntrack [actions: set(ipv6(hlimit=63)),ct(zone=99)]:
+[295241.900063] s_pf0vf2: hw csum failure
+[295241.923191] Call Trace:
+[295241.925728]  <IRQ>
+[295241.927836]  dump_stack+0x5c/0x80
+[295241.931240]  __skb_checksum_complete+0xac/0xc0
+[295241.935778]  nf_conntrack_tcp_packet+0x398/0xba0 [nf_conntrack]
+[295241.953030]  nf_conntrack_in+0x498/0x5e0 [nf_conntrack]
+[295241.958344]  __ovs_ct_lookup+0xac/0x860 [openvswitch]
+[295241.968532]  ovs_ct_execute+0x4a7/0x7c0 [openvswitch]
+[295241.979167]  do_execute_actions+0x54a/0xaa0 [openvswitch]
+[295242.001482]  ovs_execute_actions+0x48/0x100 [openvswitch]
+[295242.006966]  ovs_dp_process_packet+0x96/0x1d0 [openvswitch]
+[295242.012626]  ovs_vport_receive+0x6c/0xc0 [openvswitch]
+[295242.028763]  netdev_frame_hook+0xc0/0x180 [openvswitch]
+[295242.034074]  __netif_receive_skb_core+0x2ca/0xcb0
+[295242.047498]  netif_receive_skb_internal+0x3e/0xc0
+[295242.052291]  napi_gro_receive+0xba/0xe0
+[295242.056231]  mlx5e_handle_rx_cqe_mpwrq_rep+0x12b/0x250 [mlx5_core]
+[295242.062513]  mlx5e_poll_rx_cq+0xa0f/0xa30 [mlx5_core]
+[295242.067669]  mlx5e_napi_poll+0xe1/0x6b0 [mlx5_core]
+[295242.077958]  net_rx_action+0x149/0x3b0
+[295242.086762]  __do_softirq+0xd7/0x2d6
+[295242.090427]  irq_exit+0xf7/0x100
+[295242.093748]  do_IRQ+0x7f/0xd0
+[295242.096806]  common_interrupt+0xf/0xf
+[295242.100559]  </IRQ>
+[295242.102750] RIP: 0033:0x7f9022e88cbd
+[295242.125246] RSP: 002b:00007f9022282b20 EFLAGS: 00000246 ORIG_RAX: ffffffffffffffda
+[295242.132900] RAX: 0000000000000005 RBX: 0000000000000010 RCX: 0000000000000000
+[295242.140120] RDX: 00007f9022282ba8 RSI: 00007f9022282a30 RDI: 00007f9014005c30
+[295242.147337] RBP: 00007f9014014d60 R08: 0000000000000020 R09: 00007f90254a8340
+[295242.154557] R10: 00007f9022282a28 R11: 0000000000000246 R12: 0000000000000000
+[295242.161775] R13: 00007f902308c000 R14: 000000000000002b R15: 00007f9022b71f40
+
+Fixes: 3fdbd1ce11e5 ("openvswitch: add ipv6 'set' action")
+Signed-off-by: Paul Blakey <paulb@nvidia.com>
+Link: https://lore.kernel.org/r/20220223163416.24096-1-paulb@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/checksum.h    |    5 +++++
+ net/openvswitch/actions.c |   46 ++++++++++++++++++++++++++++++++++++++--------
+ 2 files changed, 43 insertions(+), 8 deletions(-)
+
+--- a/include/net/checksum.h
++++ b/include/net/checksum.h
+@@ -139,6 +139,11 @@ static inline void csum_replace2(__sum16
+       *sum = ~csum16_add(csum16_sub(~(*sum), old), new);
+ }
++static inline void csum_replace(__wsum *csum, __wsum old, __wsum new)
++{
++      *csum = csum_add(csum_sub(*csum, old), new);
++}
++
+ struct sk_buff;
+ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
+                             __be32 from, __be32 to, bool pseudohdr);
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -427,12 +427,43 @@ static void set_ipv6_addr(struct sk_buff
+       memcpy(addr, new_addr, sizeof(__be32[4]));
+ }
+-static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
++static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
+ {
++      u8 old_ipv6_tclass = ipv6_get_dsfield(nh);
++
++      ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);
++
++      if (skb->ip_summed == CHECKSUM_COMPLETE)
++              csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
++                           (__force __wsum)(ipv6_tclass << 12));
++
++      ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
++}
++
++static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
++{
++      u32 ofl;
++
++      ofl = nh->flow_lbl[0] << 16 |  nh->flow_lbl[1] << 8 |  nh->flow_lbl[2];
++      fl = OVS_MASKED(ofl, fl, mask);
++
+       /* Bits 21-24 are always unmasked, so this retains their values. */
+-      OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
+-      OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
+-      OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
++      nh->flow_lbl[0] = (u8)(fl >> 16);
++      nh->flow_lbl[1] = (u8)(fl >> 8);
++      nh->flow_lbl[2] = (u8)fl;
++
++      if (skb->ip_summed == CHECKSUM_COMPLETE)
++              csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
++}
++
++static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
++{
++      new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);
++
++      if (skb->ip_summed == CHECKSUM_COMPLETE)
++              csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
++                           (__force __wsum)(new_ttl << 8));
++      nh->hop_limit = new_ttl;
+ }
+ static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
+@@ -550,18 +581,17 @@ static int set_ipv6(struct sk_buff *skb,
+               }
+       }
+       if (mask->ipv6_tclass) {
+-              ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
++              set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
+               flow_key->ip.tos = ipv6_get_dsfield(nh);
+       }
+       if (mask->ipv6_label) {
+-              set_ipv6_fl(nh, ntohl(key->ipv6_label),
++              set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
+                           ntohl(mask->ipv6_label));
+               flow_key->ipv6.label =
+                   *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+       }
+       if (mask->ipv6_hlimit) {
+-              OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
+-                             mask->ipv6_hlimit);
++              set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
+               flow_key->ip.ttl = nh->hop_limit;
+       }
+       return 0;
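
The csum_replace() helper added above relies on ordinary one's-complement
arithmetic; a small user-space sketch of that adjustment (simplified to 16-bit
words, not the kernel helpers): when a word covered by an already-computed sum
changes, the sum can be patched with "sum - old + new" instead of being
recomputed from scratch.

#include <stdint.h>
#include <stdio.h>

static uint32_t add1c(uint32_t sum, uint32_t v)
{
	sum += v;
	return (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
}

static uint32_t sub1c(uint32_t sum, uint32_t v)
{
	return add1c(sum, (~v) & 0xffff);	/* one's-complement subtraction */
}

int main(void)
{
	uint16_t words[] = { 0x4500, 0x0054, 0x4006 };
	uint32_t sum = 0, full = 0;

	for (unsigned i = 0; i < 3; i++)
		sum = add1c(sum, words[i]);

	/* change words[2] from 0x4006 to 0x3f06 and patch the running sum */
	uint32_t patched = add1c(sub1c(sum, 0x4006), 0x3f06);

	/* recompute from scratch for comparison */
	words[2] = 0x3f06;
	for (unsigned i = 0; i < 3; i++)
		full = add1c(full, words[i]);

	printf("patched %#x, recomputed %#x\n", patched, full);	/* both 0x845a */
	return 0;
}
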
diff --git a/queue-5.4/perf-data-fix-double-free-in-perf_session__delete.patch b/queue-5.4/perf-data-fix-double-free-in-perf_session__delete.patch
new file mode 100644 (file)
index 0000000..1c96ac1
--- /dev/null
@@ -0,0 +1,58 @@
+From 69560e366fc4d5fca7bebb0e44edbfafc8bcaf05 Mon Sep 17 00:00:00 2001
+From: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
+Date: Fri, 18 Feb 2022 18:23:41 +0300
+Subject: perf data: Fix double free in perf_session__delete()
+
+From: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
+
+commit 69560e366fc4d5fca7bebb0e44edbfafc8bcaf05 upstream.
+
+When perf_data__create_dir() fails, it calls close_dir(), but
+perf_session__delete() also calls close_dir() and since dir.version and
+dir.nr were initialized by perf_data__create_dir(), a double free occurs.
+
+This patch moves the initialization of dir.version and dir.nr to after
+the successful initialization of dir.files, which prevents double freeing.
+This behavior is already implemented in perf_data__open_dir().
+
+Fixes: 145520631130bd64 ("perf data: Add perf_data__(create_dir|close_dir) functions")
+Signed-off-by: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Antonov <alexander.antonov@linux.intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Alexei Budankov <abudankov@huawei.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20220218152341.5197-2-alexey.v.bayduraev@linux.intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/perf/util/data.c |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/tools/perf/util/data.c
++++ b/tools/perf/util/data.c
+@@ -44,10 +44,6 @@ int perf_data__create_dir(struct perf_da
+       if (!files)
+               return -ENOMEM;
+-      data->dir.version = PERF_DIR_VERSION;
+-      data->dir.files   = files;
+-      data->dir.nr      = nr;
+-
+       for (i = 0; i < nr; i++) {
+               struct perf_data_file *file = &files[i];
+@@ -62,6 +58,9 @@ int perf_data__create_dir(struct perf_da
+               file->fd = ret;
+       }
++      data->dir.version = PERF_DIR_VERSION;
++      data->dir.files   = files;
++      data->dir.nr      = nr;
+       return 0;
+ out_err:
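
A simplified user-space sketch of the ownership rule behind the fix
(hypothetical structures, not the perf code): publish the allocation into the
long-lived object only after every step has succeeded, so the error path and
the destructor never both own (and free) it.

#include <stdlib.h>

struct dir_state { void *files; int nr; };

static int open_one(int i) { return i == 3 ? -1 : 0; }	/* pretend file 3 fails */

static int create_dir(struct dir_state *d, int nr)
{
	void *files = calloc(nr, 64);

	if (!files)
		return -1;

	for (int i = 0; i < nr; i++) {
		if (open_one(i) < 0) {
			free(files);	/* error path still owns the memory... */
			return -1;
		}
	}

	d->files = files;	/* ...because ownership moves only on success */
	d->nr = nr;
	return 0;
}

static void delete_dir(struct dir_state *d)
{
	free(d->files);		/* destructor frees only what was published */
	d->files = NULL;
}

int main(void)
{
	struct dir_state d = { 0 };

	if (create_dir(&d, 5) < 0)
		delete_dir(&d);	/* safe: frees NULL, not a stale pointer */
	return 0;
}
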
diff --git a/queue-5.4/ping-remove-pr_err-from-ping_lookup.patch b/queue-5.4/ping-remove-pr_err-from-ping_lookup.patch
new file mode 100644 (file)
index 0000000..4eb9525
--- /dev/null
@@ -0,0 +1,33 @@
+From cd33bdcbead882c2e58fdb4a54a7bd75b610a452 Mon Sep 17 00:00:00 2001
+From: Xin Long <lucien.xin@gmail.com>
+Date: Wed, 23 Feb 2022 22:41:08 -0500
+Subject: ping: remove pr_err from ping_lookup
+
+From: Xin Long <lucien.xin@gmail.com>
+
+commit cd33bdcbead882c2e58fdb4a54a7bd75b610a452 upstream.
+
+As Jakub noticed, prints should be avoided on the datapath.
+Also, as packets would never come to the else branch in
+ping_lookup(), remove pr_err() from ping_lookup().
+
+Fixes: 35a79e64de29 ("ping: fix the dif and sdif check in ping_lookup")
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Link: https://lore.kernel.org/r/1ef3f2fcd31bd681a193b1fcf235eee1603819bd.1645674068.git.lucien.xin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ping.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -187,7 +187,6 @@ static struct sock *ping_lookup(struct n
+                        (int)ident, &ipv6_hdr(skb)->daddr, dif);
+ #endif
+       } else {
+-              pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol));
+               return NULL;
+       }
diff --git a/queue-5.4/series b/queue-5.4/series
index 5ed0b08b8dd19c67b4408784aefcc7ebec545514..19d38c32a882c4c77756903096bf188479a2b402 100644 (file)
--- a/queue-5.4/series
@@ -11,3 +11,16 @@ x86-fpu-correct-pkru-xstate-inconsistency.patch
 tee-export-teedev_open-and-teedev_close_context.patch
 optee-use-driver-internal-tee_context-for-some-rpc.patch
 lan743x-fix-deadlock-in-lan743x_phy_link_status_change.patch
+ping-remove-pr_err-from-ping_lookup.patch
+perf-data-fix-double-free-in-perf_session__delete.patch
+bpf-do-not-try-bpf_msg_push_data-with-len-0.patch
+net-__pskb_pull_tail-pskb_carve_frag_list-drop_monitor-friends.patch
+tipc-fix-end-of-loop-tests-for-list_for_each_entry.patch
+gso-do-not-skip-outer-ip-header-in-case-of-ipip-and-net_failover.patch
+openvswitch-fix-setting-ipv6-fields-causing-hw-csum-failure.patch
+drm-edid-always-set-rgb444.patch
+net-mlx5e-fix-wrong-return-value-on-ioctl-eeprom-query-failure.patch
+net-ll_temac-check-the-return-value-of-devm_kmalloc.patch
+net-force-inlining-of-checksum-functions-in-net-checksum.h.patch
+nfp-flower-fix-a-potential-leak-in-nfp_tunnel_add_shared_mac.patch
+netfilter-nf_tables-fix-memory-leak-during-stateful-obj-update.patch
diff --git a/queue-5.4/tipc-fix-end-of-loop-tests-for-list_for_each_entry.patch b/queue-5.4/tipc-fix-end-of-loop-tests-for-list_for_each_entry.patch
new file mode 100644 (file)
index 0000000..0a3c3a1
--- /dev/null
@@ -0,0 +1,47 @@
+From a1f8fec4dac8bc7b172b2bdbd881e015261a6322 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Tue, 22 Feb 2022 16:43:12 +0300
+Subject: tipc: Fix end of loop tests for list_for_each_entry()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit a1f8fec4dac8bc7b172b2bdbd881e015261a6322 upstream.
+
+These tests are supposed to check if the loop exited via a break or not.
+However the tests are wrong because if we did not exit via a break then
+"p" is not a valid pointer.  In that case, it's the equivalent of
+"if (*(u32 *)sr == *last_key) {".  That's going to work most of the time,
+but there is a potential for those to be equal.
+
+Fixes: 1593123a6a49 ("tipc: add name table dump to new netlink api")
+Fixes: 1a1a143daf84 ("tipc: add publication dump to new netlink api")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/name_table.c |    2 +-
+ net/tipc/socket.c     |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/tipc/name_table.c
++++ b/net/tipc/name_table.c
+@@ -812,7 +812,7 @@ static int __tipc_nl_add_nametable_publ(
+               list_for_each_entry(p, &sr->all_publ, all_publ)
+                       if (p->key == *last_key)
+                               break;
+-              if (p->key != *last_key)
++              if (list_entry_is_head(p, &sr->all_publ, all_publ))
+                       return -EPIPE;
+       } else {
+               p = list_first_entry(&sr->all_publ,
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -3590,7 +3590,7 @@ static int __tipc_nl_list_sk_publ(struct
+                       if (p->key == *last_publ)
+                               break;
+               }
+-              if (p->key != *last_publ) {
++              if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
+                       /* We never set seq or call nl_dump_check_consistent()
+                        * this means that setting prev_seq here will cause the
+                        * consistence check to fail in the netlink callback