--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Willem de Bruijn <willemb@google.com>
+Date: Sat, 9 Jan 2021 17:18:34 -0500
+Subject: esp: avoid unneeded kmap_atomic call
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit 9bd6b629c39e3fa9e14243a6d8820492be1a5b2e ]
+
+esp(6)_output_head uses skb_page_frag_refill to allocate a buffer for
+the esp trailer.
+
+It accesses the page with kmap_atomic to handle highmem. But
+skb_page_frag_refill can return compound pages, of which
+kmap_atomic only maps the first underlying page.
+
+skb_page_frag_refill does not return highmem, because the
+__GFP_HIGHMEM flag is not set. ESP uses it in the same manner as TCP,
+which also does not call kmap_atomic, but directly uses page_address
+in skb_copy_to_page_nocache. Do the same for ESP.
+
+This issue has become easier to trigger with recent kmap local
+debugging feature CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP.
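+
+To illustrate (sketch only, not part of the change below): with a compound
+page from skb_page_frag_refill(), pfrag->offset can exceed PAGE_SIZE, so
+the single-page mapping set up by kmap_atomic() is too short:
+
+	vaddr = kmap_atomic(page);        /* maps only one PAGE_SIZE subpage */
+	tail  = vaddr + pfrag->offset;    /* offset may exceed PAGE_SIZE     */
+
+On !HIGHMEM configurations kmap_atomic() normally degenerates to
+page_address(), which is presumably why the overrun went unnoticed until
+the forced-mapping debug option above exposed it.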
+
+Fixes: cac2661c53f3 ("esp4: Avoid skb_cow_data whenever possible")
+Fixes: 03e2a30f6a27 ("esp6: Avoid skb_cow_data whenever possible")
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/esp4.c | 7 +------
+ net/ipv6/esp6.c | 7 +------
+ 2 files changed, 2 insertions(+), 12 deletions(-)
+
+--- a/net/ipv4/esp4.c
++++ b/net/ipv4/esp4.c
+@@ -272,7 +272,6 @@ static int esp_output_udp_encap(struct x
+ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+ {
+ u8 *tail;
+- u8 *vaddr;
+ int nfrags;
+ int esph_offset;
+ struct page *page;
+@@ -314,14 +313,10 @@ int esp_output_head(struct xfrm_state *x
+ page = pfrag->page;
+ get_page(page);
+
+- vaddr = kmap_atomic(page);
+-
+- tail = vaddr + pfrag->offset;
++ tail = page_address(page) + pfrag->offset;
+
+ esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
+
+- kunmap_atomic(vaddr);
+-
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
+--- a/net/ipv6/esp6.c
++++ b/net/ipv6/esp6.c
+@@ -226,7 +226,6 @@ static void esp_output_fill_trailer(u8 *
+ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+ {
+ u8 *tail;
+- u8 *vaddr;
+ int nfrags;
+ struct page *page;
+ struct sk_buff *trailer;
+@@ -259,14 +258,10 @@ int esp6_output_head(struct xfrm_state *
+ page = pfrag->page;
+ get_page(page);
+
+- vaddr = kmap_atomic(page);
+-
+- tail = vaddr + pfrag->offset;
++ tail = page_address(page) + pfrag->offset;
+
+ esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
+
+- kunmap_atomic(vaddr);
+-
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Vadim Pasternak <vadimp@nvidia.com>
+Date: Fri, 8 Jan 2021 16:52:09 +0200
+Subject: mlxsw: core: Add validation of transceiver temperature thresholds
+
+From: Vadim Pasternak <vadimp@nvidia.com>
+
+[ Upstream commit 57726ebe2733891c9f59105eff028735f73d05fb ]
+
+Validate thresholds to avoid a single failure due to some transceiver
+unreliability. Ignore the last readouts in case the warning temperature
+is above the alarm temperature, since it can cause an unexpected thermal
+shutdown. Stay with the previous values and refresh the thresholds
+within the next iteration.
+
+This is a rare scenario, but it was observed at a customer site.
+
+Fixes: 6a79507cfe94 ("mlxsw: core: Extend thermal module with per QSFP module thermal zones")
+Signed-off-by: Vadim Pasternak <vadimp@nvidia.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+@@ -177,6 +177,12 @@ mlxsw_thermal_module_trips_update(struct
+ if (err)
+ return err;
+
++ if (crit_temp > emerg_temp) {
++ dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n",
++ tz->tzdev->type, crit_temp, emerg_temp);
++ return 0;
++ }
++
+ /* According to the system thermal requirements, the thermal zones are
+ * defined with four trip points. The critical and emergency
+ * temperature thresholds, provided by QSFP module are set as "active"
+@@ -191,11 +197,8 @@ mlxsw_thermal_module_trips_update(struct
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
+- if (emerg_temp > crit_temp)
+- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
++ tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp +
+ MLXSW_THERMAL_MODULE_TEMP_SHIFT;
+- else
+- tz->trips[MLXSW_THERMAL_TEMP_TRIP_CRIT].temp = emerg_temp;
+
+ return 0;
+ }
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Vadim Pasternak <vadimp@nvidia.com>
+Date: Fri, 8 Jan 2021 16:52:10 +0200
+Subject: mlxsw: core: Increase critical threshold for ASIC thermal zone
+
+From: Vadim Pasternak <vadimp@nvidia.com>
+
+[ Upstream commit b06ca3d5a43ca2dd806f7688a17e8e7e0619a80a ]
+
+Increase critical threshold for ASIC thermal zone from 110C to 140C
+according to the system hardware requirements. All the supported ASICs
+(Spectrum-1, Spectrum-2, Spectrum-3) can still operate with an ASIC
+temperature below 140C. With the old critical threshold value the system
+could perform an unjustified shutdown.
+
+All the systems equipped with the above ASICs implement a thermal
+protection mechanism at firmware level, and the firmware may decide to
+perform a system thermal shutdown while the temperature is still below 140C.
+So with the new threshold the system will not melt down, while the thermal
+operating range will be aligned with the hardware's abilities.
+
+Fixes: 41e760841d26 ("mlxsw: core: Replace thermal temperature trips with defines")
+Fixes: a50c1e35650b ("mlxsw: core: Implement thermal zone")
+Signed-off-by: Vadim Pasternak <vadimp@nvidia.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+@@ -19,7 +19,7 @@
+ #define MLXSW_THERMAL_ASIC_TEMP_NORM 75000 /* 75C */
+ #define MLXSW_THERMAL_ASIC_TEMP_HIGH 85000 /* 85C */
+ #define MLXSW_THERMAL_ASIC_TEMP_HOT 105000 /* 105C */
+-#define MLXSW_THERMAL_ASIC_TEMP_CRIT 110000 /* 110C */
++#define MLXSW_THERMAL_ASIC_TEMP_CRIT 140000 /* 140C */
+ #define MLXSW_THERMAL_HYSTERESIS_TEMP 5000 /* 5C */
+ #define MLXSW_THERMAL_MODULE_TEMP_SHIFT (MLXSW_THERMAL_HYSTERESIS_TEMP * 2)
+ #define MLXSW_THERMAL_ZONE_MAX_NAME 16
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 13 Jan 2021 08:18:19 -0800
+Subject: net: avoid 32 x truesize under-estimation for tiny skbs
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 3226b158e67cfaa677fd180152bfb28989cb2fac ]
+
+Both virtio net and napi_get_frags() allocate skbs
+with a very small skb->head
+
+While using page fragments instead of a kmalloc backed skb->head might give
+a small performance improvement in some cases, there is a huge risk of
+under-estimating memory usage.
+
+For both GOOD_COPY_LEN and GRO_MAX_HEAD, we can fit at least 32 allocations
+per page (order-3 page in x86), or even 64 on PowerPC
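+
+A rough worked example of the 32x figure (illustrative numbers, assuming
+x86 with 4 KiB pages and the default order-3 page fragment allocator):
+
+	order-3 page:      4096 << 3         = 32768 bytes
+	per-skb slice:     head + shinfo     ~  1024 bytes
+	slices per page:   32768 / 1024      =     32
+
+so a single skb lingering in a socket queue can pin the whole 32 KiB page
+while its truesize only accounts for its ~1 KiB slice.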
+
+We have been tracking OOM issues on GKE hosts hitting tcp_mem limits
+but consuming far more memory for TCP buffers than instructed in tcp_mem[2]
+
+Even if we force napi_alloc_skb() to only use order-0 pages, the issue
+would still be there on arches with PAGE_SIZE >= 32768
+
+This patch makes sure that small skb heads are kmalloc backed, so that
+other objects in the slab page can be reused instead of being held for as
+long as skbs are sitting in socket queues.
+
+Note that we might in the future use the sk_buff napi cache,
+instead of going through a more expensive __alloc_skb()
+
+Another idea would be to use separate page sizes depending
+on the allocated length (to never have more than 4 frags per page)
+
+I would like to thank Greg Thelen for his precious help on this matter,
+analysing crash dumps is always a time consuming task.
+
+Fixes: fd11a83dd363 ("net: Pull out core bits of __netdev_alloc_skb and add __napi_alloc_skb")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Paolo Abeni <pabeni@redhat.com>
+Cc: Greg Thelen <gthelen@google.com>
+Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Link: https://lore.kernel.org/r/20210113161819.1155526-1-eric.dumazet@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -496,13 +496,17 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
+ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
+ gfp_t gfp_mask)
+ {
+- struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
++ struct napi_alloc_cache *nc;
+ struct sk_buff *skb;
+ void *data;
+
+ len += NET_SKB_PAD + NET_IP_ALIGN;
+
+- if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
++ /* If requested length is either too small or too big,
++ * we use kmalloc() for skb->head allocation.
++ */
++ if (len <= SKB_WITH_OVERHEAD(1024) ||
++ len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
+ (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
+ skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
+ if (!skb)
+@@ -510,6 +514,7 @@ struct sk_buff *__napi_alloc_skb(struct
+ goto skb_success;
+ }
+
++ nc = this_cpu_ptr(&napi_alloc_cache);
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ len = SKB_DATA_ALIGN(len);
+
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Petr Machata <petrm@nvidia.com>
+Date: Mon, 11 Jan 2021 18:07:07 +0100
+Subject: net: dcb: Accept RTM_GETDCB messages carrying set-like DCB commands
+
+From: Petr Machata <petrm@nvidia.com>
+
+[ Upstream commit df85bc140a4d6cbaa78d8e9c35154e1a2f0622c7 ]
+
+In commit 826f328e2b7e ("net: dcb: Validate netlink message in DCB
+handler"), Linux started rejecting RTM_GETDCB netlink messages if they
+contained a set-like DCB_CMD_ command.
+
+The reason was that privileges were only verified for RTM_SETDCB messages,
+but the value that determines the action to be taken is the command, not
+the message type. And validating the message type against the DCB command
+was the obvious missing piece.
+
+Unfortunately it turns out that mlnx_qos, a somewhat widely deployed tool
+for configuration of DCB, accesses the DCB set-like APIs through
+RTM_GETDCB.
+
+Therefore do not bounce the discrepancy between message type and command.
+Instead, in addition to validating privileges based on the actual message
+type, validate them also based on the expected message type. This closes
+the loophole of allowing DCB configuration on non-admin accounts, while
+maintaining backward compatibility.
+
+Fixes: 2f90b8657ec9 ("ixgbe: this patch adds support for DCB to the kernel and ixgbe driver")
+Fixes: 826f328e2b7e ("net: dcb: Validate netlink message in DCB handler")
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Link: https://lore.kernel.org/r/a3edcfda0825f2aa2591801c5232f2bbf2d8a554.1610384801.git.me@pmachata.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dcb/dcbnl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/dcb/dcbnl.c
++++ b/net/dcb/dcbnl.c
+@@ -1765,7 +1765,7 @@ static int dcb_doit(struct sk_buff *skb,
+ fn = &reply_funcs[dcb->cmd];
+ if (!fn->cb)
+ return -EOPNOTSUPP;
+- if (fn->type != nlh->nlmsg_type)
++ if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!tb[DCB_ATTR_IFNAME])
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Petr Machata <me@pmachata.org>
+Date: Tue, 22 Dec 2020 22:49:44 +0100
+Subject: net: dcb: Validate netlink message in DCB handler
+
+From: Petr Machata <me@pmachata.org>
+
+[ Upstream commit 826f328e2b7e8854dd42ea44e6519cd75018e7b1 ]
+
+DCB uses the same handler function for both RTM_GETDCB and RTM_SETDCB
+messages. dcb_doit() bounces RTM_SETDCB messages if the user does not have
+the CAP_NET_ADMIN capability.
+
+However, the operation to be performed is not decided from the DCB message
+type, but from the DCB command. Thus DCB_CMD_*_GET commands are used for
+reading DCB objects, the corresponding SET and DEL commands are used for
+manipulation.
+
+The assumption is that set-like commands will be sent via an RTM_SETDCB
+message, and get-like ones via RTM_GETDCB. However, this assumption is not
+enforced.
+
+It is therefore possible to manipulate DCB objects without CAP_NET_ADMIN
+capability by sending the corresponding command in an RTM_GETDCB message.
+That is a bug. Fix it by validating the type of the request message against
+the type used for the response.
+
+Fixes: 2f90b8657ec9 ("ixgbe: this patch adds support for DCB to the kernel and ixgbe driver")
+Signed-off-by: Petr Machata <me@pmachata.org>
+Link: https://lore.kernel.org/r/a2a9b88418f3a58ef211b718f2970128ef9e3793.1608673640.git.me@pmachata.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dcb/dcbnl.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/dcb/dcbnl.c
++++ b/net/dcb/dcbnl.c
+@@ -1765,6 +1765,8 @@ static int dcb_doit(struct sk_buff *skb,
+ fn = &reply_funcs[dcb->cmd];
+ if (!fn->cb)
+ return -EOPNOTSUPP;
++ if (fn->type != nlh->nlmsg_type)
++ return -EPERM;
+
+ if (!tb[DCB_ATTR_IFNAME])
+ return -EINVAL;
--- /dev/null
+From dcfea72e79b0aa7a057c8f6024169d86a1bbc84b Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Wed, 8 Jan 2020 16:59:02 -0500
+Subject: net: introduce skb_list_walk_safe for skb segment walking
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit dcfea72e79b0aa7a057c8f6024169d86a1bbc84b upstream.
+
+As part of the continual effort to remove direct usage of skb->next and
+skb->prev, this patch adds a helper for iterating through the
+singly-linked variant of skb lists, which are used for lists of GSO
+packet. The name "skb_list_..." has been chosen to match the existing
+function, "kfree_skb_list, which also operates on these singly-linked
+lists, and the "..._walk_safe" part is the same idiom as elsewhere in
+the kernel.
+
+This patch removes the helper from wireguard and puts it into
+linux/skbuff.h, while making it a bit more robust for general usage. In
+particular, parentheses are added around the macro argument usage, and it
+now accounts for trying to iterate through an already-null skb pointer,
+which will simply run the iteration zero times. This latter enhancement
+means it can be used to replace both do { ... } while and while (...)
+open-coded idioms.
+
+This should take care of these three possible usages, which match all
+current methods of iteration.
+
+skb_list_walk_safe(segs, skb, next) { ... }
+skb_list_walk_safe(skb, skb, next) { ... }
+skb_list_walk_safe(segs, skb, segs) { ... }
+
+Gcc appears to generate efficient code for each of these.
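+
+For example, the IPv6 GSO backport later in this series ("net: ipv6:
+Validate GSO SKB before finish IPv6 processing") uses the helper to walk
+and consume a freshly built segment list:
+
+	skb_list_walk_safe(segs, segs, nskb) {
+		skb_mark_not_on_list(segs);
+		err = ip6_fragment(net, sk, segs, ip6_finish_output2);
+		if (err && ret == 0)
+			ret = err;
+	}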
+
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+[ Just the skbuff.h changes for backporting - gregkh]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1480,6 +1480,11 @@ static inline void skb_mark_not_on_list(
+ skb->next = NULL;
+ }
+
++/* Iterate through singly-linked GSO fragments of an skb. */
++#define skb_list_walk_safe(first, skb, next) \
++ for ((skb) = (first), (next) = (skb) ? (skb)->next : NULL; (skb); \
++ (skb) = (next), (next) = (skb) ? (skb)->next : NULL)
++
+ static inline void skb_list_del_init(struct sk_buff *skb)
+ {
+ __list_del_entry(&skb->list);
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Aya Levin <ayal@nvidia.com>
+Date: Thu, 7 Jan 2021 15:50:18 +0200
+Subject: net: ipv6: Validate GSO SKB before finish IPv6 processing
+
+From: Aya Levin <ayal@nvidia.com>
+
+[ Upstream commit b210de4f8c97d57de051e805686248ec4c6cfc52 ]
+
+There are cases where GSO segment's length exceeds the egress MTU:
+ - Forwarding of a TCP GRO skb, when DF flag is not set.
+ - Forwarding of an skb that arrived on a virtualisation interface
+   (virtio-net/vhost/tap) with a TSO/GSO size set by another network
+   stack.
+ - Local GSO skb transmitted on a NETIF_F_TSO tunnel stacked over an
+ interface with a smaller MTU.
+ - Arriving GRO skb (or GSO skb in a virtualised environment) that is
+ bridged to a NETIF_F_TSO tunnel stacked over an interface with an
+ insufficient MTU.
+
+If so:
+ - Consume the SKB and its segments.
+ - Issue an ICMP packet with 'Packet Too Big' message containing the
+ MTU, allowing the source host to reduce its Path MTU appropriately.
+
+Note: These cases are handled in the same manner in IPv4 output finish.
+This patch aligns the behavior of IPv6 with that of IPv4.
+
+Fixes: 9e50849054a4 ("netfilter: ipv6: move POSTROUTING invocation before fragmentation")
+Signed-off-by: Aya Levin <ayal@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://lore.kernel.org/r/1610027418-30438-1-git-send-email-ayal@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_output.c | 41 ++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 40 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -124,8 +124,43 @@ static int ip6_finish_output2(struct net
+ return -EINVAL;
+ }
+
++static int
++ip6_finish_output_gso_slowpath_drop(struct net *net, struct sock *sk,
++ struct sk_buff *skb, unsigned int mtu)
++{
++ struct sk_buff *segs, *nskb;
++ netdev_features_t features;
++ int ret = 0;
++
++ /* Please see corresponding comment in ip_finish_output_gso
++ * describing the cases where GSO segment length exceeds the
++ * egress MTU.
++ */
++ features = netif_skb_features(skb);
++ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
++ if (IS_ERR_OR_NULL(segs)) {
++ kfree_skb(skb);
++ return -ENOMEM;
++ }
++
++ consume_skb(skb);
++
++ skb_list_walk_safe(segs, segs, nskb) {
++ int err;
++
++ skb_mark_not_on_list(segs);
++ err = ip6_fragment(net, sk, segs, ip6_finish_output2);
++ if (err && ret == 0)
++ ret = err;
++ }
++
++ return ret;
++}
++
+ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+ {
++ unsigned int mtu;
++
+ #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
+ /* Policy lookup after SNAT yielded a new policy */
+ if (skb_dst(skb)->xfrm) {
+@@ -134,7 +169,11 @@ static int __ip6_finish_output(struct ne
+ }
+ #endif
+
+- if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
++ mtu = ip6_skb_dst_mtu(skb);
++ if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
++ return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu);
++
++ if ((skb->len > mtu && !skb_is_gso(skb)) ||
+ dst_allfrag(skb_dst(skb)) ||
+ (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
+ return ip6_fragment(net, sk, skb, ip6_finish_output2);
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Stefan Chulski <stefanc@marvell.com>
+Date: Sun, 10 Jan 2021 21:23:02 +0200
+Subject: net: mvpp2: Remove Pause and Asym_Pause support
+
+From: Stefan Chulski <stefanc@marvell.com>
+
+[ Upstream commit 6f83802a1a06e74eafbdbc9b52c05516d3083d02 ]
+
+The Packet Processor hardware is not connected to the MAC flow control
+unit and cannot support TX flow control.
+This patch disables flow control support.
+
+Fixes: 3f518509dedc ("ethernet: Add new driver for Marvell Armada 375 network unit")
+Signed-off-by: Stefan Chulski <stefanc@marvell.com>
+Acked-by: Marcin Wojtas <mw@semihalf.com>
+Link: https://lore.kernel.org/r/1610306582-16641-1-git-send-email-stefanc@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -4790,8 +4790,6 @@ static void mvpp2_phylink_validate(struc
+
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+- phylink_set(mask, Pause);
+- phylink_set(mask, Asym_Pause);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_10GKR:
--- /dev/null
+From foo@baz Fri Jan 22 12:59:43 PM CET 2021
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Fri, 22 Jan 2021 01:08:31 +0100
+Subject: net, sctp, filter: remap copy_from_user failure error
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ no upstream commit ]
+
+Fix a potential kernel address leakage for the prerequisite where there is
+a BPF program attached to the cgroup/setsockopt hook. The latter can only
+be attached under root, however, if the attached program returns 1 to then
+run the related kernel handler, an unprivileged program could probe for
+kernel addresses that way. The reason this is possible is that we're under
+set_fs(KERNEL_DS) when running the kernel setsockopt handler. Aside from
+old cBPF there is also SCTP's struct sctp_getaddrs_old which contains
+pointers in the uapi struct that further need copy_from_user() inside the
+handler. In the normal case this would just return -EFAULT, but under a
+temporary KERNEL_DS setting the memory would be copied and we'd end up at
+a different error code, that is, -EINVAL, for both cases given subsequent
+validations fail, which then allows the app to distinguish and make use of
+this fact for probing the address space. In case of later kernel versions
+this issue won't work anymore thanks to Christoph Hellwig's work that got
+rid of the various temporary set_fs() address space overrides altogether.
+One potential option for 5.4 as the only affected stable kernel with the
+least complexity would be to remap those affected -EFAULT copy_from_user()
+error codes with -EINVAL such that they cannot be probed anymore. Risk of
+breakage should be rather low for this particular error case.
+
+Fixes: 0d01da6afc54 ("bpf: implement getsockopt and setsockopt hooks")
+Reported-by: Ryota Shiga (Flatt Security)
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Stanislav Fomichev <sdf@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/filter.c | 2 +-
+ net/sctp/socket.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1475,7 +1475,7 @@ struct bpf_prog *__get_filter(struct soc
+
+ if (copy_from_user(prog->insns, fprog->filter, fsize)) {
+ __bpf_prog_free(prog);
+- return ERR_PTR(-EFAULT);
++ return ERR_PTR(-EINVAL);
+ }
+
+ prog->len = fprog->len;
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1319,7 +1319,7 @@ static int __sctp_setsockopt_connectx(st
+
+ kaddrs = memdup_user(addrs, addrs_size);
+ if (IS_ERR(kaddrs))
+- return PTR_ERR(kaddrs);
++ return PTR_ERR(kaddrs) == -EFAULT ? -EINVAL : PTR_ERR(kaddrs);
+
+ /* Allow security module to validate connectx addresses. */
+ err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_CONNECTX,
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Jakub Kicinski <kuba@kernel.org>
+Date: Wed, 13 Jan 2021 17:29:47 -0800
+Subject: net: sit: unregister_netdevice on newlink's error path
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 47e4bb147a96f1c9b4e7691e7e994e53838bfff8 ]
+
+We need to unregister the netdevice if config failed.
+.ndo_uninit takes care of most of the heavy lifting.
+
+This was uncovered by recent commit c269a24ce057 ("net: make
+free_netdev() more lenient with unregistering devices").
+Previously the partially-initialized device would be left
+in the system.
+
+Reported-and-tested-by: syzbot+2393580080a2da190f04@syzkaller.appspotmail.com
+Fixes: e2f1f072db8d ("sit: allow to configure 6rd tunnels via netlink")
+Acked-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Link: https://lore.kernel.org/r/20210114012947.2515313-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/sit.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1597,8 +1597,11 @@ static int ipip6_newlink(struct net *src
+ }
+
+ #ifdef CONFIG_IPV6_SIT_6RD
+- if (ipip6_netlink_6rd_parms(data, &ip6rd))
++ if (ipip6_netlink_6rd_parms(data, &ip6rd)) {
+ err = ipip6_tunnel_update_6rd(nt, &ip6rd);
++ if (err < 0)
++ unregister_netdevice_queue(dev, NULL);
++ }
+ #endif
+
+ return err;
--- /dev/null
+From 5eee7bd7e245914e4e050c413dfe864e31805207 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Mon, 13 Jan 2020 18:42:26 -0500
+Subject: net: skbuff: disambiguate argument and member for skb_list_walk_safe helper
+
+From: Jason A. Donenfeld <Jason@zx2c4.com>
+
+commit 5eee7bd7e245914e4e050c413dfe864e31805207 upstream.
+
+This worked before, because we made all callers name their next pointer
+"next". But in trying to be more "drop-in" ready, the silliness here is
+revealed. This commit fixes the problem by making the macro argument and
+the member use different names.
+
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1481,9 +1481,9 @@ static inline void skb_mark_not_on_list(
+ }
+
+ /* Iterate through singly-linked GSO fragments of an skb. */
+-#define skb_list_walk_safe(first, skb, next) \
+- for ((skb) = (first), (next) = (skb) ? (skb)->next : NULL; (skb); \
+- (skb) = (next), (next) = (skb) ? (skb)->next : NULL)
++#define skb_list_walk_safe(first, skb, next_skb) \
++ for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
++ (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
+
+ static inline void skb_list_del_init(struct sk_buff *skb)
+ {
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: David Wu <david.wu@rock-chips.com>
+Date: Wed, 13 Jan 2021 11:41:09 +0800
+Subject: net: stmmac: Fixed mtu channged by cache aligned
+
+From: David Wu <david.wu@rock-chips.com>
+
+[ Upstream commit 5b55299eed78538cc4746e50ee97103a1643249c ]
+
+Since the original mtu is not used when the mtu is updated and the
+mtu is aligned to the cache line size, an incorrect value gets
+configured. For example, if you want to configure the mtu to be 1500,
+an mtu of 1536 is configured instead.
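+
+Schematically (not the exact driver code; identifiers are placeholders),
+the bug pattern was:
+
+	new_mtu = ALIGN(new_mtu, cache_line);   /* needed for DMA buffer sizing */
+	...
+	dev->mtu = new_mtu;                     /* aligned value leaks into mtu */
+
+and the fix below simply keeps the caller-supplied value aside and assigns
+that one to dev->mtu instead.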
+
+Fixes: eaf4fac478077 ("net: stmmac: Do not accept invalid MTU values")
+Signed-off-by: David Wu <david.wu@rock-chips.com>
+Link: https://lore.kernel.org/r/20210113034109.27865-1-david.wu@rock-chips.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -3739,6 +3739,7 @@ static int stmmac_change_mtu(struct net_
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int txfifosz = priv->plat->tx_fifo_size;
++ const int mtu = new_mtu;
+
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+@@ -3756,7 +3757,7 @@ static int stmmac_change_mtu(struct net_
+ if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
+ return -EINVAL;
+
+- dev->mtu = new_mtu;
++ dev->mtu = mtu;
+
+ netdev_update_features(dev);
+
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Manish Chopra <manishc@marvell.com>
+Date: Thu, 7 Jan 2021 02:15:20 -0800
+Subject: netxen_nic: fix MSI/MSI-x interrupts
+
+From: Manish Chopra <manishc@marvell.com>
+
+[ Upstream commit a2bc221b972db91e4be1970e776e98f16aa87904 ]
+
+For all PCI functions on the netxen_nic adapter, interrupt
+mode (INTx or MSI) configuration is dependent on what has
+been configured by PCI function zero in the shared
+interrupt register, as these adapters do not support mixed
+mode interrupts among the functions of a given adapter.
+
+The logic that sets the MSI/MSI-x interrupt mode in the shared interrupt
+register based on a PCI function id zero check is not appropriate for
+all families of netxen adapters: for some of them PCI function zero is
+not really meant to be probed/loaded on the host, but rather just acts
+as a management function on the device. This caused all the other PCI
+functions on the adapter to always use legacy interrupt (INTx) mode
+instead of choosing MSI/MSI-x interrupt mode.
+
+This patch replaces that check with a port number check so that the
+driver attempts MSI/MSI-x interrupt modes for all types of adapters.
+
+Fixes: b37eb210c076 ("netxen_nic: Avoid mixed mode interrupts")
+Signed-off-by: Manish Chopra <manishc@marvell.com>
+Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
+Link: https://lore.kernel.org/r/20210107101520.6735-1-manishc@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -564,11 +564,6 @@ static const struct net_device_ops netxe
+ .ndo_set_features = netxen_set_features,
+ };
+
+-static inline bool netxen_function_zero(struct pci_dev *pdev)
+-{
+- return (PCI_FUNC(pdev->devfn) == 0) ? true : false;
+-}
+-
+ static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
+ u32 mode)
+ {
+@@ -664,7 +659,7 @@ static int netxen_setup_intr(struct netx
+ netxen_initialize_interrupt_registers(adapter);
+ netxen_set_msix_bit(pdev, 0);
+
+- if (netxen_function_zero(pdev)) {
++ if (adapter->portnum == 0) {
+ if (!netxen_setup_msi_interrupts(adapter, num_msix))
+ netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
+ else
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
+Date: Fri, 8 Jan 2021 09:58:39 +0000
+Subject: rndis_host: set proper input size for OID_GEN_PHYSICAL_MEDIUM request
+
+From: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
+
+[ Upstream commit e56b3d94d939f52d46209b9e1b6700c5bfff3123 ]
+
+MSFT ActiveSync implementation requires that the size of the response for
+incoming query is to be provided in the request input length. Failure to
+set the input size properly results in a failed request transfer, where the
+ActiveSync counterpart reports the NDIS_STATUS_INVALID_LENGTH (0xC0010014L)
+error.
+
+Set the input size for the OID_GEN_PHYSICAL_MEDIUM query to the expected size
+of the response in order for ActiveSync to properly respond to the
+request.
+
+Fixes: 039ee17d1baa ("rndis_host: Add RNDIS physical medium checking into generic_rndis_bind()")
+Signed-off-by: Andrey Zhizhikin <andrey.zhizhikin@leica-geosystems.com>
+Link: https://lore.kernel.org/r/20210108095839.3335-1-andrey.zhizhikin@leica-geosystems.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/rndis_host.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/usb/rndis_host.c
++++ b/drivers/net/usb/rndis_host.c
+@@ -387,7 +387,7 @@ generic_rndis_bind(struct usbnet *dev, s
+ reply_len = sizeof *phym;
+ retval = rndis_query(dev, intf, u.buf,
+ RNDIS_OID_GEN_PHYSICAL_MEDIUM,
+- 0, (void **) &phym, &reply_len);
++ reply_len, (void **)&phym, &reply_len);
+ if (retval != 0 || !phym) {
+ /* OID is optional so don't fail here. */
+ phym_unspec = cpu_to_le32(RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED);
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Baptiste Lepers <baptiste.lepers@gmail.com>
+Date: Tue, 12 Jan 2021 15:59:15 +0000
+Subject: rxrpc: Call state should be read with READ_ONCE() under some circumstances
+
+From: Baptiste Lepers <baptiste.lepers@gmail.com>
+
+[ Upstream commit a95d25dd7b94a5ba18246da09b4218f132fed60e ]
+
+The call state may be changed at any time by the data-ready routine in
+response to received packets, so if the call state is to be read and acted
+upon several times in a function, READ_ONCE() must be used unless the call
+state lock is held.
+
+As it happens, we used READ_ONCE() to read the state a few lines above the
+unmarked read in rxrpc_input_data(), so use that value rather than
+re-reading it.
+
+Fixes: a158bdd3247b ("rxrpc: Fix call timeouts")
+Signed-off-by: Baptiste Lepers <baptiste.lepers@gmail.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://lore.kernel.org/r/161046715522.2450566.488819910256264150.stgit@warthog.procyon.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rxrpc/input.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -431,7 +431,7 @@ static void rxrpc_input_data(struct rxrp
+ return;
+ }
+
+- if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
++ if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+ unsigned long timo = READ_ONCE(call->next_req_timo);
+ unsigned long now, expect_req_by;
+
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: David Howells <dhowells@redhat.com>
+Date: Tue, 12 Jan 2021 15:23:51 +0000
+Subject: rxrpc: Fix handling of an unsupported token type in rxrpc_read()
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit d52e419ac8b50c8bef41b398ed13528e75d7ad48 ]
+
+Clang static analysis reports the following:
+
+net/rxrpc/key.c:657:11: warning: Assigned value is garbage or undefined
+ toksize = toksizes[tok++];
+ ^ ~~~~~~~~~~~~~~~
+
+rxrpc_read() contains two consecutive loops. The first loop calculates the
+token sizes and stores the results in toksizes[] and the second one uses
+the array. When there is an error in identifying the token in the first
+loop, the token is skipped and no change is made to the toksizes[] array.
+When the same error happens in the second loop, the token is not skipped.
+This will cause the toksizes[] array to be out of step and will overrun
+past the calculated sizes.
+
+Fix this by making both loops log a message and return an error in this
+case. This should only happen if a new token type is incompletely
+implemented, so it should normally be impossible to trigger this.
+
+Fixes: 9a059cd5ca7d ("rxrpc: Downgrade the BUG() for unsupported token type in rxrpc_read()")
+Reported-by: Tom Rix <trix@redhat.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Tom Rix <trix@redhat.com>
+Link: https://lore.kernel.org/r/161046503122.2445787.16714129930607546635.stgit@warthog.procyon.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rxrpc/key.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/rxrpc/key.c
++++ b/net/rxrpc/key.c
+@@ -1110,7 +1110,7 @@ static long rxrpc_read(const struct key
+ default: /* we have a ticket we can't encode */
+ pr_err("Unsupported key token type (%u)\n",
+ token->security_index);
+- continue;
++ return -ENOPKG;
+ }
+
+ _debug("token[%u]: toksize=%u", ntoks, toksize);
+@@ -1225,7 +1225,9 @@ static long rxrpc_read(const struct key
+ break;
+
+ default:
+- break;
++ pr_err("Unsupported key token type (%u)\n",
++ token->security_index);
++ return -ENOPKG;
+ }
+
+ ASSERTCMP((unsigned long)xdr - (unsigned long)oldxdr, ==,
nfsd4-readdirplus-shouldn-t-return-parent-of-export.patch
bpf-don-t-leak-memory-in-bpf-getsockopt-when-optlen-0.patch
bpf-fix-helper-bpf_map_peek_elem_proto-pointing-to-wrong-callback.patch
+udp-prevent-reuseport_select_sock-from-reading-uninitialized-socks.patch
+netxen_nic-fix-msi-msi-x-interrupts.patch
+net-introduce-skb_list_walk_safe-for-skb-segment-walking.patch
+net-skbuff-disambiguate-argument-and-member-for-skb_list_walk_safe-helper.patch
+net-ipv6-validate-gso-skb-before-finish-ipv6-processing.patch
+mlxsw-core-add-validation-of-transceiver-temperature-thresholds.patch
+mlxsw-core-increase-critical-threshold-for-asic-thermal-zone.patch
+net-mvpp2-remove-pause-and-asym_pause-support.patch
+rndis_host-set-proper-input-size-for-oid_gen_physical_medium-request.patch
+esp-avoid-unneeded-kmap_atomic-call.patch
+net-dcb-validate-netlink-message-in-dcb-handler.patch
+net-dcb-accept-rtm_getdcb-messages-carrying-set-like-dcb-commands.patch
+rxrpc-call-state-should-be-read-with-read_once-under-some-circumstances.patch
+net-stmmac-fixed-mtu-channged-by-cache-aligned.patch
+net-sit-unregister_netdevice-on-newlink-s-error-path.patch
+net-avoid-32-x-truesize-under-estimation-for-tiny-skbs.patch
+rxrpc-fix-handling-of-an-unsupported-token-type-in-rxrpc_read.patch
+net-sctp-filter-remap-copy_from_user-failure-error.patch
+tipc-fix-null-deref-in-tipc_link_xmit.patch
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Hoang Le <hoang.h.le@dektech.com.au>
+Date: Fri, 8 Jan 2021 14:13:37 +0700
+Subject: tipc: fix NULL deref in tipc_link_xmit()
+
+From: Hoang Le <hoang.h.le@dektech.com.au>
+
+[ Upstream commit b77413446408fdd256599daf00d5be72b5f3e7c6 ]
+
+The buffer list can contain zero skbs along the following path:
+tipc_named_node_up()->tipc_node_xmit()->tipc_link_xmit(), so
+we need to check the list before dereferencing the first sk_buff.
+
+Fault report:
+ [] tipc: Bulk publication failure
+ [] general protection fault, probably for non-canonical [#1] PREEMPT [...]
+ [] KASAN: null-ptr-deref in range [0x00000000000000c8-0x00000000000000cf]
+ [] CPU: 0 PID: 0 Comm: swapper/0 Kdump: loaded Not tainted 5.10.0-rc4+ #2
+ [] Hardware name: Bochs ..., BIOS Bochs 01/01/2011
+ [] RIP: 0010:tipc_link_xmit+0xc1/0x2180
+ [] Code: 24 b8 00 00 00 00 4d 39 ec 4c 0f 44 e8 e8 d7 0a 10 f9 48 [...]
+ [] RSP: 0018:ffffc90000006ea0 EFLAGS: 00010202
+ [] RAX: dffffc0000000000 RBX: ffff8880224da000 RCX: 1ffff11003d3cc0d
+ [] RDX: 0000000000000019 RSI: ffffffff886007b9 RDI: 00000000000000c8
+ [] RBP: ffffc90000007018 R08: 0000000000000001 R09: fffff52000000ded
+ [] R10: 0000000000000003 R11: fffff52000000dec R12: ffffc90000007148
+ [] R13: 0000000000000000 R14: 0000000000000000 R15: ffffc90000007018
+ [] FS: 0000000000000000(0000) GS:ffff888037400000(0000) knlGS:000[...]
+ [] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [] CR2: 00007fffd2db5000 CR3: 000000002b08f000 CR4: 00000000000006f0
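+
+The failing dereference comes from buf_msg(), which simply returns
+(struct tipc_msg *)skb->data, so on the empty-list path (sketch):
+
+	struct sk_buff *skb = skb_peek(list);  /* NULL when the list is empty */
+	struct tipc_msg *hdr = buf_msg(skb);   /* reads skb->data from NULL   */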
+
+Fixes: af9b028e270fd ("tipc: make media xmit call outside node spinlock context")
+Acked-by: Jon Maloy <jmaloy@redhat.com>
+Signed-off-by: Hoang Le <hoang.h.le@dektech.com.au>
+Link: https://lore.kernel.org/r/20210108071337.3598-1-hoang.h.le@dektech.com.au
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/link.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -939,9 +939,7 @@ void tipc_link_reset(struct tipc_link *l
+ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+ struct sk_buff_head *xmitq)
+ {
+- struct tipc_msg *hdr = buf_msg(skb_peek(list));
+ unsigned int maxwin = l->window;
+- int imp = msg_importance(hdr);
+ unsigned int mtu = l->mtu;
+ u16 ack = l->rcv_nxt - 1;
+ u16 seqno = l->snd_nxt;
+@@ -950,8 +948,14 @@ int tipc_link_xmit(struct tipc_link *l,
+ struct sk_buff_head *backlogq = &l->backlogq;
+ struct sk_buff *skb, *_skb, **tskb;
+ int pkt_cnt = skb_queue_len(list);
++ struct tipc_msg *hdr;
+ int rc = 0;
++ int imp;
+
++ if (pkt_cnt <= 0)
++ return 0;
++
++ hdr = buf_msg(skb_peek(list));
+ if (unlikely(msg_size(hdr) > mtu)) {
+ pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
+ skb_queue_len(list), msg_user(hdr),
+@@ -960,6 +964,7 @@ int tipc_link_xmit(struct tipc_link *l,
+ return -EMSGSIZE;
+ }
+
++ imp = msg_importance(hdr);
+ /* Allow oversubscription of one data msg per source at congestion */
+ if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
+ if (imp == TIPC_SYSTEM_IMPORTANCE) {
--- /dev/null
+From foo@baz Fri Jan 22 12:59:03 PM CET 2021
+From: Baptiste Lepers <baptiste.lepers@gmail.com>
+Date: Thu, 7 Jan 2021 16:11:10 +1100
+Subject: udp: Prevent reuseport_select_sock from reading uninitialized socks
+
+From: Baptiste Lepers <baptiste.lepers@gmail.com>
+
+[ Upstream commit fd2ddef043592e7de80af53f47fa46fd3573086e ]
+
+reuse->socks[] is modified concurrently by reuseport_add_sock. To
+prevent reading values that have not been fully initialized, only read
+the array up until the last known safe index instead of incorrectly
+re-reading the last index of the array.
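+
+For context, a simplified sketch of the writer side in reuseport_add_sock()
+(not part of this diff):
+
+	reuse->socks[reuse->num_socks] = sk;
+	smp_wmb();                  /* paired with smp_rmb() in the reader */
+	reuse->num_socks++;
+
+The reader's smp_rmb() only orders against the num_socks value captured by
+the initial READ_ONCE(), so re-reading reuse->num_socks inside the loop may
+observe an increment whose socks[] store is not yet guaranteed to be
+visible; hence the loop must keep using the locally captured socks count.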
+
+Fixes: acdcecc61285f ("udp: correct reuseport selection with connected sockets")
+Signed-off-by: Baptiste Lepers <baptiste.lepers@gmail.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Link: https://lore.kernel.org/r/20210107051110.12247-1-baptiste.lepers@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/sock_reuseport.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/sock_reuseport.c
++++ b/net/core/sock_reuseport.c
+@@ -302,7 +302,7 @@ select_by_hash:
+ i = j = reciprocal_scale(hash, socks);
+ while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
+ i++;
+- if (i >= reuse->num_socks)
++ if (i >= socks)
+ i = 0;
+ if (i == j)
+ goto out;