git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 4.19
author Sasha Levin <sashal@kernel.org>
Sun, 24 Sep 2023 19:27:46 +0000 (15:27 -0400)
committer Sasha Levin <sashal@kernel.org>
Sun, 24 Sep 2023 19:27:46 +0000 (15:27 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.19/ipv4-fix-null-deref-in-ipv4_link_failure.patch [new file with mode: 0644]
queue-4.19/net-add-atomic_long_t-to-net_device_stats-fields.patch [new file with mode: 0644]
queue-4.19/net-bridge-use-dev_stats_inc.patch [new file with mode: 0644]
queue-4.19/net-hns3-add-5ms-delay-before-clear-firmware-reset-i.patch [new file with mode: 0644]
queue-4.19/netfilter-nf_tables-disallow-element-removal-on-anon.patch [new file with mode: 0644]
queue-4.19/powerpc-perf-hv-24x7-update-domain-value-check.patch [new file with mode: 0644]
queue-4.19/selftests-tls-add-to-avoid-static-checker-warning.patch [new file with mode: 0644]
queue-4.19/selftests-tls-swap-the-tx-and-rx-sockets-in-some-tes.patch [new file with mode: 0644]
queue-4.19/series
queue-4.19/team-fix-null-ptr-deref-when-team-device-type-is-cha.patch [new file with mode: 0644]

diff --git a/queue-4.19/ipv4-fix-null-deref-in-ipv4_link_failure.patch b/queue-4.19/ipv4-fix-null-deref-in-ipv4_link_failure.patch
new file mode 100644 (file)
index 0000000..d5a3e54
--- /dev/null
@@ -0,0 +1,53 @@
+From b6bff7bfc242b172f9d92c5ff41729afe2a7e73b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Sep 2023 22:12:57 -0700
+Subject: ipv4: fix null-deref in ipv4_link_failure
+
+From: Kyle Zeng <zengyhkyle@gmail.com>
+
+[ Upstream commit 0113d9c9d1ccc07f5a3710dac4aa24b6d711278c ]
+
+Currently, we assume the skb is associated with a device before calling
+__ip_options_compile, which is not always the case if it is re-routed by
+ipvs.
+When skb->dev is NULL, dev_net(skb->dev) results in a NULL pointer
+dereference. This patch adds a check for that edge case and switches to
+using the net_device from the rtable when skb->dev is NULL.
+
+Fixes: ed0de45a1008 ("ipv4: recompile ip options in ipv4_link_failure")
+Suggested-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: Kyle Zeng <zengyhkyle@gmail.com>
+Cc: Stephen Suryaputra <ssuryaextr@gmail.com>
+Cc: Vadim Fedorenko <vfedorenko@novek.ru>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/route.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 57e2316529d00..9753d07bfc0bf 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1215,6 +1215,7 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+ static void ipv4_send_dest_unreach(struct sk_buff *skb)
+ {
++      struct net_device *dev;
+       struct ip_options opt;
+       int res;
+@@ -1232,7 +1233,8 @@ static void ipv4_send_dest_unreach(struct sk_buff *skb)
+               opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
+               rcu_read_lock();
+-              res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
++              dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
++              res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
+               rcu_read_unlock();
+               if (res)
+-- 
+2.40.1
+
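A minimal user-space sketch of the fallback pattern applied in the patch above: when the skb's device pointer may be NULL, resolve the net_device from the attached route before dereferencing it. All struct and helper names below are hypothetical stand-ins, not the real net/ipv4 definitions.

  #include <stdio.h>
  #include <stddef.h>

  /* Hypothetical stand-ins for struct net_device, rtable and sk_buff. */
  struct net_device { const char *name; int net_id; };
  struct rtable     { struct net_device *dst_dev; };
  struct sk_buff    { struct net_device *dev; struct rtable *rt; };

  /* Mirrors dev_net(): derives the namespace from a device that must not be NULL. */
  static int dev_net(const struct net_device *dev) { return dev->net_id; }

  static int send_dest_unreach(const struct sk_buff *skb)
  {
      /* The fix: fall back to the route's device when skb->dev is NULL
       * instead of unconditionally dereferencing skb->dev. */
      const struct net_device *dev = skb->dev ? skb->dev : skb->rt->dst_dev;

      return dev_net(dev);
  }

  int main(void)
  {
      struct net_device rt_dev = { "rt0", 42 };
      struct rtable rt = { &rt_dev };
      struct sk_buff skb = { NULL, &rt };   /* re-routed skb with no device set */

      printf("resolved netns id: %d\n", send_dest_unreach(&skb));
      return 0;
  }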
diff --git a/queue-4.19/net-add-atomic_long_t-to-net_device_stats-fields.patch b/queue-4.19/net-add-atomic_long_t-to-net_device_stats-fields.patch
new file mode 100644 (file)
index 0000000..7506076
--- /dev/null
@@ -0,0 +1,166 @@
+From 461a34b0f29813a77a4449dad4ec53db1a2599f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Nov 2022 08:53:55 +0000
+Subject: net: add atomic_long_t to net_device_stats fields
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 6c1c5097781f563b70a81683ea6fdac21637573b ]
+
+Long-standing KCSAN issues are caused by data races around
+some dev->stats changes.
+
+Most performance critical paths already use per-cpu
+variables, or per-queue ones.
+
+It is reasonable (and more correct) to use atomic operations
+for the slow paths.
+
+This patch adds a union for each field of net_device_stats,
+so that we can convert paths that are not yet protected
+by a spinlock or a mutex.
+
+netdev_stats_to_stats64() no longer has an #if BITS_PER_LONG == 64 branch.
+
+Note that the memcpy() we were using on 64-bit arches
+had no provision to avoid load tearing,
+while atomic_long_read() provides the needed protection
+at no cost.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 44bdb313da57 ("net: bridge: use DEV_STATS_INC()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h | 58 +++++++++++++++++++++++----------------
+ include/net/dst.h         |  5 ++--
+ net/core/dev.c            | 14 ++--------
+ 3 files changed, 40 insertions(+), 37 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 7e9df3854420a..e977118111f61 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -162,31 +162,38 @@ static inline bool dev_xmit_complete(int rc)
+  *    (unsigned long) so they can be read and written atomically.
+  */
++#define NET_DEV_STAT(FIELD)                   \
++      union {                                 \
++              unsigned long FIELD;            \
++              atomic_long_t __##FIELD;        \
++      }
++
+ struct net_device_stats {
+-      unsigned long   rx_packets;
+-      unsigned long   tx_packets;
+-      unsigned long   rx_bytes;
+-      unsigned long   tx_bytes;
+-      unsigned long   rx_errors;
+-      unsigned long   tx_errors;
+-      unsigned long   rx_dropped;
+-      unsigned long   tx_dropped;
+-      unsigned long   multicast;
+-      unsigned long   collisions;
+-      unsigned long   rx_length_errors;
+-      unsigned long   rx_over_errors;
+-      unsigned long   rx_crc_errors;
+-      unsigned long   rx_frame_errors;
+-      unsigned long   rx_fifo_errors;
+-      unsigned long   rx_missed_errors;
+-      unsigned long   tx_aborted_errors;
+-      unsigned long   tx_carrier_errors;
+-      unsigned long   tx_fifo_errors;
+-      unsigned long   tx_heartbeat_errors;
+-      unsigned long   tx_window_errors;
+-      unsigned long   rx_compressed;
+-      unsigned long   tx_compressed;
++      NET_DEV_STAT(rx_packets);
++      NET_DEV_STAT(tx_packets);
++      NET_DEV_STAT(rx_bytes);
++      NET_DEV_STAT(tx_bytes);
++      NET_DEV_STAT(rx_errors);
++      NET_DEV_STAT(tx_errors);
++      NET_DEV_STAT(rx_dropped);
++      NET_DEV_STAT(tx_dropped);
++      NET_DEV_STAT(multicast);
++      NET_DEV_STAT(collisions);
++      NET_DEV_STAT(rx_length_errors);
++      NET_DEV_STAT(rx_over_errors);
++      NET_DEV_STAT(rx_crc_errors);
++      NET_DEV_STAT(rx_frame_errors);
++      NET_DEV_STAT(rx_fifo_errors);
++      NET_DEV_STAT(rx_missed_errors);
++      NET_DEV_STAT(tx_aborted_errors);
++      NET_DEV_STAT(tx_carrier_errors);
++      NET_DEV_STAT(tx_fifo_errors);
++      NET_DEV_STAT(tx_heartbeat_errors);
++      NET_DEV_STAT(tx_window_errors);
++      NET_DEV_STAT(rx_compressed);
++      NET_DEV_STAT(tx_compressed);
+ };
++#undef NET_DEV_STAT
+ #include <linux/cache.h>
+@@ -4842,4 +4849,9 @@ do {                                                             \
+ #define PTYPE_HASH_SIZE       (16)
+ #define PTYPE_HASH_MASK       (PTYPE_HASH_SIZE - 1)
++/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
++#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
++#define DEV_STATS_ADD(DEV, FIELD, VAL)        \
++              atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
++
+ #endif        /* _LINUX_NETDEVICE_H */
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 50258a8131377..97267997601f5 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -362,9 +362,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+                                struct net *net)
+ {
+-      /* TODO : stats should be SMP safe */
+-      dev->stats.rx_packets++;
+-      dev->stats.rx_bytes += skb->len;
++      DEV_STATS_INC(dev, rx_packets);
++      DEV_STATS_ADD(dev, rx_bytes, skb->len);
+       __skb_tunnel_rx(skb, dev, net);
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index a9c8660a2570f..3bf40c288c032 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -9035,24 +9035,16 @@ void netdev_run_todo(void)
+ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
+                            const struct net_device_stats *netdev_stats)
+ {
+-#if BITS_PER_LONG == 64
+-      BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
+-      memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
+-      /* zero out counters that only exist in rtnl_link_stats64 */
+-      memset((char *)stats64 + sizeof(*netdev_stats), 0,
+-             sizeof(*stats64) - sizeof(*netdev_stats));
+-#else
+-      size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
+-      const unsigned long *src = (const unsigned long *)netdev_stats;
++      size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
++      const atomic_long_t *src = (atomic_long_t *)netdev_stats;
+       u64 *dst = (u64 *)stats64;
+       BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
+       for (i = 0; i < n; i++)
+-              dst[i] = src[i];
++              dst[i] = atomic_long_read(&src[i]);
+       /* zero out counters that only exist in rtnl_link_stats64 */
+       memset((char *)stats64 + n * sizeof(u64), 0,
+              sizeof(*stats64) - n * sizeof(u64));
+-#endif
+ }
+ EXPORT_SYMBOL(netdev_stats_to_stats64);
+-- 
+2.40.1
+
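The union above lets existing readers keep the plain unsigned long view of each counter while slow-path writers go through the aliased atomic. A self-contained C11 sketch of the same idea, with made-up field and macro names rather than the kernel's:

  #include <stdio.h>
  #include <stdatomic.h>

  /* Each counter is a union of a plain view and an atomic view of the same storage. */
  #define STAT_FIELD(NAME)                 \
      union {                              \
          unsigned long NAME;              \
          atomic_ulong  __##NAME;          \
      }

  struct demo_stats {
      STAT_FIELD(rx_packets);
      STAT_FIELD(rx_bytes);
  };

  /* Writers use the atomic view so unlocked slow-path updates do not race. */
  #define STATS_INC(S, FIELD)      atomic_fetch_add(&(S)->__##FIELD, 1)
  #define STATS_ADD(S, FIELD, VAL) atomic_fetch_add(&(S)->__##FIELD, (VAL))

  int main(void)
  {
      struct demo_stats stats = { 0 };

      STATS_INC(&stats, rx_packets);
      STATS_ADD(&stats, rx_bytes, 1500);

      /* Reading through the atomic view also avoids load tearing. */
      printf("rx_packets=%lu rx_bytes=%lu\n",
             (unsigned long)atomic_load(&stats.__rx_packets),
             (unsigned long)atomic_load(&stats.__rx_bytes));
      return 0;
  }

Because both union members occupy the same storage with the same size, the struct layout is unchanged, which is why code that still touches the plain fields keeps working in this backport.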
diff --git a/queue-4.19/net-bridge-use-dev_stats_inc.patch b/queue-4.19/net-bridge-use-dev_stats_inc.patch
new file mode 100644 (file)
index 0000000..07ec51a
--- /dev/null
@@ -0,0 +1,139 @@
+From 2dc14add721fe64db85af5dc0186e5088f41ed2d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Sep 2023 09:13:51 +0000
+Subject: net: bridge: use DEV_STATS_INC()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 44bdb313da57322c9b3c108eb66981c6ec6509f4 ]
+
+syzbot/KCSAN reported data races in br_handle_frame_finish() [1].
+This function can run on multiple CPUs without mutual exclusion.
+
+Adopt the SMP-safe DEV_STATS_INC() to update dev->stats fields.
+
+Handle updates to dev->stats.tx_dropped while we are at it.
+
+[1]
+BUG: KCSAN: data-race in br_handle_frame_finish / br_handle_frame_finish
+
+read-write to 0xffff8881374b2178 of 8 bytes by interrupt on cpu 1:
+br_handle_frame_finish+0xd4f/0xef0 net/bridge/br_input.c:189
+br_nf_hook_thresh+0x1ed/0x220
+br_nf_pre_routing_finish_ipv6+0x50f/0x540
+NF_HOOK include/linux/netfilter.h:304 [inline]
+br_nf_pre_routing_ipv6+0x1e3/0x2a0 net/bridge/br_netfilter_ipv6.c:178
+br_nf_pre_routing+0x526/0xba0 net/bridge/br_netfilter_hooks.c:508
+nf_hook_entry_hookfn include/linux/netfilter.h:144 [inline]
+nf_hook_bridge_pre net/bridge/br_input.c:272 [inline]
+br_handle_frame+0x4c9/0x940 net/bridge/br_input.c:417
+__netif_receive_skb_core+0xa8a/0x21e0 net/core/dev.c:5417
+__netif_receive_skb_one_core net/core/dev.c:5521 [inline]
+__netif_receive_skb+0x57/0x1b0 net/core/dev.c:5637
+process_backlog+0x21f/0x380 net/core/dev.c:5965
+__napi_poll+0x60/0x3b0 net/core/dev.c:6527
+napi_poll net/core/dev.c:6594 [inline]
+net_rx_action+0x32b/0x750 net/core/dev.c:6727
+__do_softirq+0xc1/0x265 kernel/softirq.c:553
+run_ksoftirqd+0x17/0x20 kernel/softirq.c:921
+smpboot_thread_fn+0x30a/0x4a0 kernel/smpboot.c:164
+kthread+0x1d7/0x210 kernel/kthread.c:388
+ret_from_fork+0x48/0x60 arch/x86/kernel/process.c:147
+ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:304
+
+read-write to 0xffff8881374b2178 of 8 bytes by interrupt on cpu 0:
+br_handle_frame_finish+0xd4f/0xef0 net/bridge/br_input.c:189
+br_nf_hook_thresh+0x1ed/0x220
+br_nf_pre_routing_finish_ipv6+0x50f/0x540
+NF_HOOK include/linux/netfilter.h:304 [inline]
+br_nf_pre_routing_ipv6+0x1e3/0x2a0 net/bridge/br_netfilter_ipv6.c:178
+br_nf_pre_routing+0x526/0xba0 net/bridge/br_netfilter_hooks.c:508
+nf_hook_entry_hookfn include/linux/netfilter.h:144 [inline]
+nf_hook_bridge_pre net/bridge/br_input.c:272 [inline]
+br_handle_frame+0x4c9/0x940 net/bridge/br_input.c:417
+__netif_receive_skb_core+0xa8a/0x21e0 net/core/dev.c:5417
+__netif_receive_skb_one_core net/core/dev.c:5521 [inline]
+__netif_receive_skb+0x57/0x1b0 net/core/dev.c:5637
+process_backlog+0x21f/0x380 net/core/dev.c:5965
+__napi_poll+0x60/0x3b0 net/core/dev.c:6527
+napi_poll net/core/dev.c:6594 [inline]
+net_rx_action+0x32b/0x750 net/core/dev.c:6727
+__do_softirq+0xc1/0x265 kernel/softirq.c:553
+do_softirq+0x5e/0x90 kernel/softirq.c:454
+__local_bh_enable_ip+0x64/0x70 kernel/softirq.c:381
+__raw_spin_unlock_bh include/linux/spinlock_api_smp.h:167 [inline]
+_raw_spin_unlock_bh+0x36/0x40 kernel/locking/spinlock.c:210
+spin_unlock_bh include/linux/spinlock.h:396 [inline]
+batadv_tt_local_purge+0x1a8/0x1f0 net/batman-adv/translation-table.c:1356
+batadv_tt_purge+0x2b/0x630 net/batman-adv/translation-table.c:3560
+process_one_work kernel/workqueue.c:2630 [inline]
+process_scheduled_works+0x5b8/0xa30 kernel/workqueue.c:2703
+worker_thread+0x525/0x730 kernel/workqueue.c:2784
+kthread+0x1d7/0x210 kernel/kthread.c:388
+ret_from_fork+0x48/0x60 arch/x86/kernel/process.c:147
+ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:304
+
+value changed: 0x00000000000d7190 -> 0x00000000000d7191
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 14848 Comm: kworker/u4:11 Not tainted 6.6.0-rc1-syzkaller-00236-gad8a69f361b9 #0
+
+Fixes: 1c29fc4989bc ("[BRIDGE]: keep track of received multicast packets")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Roopa Prabhu <roopa@nvidia.com>
+Cc: Nikolay Aleksandrov <razor@blackwall.org>
+Cc: bridge@lists.linux-foundation.org
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://lore.kernel.org/r/20230918091351.1356153-1-edumazet@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_forward.c | 4 ++--
+ net/bridge/br_input.c   | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index 48ddc60b4fbde..c07a47d65c398 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -122,7 +122,7 @@ static int deliver_clone(const struct net_bridge_port *prev,
+       skb = skb_clone(skb, GFP_ATOMIC);
+       if (!skb) {
+-              dev->stats.tx_dropped++;
++              DEV_STATS_INC(dev, tx_dropped);
+               return -ENOMEM;
+       }
+@@ -261,7 +261,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+       skb = skb_copy(skb, GFP_ATOMIC);
+       if (!skb) {
+-              dev->stats.tx_dropped++;
++              DEV_STATS_INC(dev, tx_dropped);
+               return;
+       }
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 14c2fdc268eac..f3938337ff874 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -146,12 +146,12 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+                       if ((mdst && mdst->host_joined) ||
+                           br_multicast_is_router(br)) {
+                               local_rcv = true;
+-                              br->dev->stats.multicast++;
++                              DEV_STATS_INC(br->dev, multicast);
+                       }
+                       mcast_hit = true;
+               } else {
+                       local_rcv = true;
+-                      br->dev->stats.multicast++;
++                      DEV_STATS_INC(br->dev, multicast);
+               }
+               break;
+       case BR_PKT_UNICAST:
+-- 
+2.40.1
+
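The KCSAN splat above is the classic lost-update pattern: two CPUs performing a plain ++ on the same counter. A small stand-alone C11/pthreads sketch (not kernel code) that usually makes the difference between a plain increment and an atomic one visible; build it with "cc -pthread".

  #include <stdio.h>
  #include <pthread.h>
  #include <stdatomic.h>

  #define ITERS 1000000

  static unsigned long plain_counter;   /* racy: read-modify-write updates can be lost */
  static atomic_ulong  atomic_counter;  /* safe: each increment is indivisible */

  static void *worker(void *arg)
  {
      (void)arg;
      for (int i = 0; i < ITERS; i++) {
          plain_counter++;                        /* like br->dev->stats.multicast++ */
          atomic_fetch_add(&atomic_counter, 1);   /* like DEV_STATS_INC() */
      }
      return NULL;
  }

  int main(void)
  {
      pthread_t t1, t2;

      pthread_create(&t1, NULL, worker, NULL);
      pthread_create(&t2, NULL, worker, NULL);
      pthread_join(t1, NULL);
      pthread_join(t2, NULL);

      /* plain_counter typically ends up below 2 * ITERS; atomic_counter never does. */
      printf("plain=%lu atomic=%lu expected=%d\n",
             plain_counter, (unsigned long)atomic_load(&atomic_counter), 2 * ITERS);
      return 0;
  }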
diff --git a/queue-4.19/net-hns3-add-5ms-delay-before-clear-firmware-reset-i.patch b/queue-4.19/net-hns3-add-5ms-delay-before-clear-firmware-reset-i.patch
new file mode 100644 (file)
index 0000000..0275426
--- /dev/null
@@ -0,0 +1,47 @@
+From 24d7fdfb62d03c6542c69d91f14b6a546c8ae1c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Sep 2023 15:48:40 +0800
+Subject: net: hns3: add 5ms delay before clear firmware reset irq source
+
+From: Jie Wang <wangjie125@huawei.com>
+
+[ Upstream commit 0770063096d5da4a8e467b6e73c1646a75589628 ]
+
+Currently the reset process in hns3 and the firmware watchdog init process
+are asynchronous. We assume firmware watchdog initialization has completed
+before hns3 clears the firmware interrupt source. However, firmware
+initialization may not complete that early.
+
+So add a delay before hns3 clears the firmware interrupt source; a 5 ms
+delay is enough to avoid a second firmware reset interrupt.
+
+Fixes: c1a81619d73a ("net: hns3: Add mailbox interrupt handling to PF driver")
+Signed-off-by: Jie Wang <wangjie125@huawei.com>
+Signed-off-by: Jijie Shao <shaojijie@huawei.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 2c334b56fd42c..d668d25ae7e76 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -2517,8 +2517,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+                                   u32 regclr)
+ {
++#define HCLGE_IMP_RESET_DELAY         5
++
+       switch (event_type) {
+       case HCLGE_VECTOR0_EVENT_RST:
++              if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
++                      mdelay(HCLGE_IMP_RESET_DELAY);
++
+               hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+               break;
+       case HCLGE_VECTOR0_EVENT_MBX:
+-- 
+2.40.1
+
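The delay above works around an ordering problem between two otherwise independent events: the driver acknowledging the reset interrupt source and the firmware watchdog finishing its own initialization. A toy user-space sketch of that delay-before-clear pattern follows; the register, bit and helper names are invented for illustration only.

  #include <stdio.h>
  #include <unistd.h>

  #define RESET_INT_BIT 0x1u                              /* hypothetical reset-pending bit */

  static unsigned int reset_status_reg = RESET_INT_BIT;   /* fake status register */

  static void mdelay_ms(unsigned int ms) { usleep(ms * 1000); }
  static void write_reg(unsigned int clear_mask) { reset_status_reg &= ~clear_mask; }

  static void clear_reset_event(unsigned int regclr)
  {
      /* Give the firmware watchdog time to finish initializing before the
       * interrupt source is acknowledged; clearing too early lets a second
       * reset interrupt fire. The patch above settles on a 5 ms delay. */
      if (regclr & RESET_INT_BIT)
          mdelay_ms(5);

      write_reg(regclr);
  }

  int main(void)
  {
      clear_reset_event(RESET_INT_BIT);
      printf("reset status after clear: %#x\n", reset_status_reg);
      return 0;
  }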
diff --git a/queue-4.19/netfilter-nf_tables-disallow-element-removal-on-anon.patch b/queue-4.19/netfilter-nf_tables-disallow-element-removal-on-anon.patch
new file mode 100644 (file)
index 0000000..1ba164e
--- /dev/null
@@ -0,0 +1,58 @@
+From f7826c261f6f895527a86ec4e7bdc2ef68d358a6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 10 Sep 2023 19:04:45 +0200
+Subject: netfilter: nf_tables: disallow element removal on anonymous sets
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 23a3bfd4ba7acd36abf52b78605f61b21bdac216 ]
+
+Anonymous sets need to be populated once at creation and are then bound
+to a rule; since 938154b93be8 ("netfilter: nf_tables: reject unbound
+anonymous set before commit phase"), the transaction otherwise reports
+EINVAL.
+
+Userspace does not need to delete elements of anonymous sets that are
+not yet bound, so reject this with EOPNOTSUPP.
+
+In the flush command path, skip anonymous sets; they are expected to be
+bound already. Otherwise, EINVAL is hit at the end of the transaction
+for unbound sets.
+
+Fixes: 96518518cc41 ("netfilter: add nftables")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 0ff8f1006c6b9..3e30441162896 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -993,8 +993,7 @@ static int nft_flush_table(struct nft_ctx *ctx)
+               if (!nft_is_active_next(ctx->net, set))
+                       continue;
+-              if (nft_set_is_anonymous(set) &&
+-                  !list_empty(&set->bindings))
++              if (nft_set_is_anonymous(set))
+                       continue;
+               err = nft_delset(ctx, set);
+@@ -4902,8 +4901,10 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
+       if (IS_ERR(set))
+               return PTR_ERR(set);
+-      if (!list_empty(&set->bindings) &&
+-          (set->flags & (NFT_SET_CONSTANT | NFT_SET_ANONYMOUS)))
++      if (nft_set_is_anonymous(set))
++              return -EOPNOTSUPP;
++
++      if (!list_empty(&set->bindings) && (set->flags & NFT_SET_CONSTANT))
+               return -EBUSY;
+       if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) {
+-- 
+2.40.1
+
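Put together, nf_tables_delsetelem() above ends up with two separate checks: element removal on an anonymous set is refused outright, while a bound constant set stays busy. A compact sketch of that decision logic with simplified stand-in types (not the real nftables structures):

  #include <stdio.h>
  #include <errno.h>
  #include <stdbool.h>

  /* Simplified stand-in for struct nft_set. */
  struct demo_set {
      bool anonymous;   /* NFT_SET_ANONYMOUS */
      bool constant;    /* NFT_SET_CONSTANT */
      bool bound;       /* !list_empty(&set->bindings) */
  };

  static int delsetelem_check(const struct demo_set *set)
  {
      /* Anonymous sets are populated once at creation and then bound to a
       * rule; userspace never needs to delete their elements. */
      if (set->anonymous)
          return -EOPNOTSUPP;

      /* A constant set that is already bound must not be mutated. */
      if (set->bound && set->constant)
          return -EBUSY;

      return 0;   /* removal may proceed */
  }

  int main(void)
  {
      struct demo_set anon_set  = { .anonymous = true };
      struct demo_set const_set = { .constant = true, .bound = true };

      printf("anonymous set: %d\n", delsetelem_check(&anon_set));        /* -EOPNOTSUPP */
      printf("bound constant set: %d\n", delsetelem_check(&const_set));  /* -EBUSY */
      return 0;
  }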
diff --git a/queue-4.19/powerpc-perf-hv-24x7-update-domain-value-check.patch b/queue-4.19/powerpc-perf-hv-24x7-update-domain-value-check.patch
new file mode 100644 (file)
index 0000000..9c95422
--- /dev/null
@@ -0,0 +1,63 @@
+From b02b22d6d0783e03bfd2dc1a3bebef5d1f0f8d85 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Aug 2023 11:26:01 +0530
+Subject: powerpc/perf/hv-24x7: Update domain value check
+
+From: Kajol Jain <kjain@linux.ibm.com>
+
+[ Upstream commit 4ff3ba4db5943cac1045e3e4a3c0463ea10f6930 ]
+
+Valid domain values are in the range 1 to HV_PERF_DOMAIN_MAX. The current
+code checks for a domain value greater than or equal to HV_PERF_DOMAIN_MAX,
+but the check for a domain value of 0 is missing.
+
+Fix this issue by adding a check for domain value 0.
+
+Before:
+  # ./perf stat -v -e hv_24x7/CPM_ADJUNCT_INST,domain=0,core=1/ sleep 1
+  Using CPUID 00800200
+  Control descriptor is not initialized
+  Error:
+  The sys_perf_event_open() syscall returned with 5 (Input/output error) for
+  event (hv_24x7/CPM_ADJUNCT_INST,domain=0,core=1/).
+  /bin/dmesg | grep -i perf may provide additional information.
+
+  Result from dmesg:
+  [   37.819387] hv-24x7: hcall failed: [0 0x60040000 0x100 0] => ret
+  0xfffffffffffffffc (-4) detail=0x2000000 failing ix=0
+
+After:
+  # ./perf stat -v -e hv_24x7/CPM_ADJUNCT_INST,domain=0,core=1/ sleep 1
+  Using CPUID 00800200
+  Control descriptor is not initialized
+  Warning:
+  hv_24x7/CPM_ADJUNCT_INST,domain=0,core=1/ event is not supported by the kernel.
+  failed to read counter hv_24x7/CPM_ADJUNCT_INST,domain=0,core=1/
+
+Fixes: ebd4a5a3ebd9 ("powerpc/perf/hv-24x7: Minor improvements")
+Reported-by: Krishan Gopal Sarawast <krishang@linux.vnet.ibm.com>
+Signed-off-by: Kajol Jain <kjain@linux.ibm.com>
+Tested-by: Disha Goel <disgoel@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20230825055601.360083-1-kjain@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/perf/hv-24x7.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
+index 2bb798918483d..e6eb2b4cf97ea 100644
+--- a/arch/powerpc/perf/hv-24x7.c
++++ b/arch/powerpc/perf/hv-24x7.c
+@@ -1326,7 +1326,7 @@ static int h_24x7_event_init(struct perf_event *event)
+       }
+       domain = event_get_domain(event);
+-      if (domain >= HV_PERF_DOMAIN_MAX) {
++      if (domain  == 0 || domain >= HV_PERF_DOMAIN_MAX) {
+               pr_devel("invalid domain %d\n", domain);
+               return -EINVAL;
+       }
+-- 
+2.40.1
+
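The fix above is a plain bounds check: domain 0 is just as invalid as values at or above HV_PERF_DOMAIN_MAX, and rejecting it at event init turns the hcall I/O error into a clean "event is not supported". A trivial sketch of the corrected predicate, using a made-up upper bound:

  #include <stdio.h>
  #include <stdbool.h>

  #define DEMO_DOMAIN_MAX 7   /* hypothetical stand-in for HV_PERF_DOMAIN_MAX */

  /* Valid domains are 1 .. DEMO_DOMAIN_MAX - 1; both 0 and >= MAX are rejected. */
  static bool domain_is_valid(unsigned int domain)
  {
      return domain != 0 && domain < DEMO_DOMAIN_MAX;
  }

  int main(void)
  {
      for (unsigned int d = 0; d <= DEMO_DOMAIN_MAX; d++)
          printf("domain %u -> %s\n", d, domain_is_valid(d) ? "ok" : "invalid");
      return 0;
  }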
diff --git a/queue-4.19/selftests-tls-add-to-avoid-static-checker-warning.patch b/queue-4.19/selftests-tls-add-to-avoid-static-checker-warning.patch
new file mode 100644 (file)
index 0000000..d1bd9f6
--- /dev/null
@@ -0,0 +1,41 @@
+From fcbf2a9330e347df55fdba69b0d700ac61ed0173 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 May 2021 20:27:19 -0700
+Subject: selftests/tls: Add {} to avoid static checker warning
+
+From: Kees Cook <keescook@chromium.org>
+
+[ Upstream commit f50688b47c5858d2ff315d020332bf4cb6710837 ]
+
+This silences a static checker warning due to the unusual macro
+construction of EXPECT_*() by adding explicit {}s around the enclosing
+while loop.
+
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Fixes: 7f657d5bf507 ("selftests: tls: add selftests for TLS sockets")
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: c326ca98446e ("selftests: tls: swap the TX and RX sockets in some tests")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/tls.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index 7549d39ccafff..43bb9eadf03e7 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -202,8 +202,9 @@ TEST_F(tls, sendmsg_large)
+               EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
+       }
+-      while (recvs++ < sends)
++      while (recvs++ < sends) {
+               EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
++      }
+       free(mem);
+ }
+-- 
+2.40.1
+
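The warning above stems from a general C pitfall: a test macro that expands to more than one statement behaves surprisingly as the unbraced body of a loop. A small self-contained illustration with a deliberately unhygienic CHECK() macro (not the kselftest EXPECT_*() implementation, which is more involved):

  #include <stdio.h>

  static int evaluations;   /* bumped by the macro's second statement */
  static int failures;

  /* Deliberately bad macro: two statements, no do { } while (0) wrapper. */
  #define CHECK(cond)  failures += !(cond); evaluations++

  int main(void)
  {
      int i;

      /* Without braces only the first statement is the loop body; the
       * second runs once, after the loop. */
      evaluations = failures = 0;
      for (i = 0; i < 3; i++)
          CHECK(i < 10);
      printf("no braces:   evaluations=%d failures=%d\n", evaluations, failures);

      /* With braces the whole expansion stays inside the loop, which is
       * what the patch makes explicit for the static checker. */
      evaluations = failures = 0;
      for (i = 0; i < 3; i++) {
          CHECK(i < 10);
      }
      printf("with braces: evaluations=%d failures=%d\n", evaluations, failures);
      return 0;
  }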
diff --git a/queue-4.19/selftests-tls-swap-the-tx-and-rx-sockets-in-some-tes.patch b/queue-4.19/selftests-tls-swap-the-tx-and-rx-sockets-in-some-tes.patch
new file mode 100644 (file)
index 0000000..3087048
--- /dev/null
@@ -0,0 +1,55 @@
+From 5b071efcdf9a22ec79e240f5bf949d4baee663ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Sep 2023 16:16:25 +0200
+Subject: selftests: tls: swap the TX and RX sockets in some tests
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit c326ca98446e0ae4fee43a40acf79412b74cfedb ]
+
+tls.sendmsg_large and tls.sendmsg_multiple are trying to send through
+the self->cfd socket (only configured with TLS_RX) and to receive through
+the self->fd socket (only configured with TLS_TX), so they're not using
+kTLS at all. Swap the sockets.
+
+Fixes: 7f657d5bf507 ("selftests: tls: add selftests for TLS sockets")
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/tls.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
+index 43bb9eadf03e7..92adfe4df4e6d 100644
+--- a/tools/testing/selftests/net/tls.c
++++ b/tools/testing/selftests/net/tls.c
+@@ -199,11 +199,11 @@ TEST_F(tls, sendmsg_large)
+               msg.msg_iov = &vec;
+               msg.msg_iovlen = 1;
+-              EXPECT_EQ(sendmsg(self->cfd, &msg, 0), send_len);
++              EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+       }
+       while (recvs++ < sends) {
+-              EXPECT_NE(recv(self->fd, mem, send_len, 0), -1);
++              EXPECT_NE(recv(self->cfd, mem, send_len, 0), -1);
+       }
+       free(mem);
+@@ -232,9 +232,9 @@ TEST_F(tls, sendmsg_multiple)
+       msg.msg_iov = vec;
+       msg.msg_iovlen = iov_len;
+-      EXPECT_EQ(sendmsg(self->cfd, &msg, 0), total_len);
++      EXPECT_EQ(sendmsg(self->fd, &msg, 0), total_len);
+       buf = malloc(total_len);
+-      EXPECT_NE(recv(self->fd, buf, total_len, 0), -1);
++      EXPECT_NE(recv(self->cfd, buf, total_len, 0), -1);
+       for (i = 0; i < iov_len; i++) {
+               EXPECT_EQ(memcmp(test_strs[i], buf + len_cmp,
+                                strlen(test_strs[i])),
+-- 
+2.40.1
+
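For context on why sending on the TLS_RX-only socket bypassed kTLS entirely: each direction has to be enabled separately, by attaching the "tls" ULP to the TCP socket and then installing TLS_TX state on the sender and TLS_RX state on the receiver. A rough sketch of that setup, assuming an already-established TCP socket and a kTLS-capable kernel; error handling is trimmed, the crypto material is left zeroed purely for illustration, and the SOL_TLS/TCP_ULP constants are defined locally in case the libc headers lack them.

  #include <string.h>
  #include <sys/socket.h>
  #include <netinet/tcp.h>
  #include <linux/tls.h>

  #ifndef SOL_TLS
  #define SOL_TLS 282   /* kernel value; assumed here if the headers do not provide it */
  #endif
  #ifndef TCP_ULP
  #define TCP_ULP 31    /* from linux/tcp.h; assumed if the headers do not provide it */
  #endif

  /* Attach kTLS to one direction of an established TCP socket.
   * 'direction' is TLS_TX for the sending side or TLS_RX for the receiving side. */
  static int ktls_attach(int sock, int direction)
  {
      struct tls12_crypto_info_aes_gcm_128 crypto;

      memset(&crypto, 0, sizeof(crypto));
      crypto.info.version = TLS_1_2_VERSION;
      crypto.info.cipher_type = TLS_CIPHER_AES_GCM_128;
      /* crypto.key / crypto.iv / crypto.salt / crypto.rec_seq would carry the
       * session secrets negotiated by a real TLS handshake. */

      if (setsockopt(sock, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
          return -1;
      return setsockopt(sock, SOL_TLS, direction, &crypto, sizeof(crypto));
  }

In the fixed tests, sendmsg() targets the TLS_TX side (self->fd) and recv() reads from the TLS_RX side (self->cfd), so both the kernel encrypt and decrypt paths are actually exercised.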
diff --git a/queue-4.19/series b/queue-4.19/series
index 7392b38919fcb4beba083cab0502cc58c2de934c..fa47b019cc829a2332049e15ad3aab5e0b1fffb7 100644 (file)
--- a/queue-4.19/series
@@ -1,3 +1,12 @@
 nfs-pnfs-report-einval-errors-from-connect-to-the-se.patch
 ata-ahci-drop-pointless-vprintk-calls-and-convert-th.patch
 ata-libahci-clear-pending-interrupt-status.patch
+netfilter-nf_tables-disallow-element-removal-on-anon.patch
+selftests-tls-add-to-avoid-static-checker-warning.patch
+selftests-tls-swap-the-tx-and-rx-sockets-in-some-tes.patch
+ipv4-fix-null-deref-in-ipv4_link_failure.patch
+powerpc-perf-hv-24x7-update-domain-value-check.patch
+net-hns3-add-5ms-delay-before-clear-firmware-reset-i.patch
+net-add-atomic_long_t-to-net_device_stats-fields.patch
+net-bridge-use-dev_stats_inc.patch
+team-fix-null-ptr-deref-when-team-device-type-is-cha.patch
diff --git a/queue-4.19/team-fix-null-ptr-deref-when-team-device-type-is-cha.patch b/queue-4.19/team-fix-null-ptr-deref-when-team-device-type-is-cha.patch
new file mode 100644 (file)
index 0000000..a404604
--- /dev/null
@@ -0,0 +1,121 @@
+From 41d7813ebb9a1769742b391c4f6c6e9e092d9130 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Sep 2023 20:30:11 +0800
+Subject: team: fix null-ptr-deref when team device type is changed
+
+From: Ziyang Xuan <william.xuanziyang@huawei.com>
+
+[ Upstream commit 492032760127251e5540a5716a70996bacf2a3fd ]
+
+Get a null-ptr-deref bug as follows with reproducer [1].
+
+BUG: kernel NULL pointer dereference, address: 0000000000000228
+...
+RIP: 0010:vlan_dev_hard_header+0x35/0x140 [8021q]
+...
+Call Trace:
+ <TASK>
+ ? __die+0x24/0x70
+ ? page_fault_oops+0x82/0x150
+ ? exc_page_fault+0x69/0x150
+ ? asm_exc_page_fault+0x26/0x30
+ ? vlan_dev_hard_header+0x35/0x140 [8021q]
+ ? vlan_dev_hard_header+0x8e/0x140 [8021q]
+ neigh_connected_output+0xb2/0x100
+ ip6_finish_output2+0x1cb/0x520
+ ? nf_hook_slow+0x43/0xc0
+ ? ip6_mtu+0x46/0x80
+ ip6_finish_output+0x2a/0xb0
+ mld_sendpack+0x18f/0x250
+ mld_ifc_work+0x39/0x160
+ process_one_work+0x1e6/0x3f0
+ worker_thread+0x4d/0x2f0
+ ? __pfx_worker_thread+0x10/0x10
+ kthread+0xe5/0x120
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork+0x34/0x50
+ ? __pfx_kthread+0x10/0x10
+ ret_from_fork_asm+0x1b/0x30
+
+[1]
+$ teamd -t team0 -d -c '{"runner": {"name": "loadbalance"}}'
+$ ip link add name t-dummy type dummy
+$ ip link add link t-dummy name t-dummy.100 type vlan id 100
+$ ip link add name t-nlmon type nlmon
+$ ip link set t-nlmon master team0
+$ ip link set t-nlmon nomaster
+$ ip link set t-dummy up
+$ ip link set team0 up
+$ ip link set t-dummy.100 down
+$ ip link set t-dummy.100 master team0
+
+When a vlan device is enslaved to a team device and the team device's type
+is changed from non-ether to ether, the header_ops of the team device is
+changed to vlan_header_ops. That is incorrect and triggers a null-ptr-deref
+on vlan->real_dev in vlan_dev_hard_header(), because the team device is not
+a vlan device.
+
+Cache eth_header_ops in team_setup(), then assign the cached header_ops to
+the team net device's header_ops when its type is changed from non-ether
+to ether, to fix the bug.
+
+Fixes: 1d76efe1577b ("team: add support for non-ethernet devices")
+Suggested-by: Hangbin Liu <liuhangbin@gmail.com>
+Reviewed-by: Hangbin Liu <liuhangbin@gmail.com>
+Signed-off-by: Ziyang Xuan <william.xuanziyang@huawei.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20230918123011.1884401-1-william.xuanziyang@huawei.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/team/team.c | 10 +++++++++-
+ include/linux/if_team.h |  2 ++
+ 2 files changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 8b5e1ec6aabfb..08f9530fd5b15 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2095,7 +2095,12 @@ static const struct ethtool_ops team_ethtool_ops = {
+ static void team_setup_by_port(struct net_device *dev,
+                              struct net_device *port_dev)
+ {
+-      dev->header_ops = port_dev->header_ops;
++      struct team *team = netdev_priv(dev);
++
++      if (port_dev->type == ARPHRD_ETHER)
++              dev->header_ops = team->header_ops_cache;
++      else
++              dev->header_ops = port_dev->header_ops;
+       dev->type = port_dev->type;
+       dev->hard_header_len = port_dev->hard_header_len;
+       dev->needed_headroom = port_dev->needed_headroom;
+@@ -2142,8 +2147,11 @@ static int team_dev_type_check_change(struct net_device *dev,
+ static void team_setup(struct net_device *dev)
+ {
++      struct team *team = netdev_priv(dev);
++
+       ether_setup(dev);
+       dev->max_mtu = ETH_MAX_MTU;
++      team->header_ops_cache = dev->header_ops;
+       dev->netdev_ops = &team_netdev_ops;
+       dev->ethtool_ops = &team_ethtool_ops;
+diff --git a/include/linux/if_team.h b/include/linux/if_team.h
+index ac42da56f7a28..fd32538ae705e 100644
+--- a/include/linux/if_team.h
++++ b/include/linux/if_team.h
+@@ -196,6 +196,8 @@ struct team {
+       struct net_device *dev; /* associated netdevice */
+       struct team_pcpu_stats __percpu *pcpu_stats;
++      const struct header_ops *header_ops_cache;
++
+       struct mutex lock; /* used for overall locking, e.g. port lists write */
+       /*
+-- 
+2.40.1
+
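A stand-alone sketch of the caching pattern the patch introduces: remember the Ethernet header_ops chosen in team_setup(), and restore that cached pointer, rather than copying the port's ops, when the device type flips back to Ethernet. All names below are simplified stand-ins for the team/netdevice structures.

  #include <stdio.h>

  enum dev_type { TYPE_ETHER, TYPE_OTHER };

  struct header_ops { const char *name; };

  static const struct header_ops eth_ops  = { "eth_header_ops"  };
  static const struct header_ops vlan_ops = { "vlan_header_ops" };

  struct demo_dev {
      enum dev_type type;
      const struct header_ops *header_ops;
  };

  struct demo_team {
      struct demo_dev dev;
      const struct header_ops *header_ops_cache;   /* what the patch adds */
  };

  static void team_setup(struct demo_team *team)
  {
      team->dev.type = TYPE_ETHER;
      team->dev.header_ops = &eth_ops;
      team->header_ops_cache = team->dev.header_ops;   /* cache at setup time */
  }

  static void team_setup_by_port(struct demo_team *team, const struct demo_dev *port)
  {
      /* Restore the cached Ethernet ops when flipping back to an Ethernet
       * type; copying a vlan port's ops here is what caused the crash. */
      if (port->type == TYPE_ETHER)
          team->dev.header_ops = team->header_ops_cache;
      else
          team->dev.header_ops = port->header_ops;
      team->dev.type = port->type;
  }

  int main(void)
  {
      struct demo_team team;
      struct demo_dev vlan_port = { TYPE_ETHER, &vlan_ops };

      team_setup(&team);
      team_setup_by_port(&team, &vlan_port);
      printf("team header_ops: %s\n", team.dev.header_ops->name);   /* eth_header_ops */
      return 0;
  }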