--- /dev/null
+From dac8e00fb640e9569cdeefd3ce8a75639e5d0711 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 2 Dec 2021 18:27:18 -0800
+Subject: bonding: make tx_rebalance_counter an atomic
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit dac8e00fb640e9569cdeefd3ce8a75639e5d0711 upstream.
+
+KCSAN reported a data-race [1] around tx_rebalance_counter, which can
+be accessed from different contexts without the protection of a
+lock/mutex.
+
+[1]
+BUG: KCSAN: data-race in bond_alb_init_slave / bond_alb_monitor
+
+write to 0xffff888157e8ca24 of 4 bytes by task 7075 on cpu 0:
+ bond_alb_init_slave+0x713/0x860 drivers/net/bonding/bond_alb.c:1613
+ bond_enslave+0xd94/0x3010 drivers/net/bonding/bond_main.c:1949
+ do_set_master net/core/rtnetlink.c:2521 [inline]
+ __rtnl_newlink net/core/rtnetlink.c:3475 [inline]
+ rtnl_newlink+0x1298/0x13b0 net/core/rtnetlink.c:3506
+ rtnetlink_rcv_msg+0x745/0x7e0 net/core/rtnetlink.c:5571
+ netlink_rcv_skb+0x14e/0x250 net/netlink/af_netlink.c:2491
+ rtnetlink_rcv+0x18/0x20 net/core/rtnetlink.c:5589
+ netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
+ netlink_unicast+0x5fc/0x6c0 net/netlink/af_netlink.c:1345
+ netlink_sendmsg+0x6e1/0x7d0 net/netlink/af_netlink.c:1916
+ sock_sendmsg_nosec net/socket.c:704 [inline]
+ sock_sendmsg net/socket.c:724 [inline]
+ ____sys_sendmsg+0x39a/0x510 net/socket.c:2409
+ ___sys_sendmsg net/socket.c:2463 [inline]
+ __sys_sendmsg+0x195/0x230 net/socket.c:2492
+ __do_sys_sendmsg net/socket.c:2501 [inline]
+ __se_sys_sendmsg net/socket.c:2499 [inline]
+ __x64_sys_sendmsg+0x42/0x50 net/socket.c:2499
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x44/0xd0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+read to 0xffff888157e8ca24 of 4 bytes by task 1082 on cpu 1:
+ bond_alb_monitor+0x8f/0xc00 drivers/net/bonding/bond_alb.c:1511
+ process_one_work+0x3fc/0x980 kernel/workqueue.c:2298
+ worker_thread+0x616/0xa70 kernel/workqueue.c:2445
+ kthread+0x2c7/0x2e0 kernel/kthread.c:327
+ ret_from_fork+0x1f/0x30
+
+value changed: 0x00000001 -> 0x00000064
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 1 PID: 1082 Comm: kworker/u4:3 Not tainted 5.16.0-rc3-syzkaller #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Workqueue: bond1 bond_alb_monitor
+
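+For illustration only (not part of the patch), the before/after shape
+of the conversion; the names are the real ones, the comments are ours:
+
+  /* Before: plain int, so these unsynchronized accesses race. */
+  bond_info->tx_rebalance_counter++;                  /* monitor work */
+  bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
+
+  /* After: atomic_t makes each access a single marked operation. */
+  atomic_inc(&bond_info->tx_rebalance_counter);
+  atomic_set(&bond_info->tx_rebalance_counter, BOND_TLB_REBALANCE_TICKS);
+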
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_alb.c | 14 ++++++++------
+ include/net/bond_alb.h | 2 +-
+ 2 files changed, 9 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -1502,14 +1502,14 @@ void bond_alb_monitor(struct work_struct
+ struct slave *slave;
+
+ if (!bond_has_slaves(bond)) {
+- bond_info->tx_rebalance_counter = 0;
++ atomic_set(&bond_info->tx_rebalance_counter, 0);
+ bond_info->lp_counter = 0;
+ goto re_arm;
+ }
+
+ rcu_read_lock();
+
+- bond_info->tx_rebalance_counter++;
++ atomic_inc(&bond_info->tx_rebalance_counter);
+ bond_info->lp_counter++;
+
+ /* send learning packets */
+@@ -1531,7 +1531,7 @@ void bond_alb_monitor(struct work_struct
+ }
+
+ /* rebalance tx traffic */
+- if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
++ if (atomic_read(&bond_info->tx_rebalance_counter) >= BOND_TLB_REBALANCE_TICKS) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ tlb_clear_slave(bond, slave, 1);
+ if (slave == rcu_access_pointer(bond->curr_active_slave)) {
+@@ -1541,7 +1541,7 @@ void bond_alb_monitor(struct work_struct
+ bond_info->unbalanced_load = 0;
+ }
+ }
+- bond_info->tx_rebalance_counter = 0;
++ atomic_set(&bond_info->tx_rebalance_counter, 0);
+ }
+
+ if (bond_info->rlb_enabled) {
+@@ -1611,7 +1611,8 @@ int bond_alb_init_slave(struct bonding *
+ tlb_init_slave(slave);
+
+ /* order a rebalance ASAP */
+- bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
++ atomic_set(&bond->alb_info.tx_rebalance_counter,
++ BOND_TLB_REBALANCE_TICKS);
+
+ if (bond->alb_info.rlb_enabled)
+ bond->alb_info.rlb_rebalance = 1;
+@@ -1648,7 +1649,8 @@ void bond_alb_handle_link_change(struct
+ rlb_clear_slave(bond, slave);
+ } else if (link == BOND_LINK_UP) {
+ /* order a rebalance ASAP */
+- bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
++ atomic_set(&bond_info->tx_rebalance_counter,
++ BOND_TLB_REBALANCE_TICKS);
+ if (bond->alb_info.rlb_enabled) {
+ bond->alb_info.rlb_rebalance = 1;
+ /* If the updelay module parameter is smaller than the
+--- a/include/net/bond_alb.h
++++ b/include/net/bond_alb.h
+@@ -126,7 +126,7 @@ struct tlb_slave_info {
+ struct alb_bond_info {
+ struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
+ u32 unbalanced_load;
+- int tx_rebalance_counter;
++ atomic_t tx_rebalance_counter;
+ int lp_counter;
+ /* -------- rlb parameters -------- */
+ int rlb_enabled;
--- /dev/null
+From 2fa7d94afc1afbb4d702760c058dc2d7ed30f226 Mon Sep 17 00:00:00 2001
+From: Maxim Mikityanskiy <maximmi@nvidia.com>
+Date: Tue, 30 Nov 2021 20:16:07 +0200
+Subject: bpf: Fix the off-by-two error in range markings
+
+From: Maxim Mikityanskiy <maximmi@nvidia.com>
+
+commit 2fa7d94afc1afbb4d702760c058dc2d7ed30f226 upstream.
+
+The first commit cited below attempts to fix the off-by-one error that
+appeared in some comparisons with an open range. Due to this error,
+arithmetically equivalent pieces of code could get different verdicts
+from the verifier, for example (pseudocode):
+
+ // 1. Passes the verifier:
+ if (data + 8 > data_end)
+ return early
+ read *(u64 *)data, i.e. [data; data+7]
+
+ // 2. Rejected by the verifier (should still pass):
+ if (data + 7 >= data_end)
+ return early
+ read *(u64 *)data, i.e. [data; data+7]
+
+The attempted fix, however, shifts the range by one in the wrong
+direction, so the bug not only remains, but code like the following
+also starts failing in the verifier:
+
+ // 3. Rejected by the verifier, but the check is stricter than in #1.
+ if (data + 8 >= data_end)
+ return early
+ read *(u64 *)data, i.e. [data; data+7]
+
+The change performed by that fix converted an off-by-one bug into an
+off-by-two one. The second commit cited below added the BPF selftests
+written to ensure that code chunks like #3 are rejected; however,
+they should be accepted.
+
+This commit fixes the off-by-two error by adjusting new_range in the
+right direction and fixes the tests by changing the range into the
+one that should actually fail.
+
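+For reference, the arithmetic behind case #2 (pseudocode as above):
+
+  "data + 7 >= data_end" not taken
+  =>  data + 7 < data_end
+  =>  data + 8 <= data_end
+  =>  bytes [data; data+7] are all inside the packet,
+
+so the 8-byte read in #2 is safe and should be accepted.
+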
+Fixes: fb2a311a31d3 ("bpf: fix off by one for range markings with L{T, E} patterns")
+Fixes: b37242c773b2 ("bpf: add test cases to bpf selftests to cover all access tests")
+Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20211130181607.593149-1-maximmi@nvidia.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c | 2
+ tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c | 32 +++++-----
+ 2 files changed, 17 insertions(+), 17 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -8228,7 +8228,7 @@ static void find_good_pkt_pointers(struc
+
+ new_range = dst_reg->off;
+ if (range_right_open)
+- new_range--;
++ new_range++;
+
+ /* Examples for register markings:
+ *
+--- a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
++++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c
+@@ -112,10 +112,10 @@
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -167,10 +167,10 @@
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -274,9 +274,9 @@
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -437,9 +437,9 @@
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -544,10 +544,10 @@
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -599,10 +599,10 @@
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -706,9 +706,9 @@
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+@@ -869,9 +869,9 @@
+ offsetof(struct xdp_md, data_meta)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
--- /dev/null
+From 79364031c5b4365ca28ac0fa00acfab5bf465be1 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Sat, 27 Nov 2021 17:32:00 +0100
+Subject: bpf: Make sure bpf_disable_instrumentation() is safe vs preemption.
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+commit 79364031c5b4365ca28ac0fa00acfab5bf465be1 upstream.
+
+The initial implementation of migrate_disable() for mainline was a
+wrapper around preempt_disable(). RT kernels substituted this with a
+real migrate disable implementation.
+
+Later on, mainline gained true migrate disable support, but neither
+the documentation nor the affected code was updated.
+Remove stale comments claiming that migrate_disable() is PREEMPT_RT only.
+
+Don't use __this_cpu_inc() in the !PREEMPT_RT path because preemption is
+not disabled and the RMW operation can be preempted.
+
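+A sketch of the hazard (illustrative; on some architectures both
+variants compile to a single instruction, but the generic
+__this_cpu_*() fallback is a plain, unprotected RMW):
+
+  tmp = __this_cpu_read(bpf_prog_active);      /* read          */
+  /* preempted here: another task on this CPU does the same inc */
+  __this_cpu_write(bpf_prog_active, tmp + 1);  /* lost update   */
+
+this_cpu_inc() is either a single instruction or disables preemption
+around the RMW, so the increment cannot be lost.
+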
+Fixes: 74d862b682f51 ("sched: Make migrate_disable/enable() independent of RT")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20211127163200.10466-3-bigeasy@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/bpf.h | 16 ++--------------
+ include/linux/filter.h | 3 ---
+ 2 files changed, 2 insertions(+), 17 deletions(-)
+
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1321,28 +1321,16 @@ extern struct mutex bpf_stats_enabled_mu
+ * kprobes, tracepoints) to prevent deadlocks on map operations as any of
+ * these events can happen inside a region which holds a map bucket lock
+ * and can deadlock on it.
+- *
+- * Use the preemption safe inc/dec variants on RT because migrate disable
+- * is preemptible on RT and preemption in the middle of the RMW operation
+- * might lead to inconsistent state. Use the raw variants for non RT
+- * kernels as migrate_disable() maps to preempt_disable() so the slightly
+- * more expensive save operation can be avoided.
+ */
+ static inline void bpf_disable_instrumentation(void)
+ {
+ migrate_disable();
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- this_cpu_inc(bpf_prog_active);
+- else
+- __this_cpu_inc(bpf_prog_active);
++ this_cpu_inc(bpf_prog_active);
+ }
+
+ static inline void bpf_enable_instrumentation(void)
+ {
+- if (IS_ENABLED(CONFIG_PREEMPT_RT))
+- this_cpu_dec(bpf_prog_active);
+- else
+- __this_cpu_dec(bpf_prog_active);
++ this_cpu_dec(bpf_prog_active);
+ migrate_enable();
+ }
+
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -639,9 +639,6 @@ static __always_inline u32 bpf_prog_run(
+ * This uses migrate_disable/enable() explicitly to document that the
+ * invocation of a BPF program does not require reentrancy protection
+ * against a BPF program which is invoked from a preempting task.
+- *
+- * For non RT enabled kernels migrate_disable/enable() maps to
+- * preempt_disable/enable(), i.e. it disables also preemption.
+ */
+ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
+ const void *ctx)
--- /dev/null
+From dde91ccfa25fd58f64c397d91b81a4b393100ffa Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <atenart@kernel.org>
+Date: Fri, 3 Dec 2021 11:13:18 +0100
+Subject: ethtool: do not perform operations on net devices being unregistered
+
+From: Antoine Tenart <atenart@kernel.org>
+
+commit dde91ccfa25fd58f64c397d91b81a4b393100ffa upstream.
+
+There is a short period between when a net device starts to be
+unregistered and when it is actually gone. In that time frame ethtool
+operations could still be performed, which might result in unwanted or
+undefined behaviours[1].
+
+Do not allow ethtool operations after a net device starts its
+unregistration. This patch targets the netlink part, as the ioctl one
+isn't affected: there, the reference to the net device is taken and
+the operation is executed within an rtnl lock section, so the net
+device won't be found after unregister.
+
+[1] For example, adding Tx queues after unregister ends up in NULL
+ pointer dereferences and use-after-frees, such as:
+
+ BUG: KASAN: use-after-free in kobject_get+0x14/0x90
+ Read of size 1 at addr ffff88801961248c by task ethtool/755
+
+ CPU: 0 PID: 755 Comm: ethtool Not tainted 5.15.0-rc6+ #778
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-4.fc34 04/014
+ Call Trace:
+ dump_stack_lvl+0x57/0x72
+ print_address_description.constprop.0+0x1f/0x140
+ kasan_report.cold+0x7f/0x11b
+ kobject_get+0x14/0x90
+ kobject_add_internal+0x3d1/0x450
+ kobject_init_and_add+0xba/0xf0
+ netdev_queue_update_kobjects+0xcf/0x200
+ netif_set_real_num_tx_queues+0xb4/0x310
+ veth_set_channels+0x1c3/0x550
+ ethnl_set_channels+0x524/0x610
+
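+For context, an illustrative timeline of the window this closes (not
+part of the patch; function names are from net/core/dev.c):
+
+  unregister_netdevice(dev)  /* reg_state = NETREG_UNREGISTERING  */
+      ...                    /* dev still reachable via ethtool nl */
+  netdev_run_todo()          /* reg_state = NETREG_UNREGISTERED,  */
+                             /* device is finally gone            */
+
+Checking reg_state in ethnl_ops_begin() rejects requests that land
+inside that window.
+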
+Fixes: 041b1c5d4a53 ("ethtool: helper functions for netlink interface")
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Antoine Tenart <atenart@kernel.org>
+Link: https://lore.kernel.org/r/20211203101318.435618-1-atenart@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ethtool/netlink.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/ethtool/netlink.c
++++ b/net/ethtool/netlink.c
+@@ -40,7 +40,8 @@ int ethnl_ops_begin(struct net_device *d
+ if (dev->dev.parent)
+ pm_runtime_get_sync(dev->dev.parent);
+
+- if (!netif_device_present(dev)) {
++ if (!netif_device_present(dev) ||
++ dev->reg_state == NETREG_UNREGISTERING) {
+ ret = -ENODEV;
+ goto err;
+ }
--- /dev/null
+From 28dc1b86f8ea9fd6f4c9e0b363db73ecabf84e22 Mon Sep 17 00:00:00 2001
+From: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Date: Fri, 22 Oct 2021 17:28:17 -0700
+Subject: ice: ignore dropped packets during init
+
+From: Jesse Brandeburg <jesse.brandeburg@intel.com>
+
+commit 28dc1b86f8ea9fd6f4c9e0b363db73ecabf84e22 upstream.
+
+If the hardware is constantly receiving unicast or broadcast packets
+during driver load, the device previously counted many GLV_RDPC (VSI
+dropped packets) events during init. This causes confusing dropped
+packet statistics during driver load. The dropped packets counter does
+stop incrementing once the driver finishes loading.
+
+Avoid this problem by baselining our statistics at the end of driver
+open instead of the end of probe.
+
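+The mechanism relied on, as a simplified sketch (the driver's real
+helpers, ice_stat_update40/32, also handle counter wrap):
+
+  if (!prev_stat_loaded)              /* stat_offsets_loaded == false */
+          *prev_stat = new_data;      /* first read becomes baseline  */
+  *cur_stat += new_data - *prev_stat; /* report delta past baseline   */
+  *prev_stat = new_data;
+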
+Fixes: cdedef59deb0 ("ice: Configure VSIs for Tx/Rx")
+Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
+Tested-by: Gurucharan G <gurucharanx.g@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ice/ice_main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -5621,6 +5621,9 @@ static int ice_up_complete(struct ice_vs
+ netif_carrier_on(vsi->netdev);
+ }
+
++ /* clear this now, and the first stats read will be used as baseline */
++ vsi->stat_offsets_loaded = false;
++
+ ice_service_task_schedule(pf);
+
+ return 0;
--- /dev/null
+From c56c96303e9289cc34716b1179597b6f470833de Mon Sep 17 00:00:00 2001
+From: Jianglei Nie <niejianglei2021@163.com>
+Date: Thu, 9 Dec 2021 14:15:11 +0800
+Subject: nfp: Fix memory leak in nfp_cpp_area_cache_add()
+
+From: Jianglei Nie <niejianglei2021@163.com>
+
+commit c56c96303e9289cc34716b1179597b6f470833de upstream.
+
+In line 800 (#1), nfp_cpp_area_alloc() allocates and initializes a
+CPP area structure. But in line 807 (#2), when the cache allocation
+fails, this CPP area structure is not freed, which results in a
+memory leak.
+
+We can fix it by freeing the CPP area when the cache allocation
+fails (#2).
+
+792 int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
+793 {
+794 struct nfp_cpp_area_cache *cache;
+795 struct nfp_cpp_area *area;
+
+800 area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
+801 0, size);
+ // #1: allocates and initializes
+
+802 if (!area)
+803 return -ENOMEM;
+
+805 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+806 if (!cache)
+807 return -ENOMEM; // #2: missing free
+
+817 return 0;
+818 }
+
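+The fix follows the usual unwind-on-error pattern (sketch of the hunk
+below; nfp_cpp_area_free() is the existing counterpart of
+nfp_cpp_area_alloc()):
+
+  cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+  if (!cache) {
+          nfp_cpp_area_free(area);   /* undo allocation #1 */
+          return -ENOMEM;
+  }
+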
+Fixes: 4cb584e0ee7d ("nfp: add CPP access core")
+Signed-off-by: Jianglei Nie <niejianglei2021@163.com>
+Acked-by: Simon Horman <simon.horman@corigine.com>
+Link: https://lore.kernel.org/r/20211209061511.122535-1-niejianglei2021@163.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+@@ -803,8 +803,10 @@ int nfp_cpp_area_cache_add(struct nfp_cp
+ return -ENOMEM;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+- if (!cache)
++ if (!cache) {
++ nfp_cpp_area_free(area);
+ return -ENOMEM;
++ }
+
+ cache->id = 0;
+ cache->addr = 0;
vrf-don-t-run-conntrack-on-vrf-with-dflt-qdisc.patch
bpf-x86-fix-no-previous-prototype-warning.patch
bpf-sockmap-attach-map-progs-to-psock-early-for-feature-probes.patch
+bpf-make-sure-bpf_disable_instrumentation-is-safe-vs-preemption.patch
+bpf-fix-the-off-by-two-error-in-range-markings.patch
+ice-ignore-dropped-packets-during-init.patch
+ethtool-do-not-perform-operations-on-net-devices-being-unregistered.patch
+bonding-make-tx_rebalance_counter-an-atomic.patch
+nfp-fix-memory-leak-in-nfp_cpp_area_cache_add.patch