--- /dev/null
+From ba9a57e349fcebc897540d7d6c7f311c4a1d1c5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Sep 2021 15:57:50 -0700
+Subject: af_unix: fix races in sk_peer_pid and sk_peer_cred accesses
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 35306eb23814444bd4021f8a1c3047d3cb0c8b2b ]
+
+Jann Horn reported that SO_PEERCRED and SO_PEERGROUPS implementations
+are racy, as af_unix can concurrently change sk_peer_pid and sk_peer_cred.
+
+In order to fix this issue, this patch adds a new spinlock that needs
+to be used whenever these fields are read or written.
+
+Jann also pointed out that l2cap_sock_get_peer_pid_cb() is currently
+reading sk->sk_peer_pid which makes no sense, as this field
+is only possibly set by AF_UNIX sockets.
+We will have to clean this in a separate patch.
+This could be done by reverting b48596d1dc25 "Bluetooth: L2CAP: Add get_peer_pid callback"
+or implementing what was truly expected.
+
+Fixes: 109f6e39fa07 ("af_unix: Allow SO_PEERCRED to work across namespaces.")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Jann Horn <jannh@google.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Cc: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock.h | 2 ++
+ net/core/sock.c | 32 ++++++++++++++++++++++++++------
+ net/unix/af_unix.c | 34 ++++++++++++++++++++++++++++------
+ 3 files changed, 56 insertions(+), 12 deletions(-)
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 3c7addf95150..cdca984f3630 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -479,8 +479,10 @@ struct sock {
+ u32 sk_ack_backlog;
+ u32 sk_max_ack_backlog;
+ kuid_t sk_uid;
++ spinlock_t sk_peer_lock;
+ struct pid *sk_peer_pid;
+ const struct cred *sk_peer_cred;
++
+ long sk_rcvtimeo;
+ ktime_t sk_stamp;
+ #if BITS_PER_LONG==32
+diff --git a/net/core/sock.c b/net/core/sock.c
+index d638c5361ed2..f9c835167391 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1255,6 +1255,16 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
+ }
+ EXPORT_SYMBOL(sock_setsockopt);
+
++static const struct cred *sk_get_peer_cred(struct sock *sk)
++{
++ const struct cred *cred;
++
++ spin_lock(&sk->sk_peer_lock);
++ cred = get_cred(sk->sk_peer_cred);
++ spin_unlock(&sk->sk_peer_lock);
++
++ return cred;
++}
+
+ static void cred_to_ucred(struct pid *pid, const struct cred *cred,
+ struct ucred *ucred)
+@@ -1428,7 +1438,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+ struct ucred peercred;
+ if (len > sizeof(peercred))
+ len = sizeof(peercred);
++
++ spin_lock(&sk->sk_peer_lock);
+ cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
++ spin_unlock(&sk->sk_peer_lock);
++
+ if (copy_to_user(optval, &peercred, len))
+ return -EFAULT;
+ goto lenout;
+@@ -1436,20 +1450,23 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
+
+ case SO_PEERGROUPS:
+ {
++ const struct cred *cred;
+ int ret, n;
+
+- if (!sk->sk_peer_cred)
++ cred = sk_get_peer_cred(sk);
++ if (!cred)
+ return -ENODATA;
+
+- n = sk->sk_peer_cred->group_info->ngroups;
++ n = cred->group_info->ngroups;
+ if (len < n * sizeof(gid_t)) {
+ len = n * sizeof(gid_t);
++ put_cred(cred);
+ return put_user(len, optlen) ? -EFAULT : -ERANGE;
+ }
+ len = n * sizeof(gid_t);
+
+- ret = groups_to_user((gid_t __user *)optval,
+- sk->sk_peer_cred->group_info);
++ ret = groups_to_user((gid_t __user *)optval, cred->group_info);
++ put_cred(cred);
+ if (ret)
+ return ret;
+ goto lenout;
+@@ -1788,9 +1805,10 @@ static void __sk_destruct(struct rcu_head *head)
+ sk->sk_frag.page = NULL;
+ }
+
+- if (sk->sk_peer_cred)
+- put_cred(sk->sk_peer_cred);
++ /* We do not need to acquire sk->sk_peer_lock, we are the last user. */
++ put_cred(sk->sk_peer_cred);
+ put_pid(sk->sk_peer_pid);
++
+ if (likely(sk->sk_net_refcnt))
+ put_net(sock_net(sk));
+ sk_prot_free(sk->sk_prot_creator, sk);
+@@ -3000,6 +3018,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+
+ sk->sk_peer_pid = NULL;
+ sk->sk_peer_cred = NULL;
++ spin_lock_init(&sk->sk_peer_lock);
++
+ sk->sk_write_pending = 0;
+ sk->sk_rcvlowat = 1;
+ sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index d5c0ae34b1e4..b7edca89e0ba 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -593,20 +593,42 @@ static void unix_release_sock(struct sock *sk, int embrion)
+
+ static void init_peercred(struct sock *sk)
+ {
+- put_pid(sk->sk_peer_pid);
+- if (sk->sk_peer_cred)
+- put_cred(sk->sk_peer_cred);
++ const struct cred *old_cred;
++ struct pid *old_pid;
++
++ spin_lock(&sk->sk_peer_lock);
++ old_pid = sk->sk_peer_pid;
++ old_cred = sk->sk_peer_cred;
+ sk->sk_peer_pid = get_pid(task_tgid(current));
+ sk->sk_peer_cred = get_current_cred();
++ spin_unlock(&sk->sk_peer_lock);
++
++ put_pid(old_pid);
++ put_cred(old_cred);
+ }
+
+ static void copy_peercred(struct sock *sk, struct sock *peersk)
+ {
+- put_pid(sk->sk_peer_pid);
+- if (sk->sk_peer_cred)
+- put_cred(sk->sk_peer_cred);
++ const struct cred *old_cred;
++ struct pid *old_pid;
++
++ if (sk < peersk) {
++ spin_lock(&sk->sk_peer_lock);
++ spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
++ } else {
++ spin_lock(&peersk->sk_peer_lock);
++ spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
++ }
++ old_pid = sk->sk_peer_pid;
++ old_cred = sk->sk_peer_cred;
+ sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
+ sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
++
++ spin_unlock(&sk->sk_peer_lock);
++ spin_unlock(&peersk->sk_peer_lock);
++
++ put_pid(old_pid);
++ put_cred(old_cred);
+ }
+
+ static int unix_listen(struct socket *sock, int backlog)
+--
+2.33.0
+
--- /dev/null
+From 1c0b35735d11c241f3f67b3883cb51addc81ca6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Sep 2021 12:11:52 +0100
+Subject: bpf: Exempt CAP_BPF from checks against bpf_jit_limit
+
+From: Lorenz Bauer <lmb@cloudflare.com>
+
+[ Upstream commit 8a98ae12fbefdb583a7696de719a1d57e5e940a2 ]
+
+When introducing CAP_BPF, bpf_jit_charge_modmem() was not changed to treat
+programs with CAP_BPF as privileged for the purpose of JIT memory allocation.
+This means that a program without CAP_BPF can block a program with CAP_BPF
+from loading a program.
+
+Fix this by checking bpf_capable() in bpf_jit_charge_modmem().
+
+Fixes: 2c78ee898d8f ("bpf: Implement CAP_BPF")
+Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20210922111153.19843-1-lmb@cloudflare.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index d12efb2550d3..2e4a658d65d6 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -831,7 +831,7 @@ int bpf_jit_charge_modmem(u32 pages)
+ {
+ if (atomic_long_add_return(pages, &bpf_jit_current) >
+ (bpf_jit_limit >> PAGE_SHIFT)) {
+- if (!capable(CAP_SYS_ADMIN)) {
++ if (!bpf_capable()) {
+ atomic_long_sub(pages, &bpf_jit_current);
+ return -EPERM;
+ }
+--
+2.33.0
+
--- /dev/null
+From 3d1c5180b626294b3d4525f81910e31535f2f0b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Sep 2021 10:33:51 +0800
+Subject: bpf: Handle return value of BPF_PROG_TYPE_STRUCT_OPS prog
+
+From: Hou Tao <houtao1@huawei.com>
+
+[ Upstream commit 356ed64991c6847a0c4f2e8fa3b1133f7a14f1fc ]
+
+Currently if a function ptr in struct_ops has a return value, its
+caller will get a random return value from it, because the return
+value of related BPF_PROG_TYPE_STRUCT_OPS prog is just dropped.
+
+So adding a new flag BPF_TRAMP_F_RET_FENTRY_RET to tell bpf trampoline
+to save and return the return value of struct_ops prog if ret_size of
+the function ptr is greater than 0. Also restricting the flag to be
+used alone.
+
+Fixes: 85d33df357b6 ("bpf: Introduce BPF_MAP_TYPE_STRUCT_OPS")
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/bpf/20210914023351.3664499-1-houtao1@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/net/bpf_jit_comp.c | 53 ++++++++++++++++++++++++++++---------
+ include/linux/bpf.h | 2 ++
+ kernel/bpf/bpf_struct_ops.c | 7 +++--
+ 3 files changed, 47 insertions(+), 15 deletions(-)
+
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index 0a962cd6bac1..a0a7ead52698 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -1547,7 +1547,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
+ }
+
+ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+- struct bpf_prog *p, int stack_size, bool mod_ret)
++ struct bpf_prog *p, int stack_size, bool save_ret)
+ {
+ u8 *prog = *pprog;
+ int cnt = 0;
+@@ -1573,11 +1573,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
+ if (emit_call(&prog, p->bpf_func, prog))
+ return -EINVAL;
+
+- /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
++ /*
++ * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+ * of the previous call which is then passed on the stack to
+ * the next BPF program.
++ *
++ * BPF_TRAMP_FENTRY trampoline may need to return the return
++ * value of BPF_PROG_TYPE_STRUCT_OPS prog.
+ */
+- if (mod_ret)
++ if (save_ret)
+ emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+
+ if (p->aux->sleepable) {
+@@ -1645,13 +1649,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
+ }
+
+ static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
+- struct bpf_tramp_progs *tp, int stack_size)
++ struct bpf_tramp_progs *tp, int stack_size,
++ bool save_ret)
+ {
+ int i;
+ u8 *prog = *pprog;
+
+ for (i = 0; i < tp->nr_progs; i++) {
+- if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
++ if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
++ save_ret))
+ return -EINVAL;
+ }
+ *pprog = prog;
+@@ -1694,6 +1700,23 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
+ return 0;
+ }
+
++static bool is_valid_bpf_tramp_flags(unsigned int flags)
++{
++ if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
++ (flags & BPF_TRAMP_F_SKIP_FRAME))
++ return false;
++
++ /*
++ * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
++ * and it must be used alone.
++ */
++ if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
++ (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
++ return false;
++
++ return true;
++}
++
+ /* Example:
+ * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
+ * its 'struct btf_func_model' will be nr_args=2
+@@ -1766,17 +1789,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
+ u8 **branches = NULL;
+ u8 *prog;
++ bool save_ret;
+
+ /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
+ if (nr_args > 6)
+ return -ENOTSUPP;
+
+- if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
+- (flags & BPF_TRAMP_F_SKIP_FRAME))
++ if (!is_valid_bpf_tramp_flags(flags))
+ return -EINVAL;
+
+- if (flags & BPF_TRAMP_F_CALL_ORIG)
+- stack_size += 8; /* room for return value of orig_call */
++ /* room for return value of orig_call or fentry prog */
++ save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
++ if (save_ret)
++ stack_size += 8;
+
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ /* skip patched call instruction and point orig_call to actual
+@@ -1803,7 +1828,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ }
+
+ if (fentry->nr_progs)
+- if (invoke_bpf(m, &prog, fentry, stack_size))
++ if (invoke_bpf(m, &prog, fentry, stack_size,
++ flags & BPF_TRAMP_F_RET_FENTRY_RET))
+ return -EINVAL;
+
+ if (fmod_ret->nr_progs) {
+@@ -1850,7 +1876,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ }
+
+ if (fexit->nr_progs)
+- if (invoke_bpf(m, &prog, fexit, stack_size)) {
++ if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+@@ -1870,9 +1896,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
+ ret = -EINVAL;
+ goto cleanup;
+ }
+- /* restore original return value back into RAX */
+- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+ }
++ /* restore return value of orig_call or fentry prog back into RAX */
++ if (save_ret)
++ emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+
+ EMIT1(0x5B); /* pop rbx */
+ EMIT1(0xC9); /* leave */
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 3f93a50c25ef..0caa448f7b40 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -526,6 +526,8 @@ struct btf_func_model {
+ * programs only. Should not be used with normal calls and indirect calls.
+ */
+ #define BPF_TRAMP_F_SKIP_FRAME BIT(2)
++/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
++#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
+
+ /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
+ * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
+diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
+index f527063864b5..ac283f9b2332 100644
+--- a/kernel/bpf/bpf_struct_ops.c
++++ b/kernel/bpf/bpf_struct_ops.c
+@@ -367,6 +367,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
+ const struct btf_type *mtype, *ptype;
+ struct bpf_prog *prog;
+ u32 moff;
++ u32 flags;
+
+ moff = btf_member_bit_offset(t, member) / 8;
+ ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
+@@ -430,10 +431,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
+
+ tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
+ tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
++ flags = st_ops->func_models[i].ret_size > 0 ?
++ BPF_TRAMP_F_RET_FENTRY_RET : 0;
+ err = arch_prepare_bpf_trampoline(NULL, image,
+ st_map->image + PAGE_SIZE,
+- &st_ops->func_models[i], 0,
+- tprogs, NULL);
++ &st_ops->func_models[i],
++ flags, tprogs, NULL);
+ if (err < 0)
+ goto reset_unlock;
+
+--
+2.33.0
+
--- /dev/null
+From aad2e1ca60169acf89a80e3ca42f108c22b000d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Sep 2021 17:04:37 +0100
+Subject: bpf, mips: Validate conditional branch offsets
+
+From: Piotr Krysiuk <piotras@gmail.com>
+
+[ Upstream commit 37cb28ec7d3a36a5bace7063a3dba633ab110f8b ]
+
+The conditional branch instructions on MIPS use 18-bit signed offsets
+allowing for a branch range of 128 KBytes (backward and forward).
+However, this limit is not observed by the cBPF JIT compiler, and so
+the JIT compiler emits out-of-range branches when translating certain
+cBPF programs. A specific example of such a cBPF program is included in
+the "BPF_MAXINSNS: exec all MSH" test from lib/test_bpf.c that executes
+anomalous machine code containing incorrect branch offsets under JIT.
+
+Furthermore, this issue can be abused to craft undesirable machine
+code, where the control flow is hijacked to execute arbitrary Kernel
+code.
+
+The following steps can be used to reproduce the issue:
+
+ # echo 1 > /proc/sys/net/core/bpf_jit_enable
+ # modprobe test_bpf test_name="BPF_MAXINSNS: exec all MSH"
+
+This should produce multiple warnings from build_bimm() similar to:
+
+ ------------[ cut here ]------------
+ WARNING: CPU: 0 PID: 209 at arch/mips/mm/uasm-mips.c:210 build_insn+0x558/0x590
+ Micro-assembler field overflow
+ Modules linked in: test_bpf(+)
+ CPU: 0 PID: 209 Comm: modprobe Not tainted 5.14.3 #1
+ Stack : 00000000 807bb824 82b33c9c 801843c0 00000000 00000004 00000000 63c9b5ee
+ 82b33af4 80999898 80910000 80900000 82fd6030 00000001 82b33a98 82087180
+ 00000000 00000000 80873b28 00000000 000000fc 82b3394c 00000000 2e34312e
+ 6d6d6f43 809a180f 809a1836 6f6d203a 80900000 00000001 82b33bac 80900000
+ 00027f80 00000000 00000000 807bb824 00000000 804ed790 001cc317 00000001
+ [...]
+ Call Trace:
+ [<80108f44>] show_stack+0x38/0x118
+ [<807a7aac>] dump_stack_lvl+0x5c/0x7c
+ [<807a4b3c>] __warn+0xcc/0x140
+ [<807a4c3c>] warn_slowpath_fmt+0x8c/0xb8
+ [<8011e198>] build_insn+0x558/0x590
+ [<8011e358>] uasm_i_bne+0x20/0x2c
+ [<80127b48>] build_body+0xa58/0x2a94
+ [<80129c98>] bpf_jit_compile+0x114/0x1e4
+ [<80613fc4>] bpf_prepare_filter+0x2ec/0x4e4
+ [<8061423c>] bpf_prog_create+0x80/0xc4
+ [<c0a006e4>] test_bpf_init+0x300/0xba8 [test_bpf]
+ [<8010051c>] do_one_initcall+0x50/0x1d4
+ [<801c5e54>] do_init_module+0x60/0x220
+ [<801c8b20>] sys_finit_module+0xc4/0xfc
+ [<801144d0>] syscall_common+0x34/0x58
+ [...]
+ ---[ end trace a287d9742503c645 ]---
+
+Then the anomalous machine code executes:
+
+=> 0xc0a18000: addiu sp,sp,-16
+ 0xc0a18004: sw s3,0(sp)
+ 0xc0a18008: sw s4,4(sp)
+ 0xc0a1800c: sw s5,8(sp)
+ 0xc0a18010: sw ra,12(sp)
+ 0xc0a18014: move s5,a0
+ 0xc0a18018: move s4,zero
+ 0xc0a1801c: move s3,zero
+
+ # __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0)
+ 0xc0a18020: lui t6,0x8012
+ 0xc0a18024: ori t4,t6,0x9e14
+ 0xc0a18028: li a1,0
+ 0xc0a1802c: jalr t4
+ 0xc0a18030: move a0,s5
+ 0xc0a18034: bnez v0,0xc0a1ffb8 # incorrect branch offset
+ 0xc0a18038: move v0,zero
+ 0xc0a1803c: andi s4,s3,0xf
+ 0xc0a18040: b 0xc0a18048
+ 0xc0a18044: sll s4,s4,0x2
+ [...]
+
+ # __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0)
+ 0xc0a1ffa0: lui t6,0x8012
+ 0xc0a1ffa4: ori t4,t6,0x9e14
+ 0xc0a1ffa8: li a1,0
+ 0xc0a1ffac: jalr t4
+ 0xc0a1ffb0: move a0,s5
+ 0xc0a1ffb4: bnez v0,0xc0a1ffb8 # incorrect branch offset
+ 0xc0a1ffb8: move v0,zero
+ 0xc0a1ffbc: andi s4,s3,0xf
+ 0xc0a1ffc0: b 0xc0a1ffc8
+ 0xc0a1ffc4: sll s4,s4,0x2
+
+ # __BPF_STMT(BPF_LDX | BPF_B | BPF_MSH, 0)
+ 0xc0a1ffc8: lui t6,0x8012
+ 0xc0a1ffcc: ori t4,t6,0x9e14
+ 0xc0a1ffd0: li a1,0
+ 0xc0a1ffd4: jalr t4
+ 0xc0a1ffd8: move a0,s5
+ 0xc0a1ffdc: bnez v0,0xc0a3ffb8 # correct branch offset
+ 0xc0a1ffe0: move v0,zero
+ 0xc0a1ffe4: andi s4,s3,0xf
+ 0xc0a1ffe8: b 0xc0a1fff0
+ 0xc0a1ffec: sll s4,s4,0x2
+ [...]
+
+ # epilogue
+ 0xc0a3ffb8: lw s3,0(sp)
+ 0xc0a3ffbc: lw s4,4(sp)
+ 0xc0a3ffc0: lw s5,8(sp)
+ 0xc0a3ffc4: lw ra,12(sp)
+ 0xc0a3ffc8: addiu sp,sp,16
+ 0xc0a3ffcc: jr ra
+ 0xc0a3ffd0: nop
+
+To mitigate this issue, we assert the branch ranges for each emit call
+that could generate an out-of-range branch.
+
+Fixes: 36366e367ee9 ("MIPS: BPF: Restore MIPS32 cBPF JIT")
+Fixes: c6610de353da ("MIPS: net: Add BPF JIT")
+Signed-off-by: Piotr Krysiuk <piotras@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Tested-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
+Acked-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
+Cc: Paul Burton <paulburton@kernel.org>
+Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Link: https://lore.kernel.org/bpf/20210915160437.4080-1-piotras@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/net/bpf_jit.c | 57 +++++++++++++++++++++++++++++++----------
+ 1 file changed, 43 insertions(+), 14 deletions(-)
+
+diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
+index 0af88622c619..cb6d22439f71 100644
+--- a/arch/mips/net/bpf_jit.c
++++ b/arch/mips/net/bpf_jit.c
+@@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx)
+ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
+ func##_positive)
+
++static bool is_bad_offset(int b_off)
++{
++ return b_off > 0x1ffff || b_off < -0x20000;
++}
++
+ static int build_body(struct jit_ctx *ctx)
+ {
+ const struct bpf_prog *prog = ctx->skf;
+@@ -728,7 +733,10 @@ static int build_body(struct jit_ctx *ctx)
+ /* Load return register on DS for failures */
+ emit_reg_move(r_ret, r_zero, ctx);
+ /* Return with error */
+- emit_b(b_imm(prog->len, ctx), ctx);
++ b_off = b_imm(prog->len, ctx);
++ if (is_bad_offset(b_off))
++ return -E2BIG;
++ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ break;
+ case BPF_LD | BPF_W | BPF_IND:
+@@ -775,8 +783,10 @@ static int build_body(struct jit_ctx *ctx)
+ emit_jalr(MIPS_R_RA, r_s0, ctx);
+ emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
+ /* Check the error value */
+- emit_bcond(MIPS_COND_NE, r_ret, 0,
+- b_imm(prog->len, ctx), ctx);
++ b_off = b_imm(prog->len, ctx);
++ if (is_bad_offset(b_off))
++ return -E2BIG;
++ emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
+ emit_reg_move(r_ret, r_zero, ctx);
+ /* We are good */
+ /* X <- P[1:K] & 0xf */
+@@ -855,8 +865,10 @@ static int build_body(struct jit_ctx *ctx)
+ /* A /= X */
+ ctx->flags |= SEEN_X | SEEN_A;
+ /* Check if r_X is zero */
+- emit_bcond(MIPS_COND_EQ, r_X, r_zero,
+- b_imm(prog->len, ctx), ctx);
++ b_off = b_imm(prog->len, ctx);
++ if (is_bad_offset(b_off))
++ return -E2BIG;
++ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
+ emit_load_imm(r_ret, 0, ctx); /* delay slot */
+ emit_div(r_A, r_X, ctx);
+ break;
+@@ -864,8 +876,10 @@ static int build_body(struct jit_ctx *ctx)
+ /* A %= X */
+ ctx->flags |= SEEN_X | SEEN_A;
+ /* Check if r_X is zero */
+- emit_bcond(MIPS_COND_EQ, r_X, r_zero,
+- b_imm(prog->len, ctx), ctx);
++ b_off = b_imm(prog->len, ctx);
++ if (is_bad_offset(b_off))
++ return -E2BIG;
++ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
+ emit_load_imm(r_ret, 0, ctx); /* delay slot */
+ emit_mod(r_A, r_X, ctx);
+ break;
+@@ -926,7 +940,10 @@ static int build_body(struct jit_ctx *ctx)
+ break;
+ case BPF_JMP | BPF_JA:
+ /* pc += K */
+- emit_b(b_imm(i + k + 1, ctx), ctx);
++ b_off = b_imm(i + k + 1, ctx);
++ if (is_bad_offset(b_off))
++ return -E2BIG;
++ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ break;
+ case BPF_JMP | BPF_JEQ | BPF_K:
+@@ -1056,12 +1073,16 @@ static int build_body(struct jit_ctx *ctx)
+ break;
+ case BPF_RET | BPF_A:
+ ctx->flags |= SEEN_A;
+- if (i != prog->len - 1)
++ if (i != prog->len - 1) {
+ /*
+ * If this is not the last instruction
+ * then jump to the epilogue
+ */
+- emit_b(b_imm(prog->len, ctx), ctx);
++ b_off = b_imm(prog->len, ctx);
++ if (is_bad_offset(b_off))
++ return -E2BIG;
++ emit_b(b_off, ctx);
++ }
+ emit_reg_move(r_ret, r_A, ctx); /* delay slot */
+ break;
+ case BPF_RET | BPF_K:
+@@ -1075,7 +1096,10 @@ static int build_body(struct jit_ctx *ctx)
+ * If this is not the last instruction
+ * then jump to the epilogue
+ */
+- emit_b(b_imm(prog->len, ctx), ctx);
++ b_off = b_imm(prog->len, ctx);
++ if (is_bad_offset(b_off))
++ return -E2BIG;
++ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ }
+ break;
+@@ -1133,8 +1157,10 @@ static int build_body(struct jit_ctx *ctx)
+ /* Load *dev pointer */
+ emit_load_ptr(r_s0, r_skb, off, ctx);
+ /* error (0) in the delay slot */
+- emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
+- b_imm(prog->len, ctx), ctx);
++ b_off = b_imm(prog->len, ctx);
++ if (is_bad_offset(b_off))
++ return -E2BIG;
++ emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
+ emit_reg_move(r_ret, r_zero, ctx);
+ if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+ BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
+@@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp)
+
+ /* Generate the actual JIT code */
+ build_prologue(&ctx);
+- build_body(&ctx);
++ if (build_body(&ctx)) {
++ module_memfree(ctx.target);
++ goto out;
++ }
+ build_epilogue(&ctx);
+
+ /* Update the icache */
+--
+2.33.0
+
--- /dev/null
+From 48a300e7172ebedf0719e28b929b7792596a521d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Sep 2021 14:42:02 +0100
+Subject: drm/i915/request: fix early tracepoints
+
+From: Matthew Auld <matthew.auld@intel.com>
+
+[ Upstream commit c83ff0186401169eb27ce5057d820b7a863455c3 ]
+
+Currently we blow up in trace_dma_fence_init, when calling into
+get_driver_name or get_timeline_name, since both the engine and context
+might be NULL(or contain some garbage address) in the case of newly
+allocated slab objects via the request ctor. Note that we also use
+SLAB_TYPESAFE_BY_RCU here, which allows requests to be immediately
+freed, but delay freeing the underlying page by an RCU grace period.
+With this scheme requests can be re-allocated, at the same time as they
+are also being read by some lockless RCU lookup mechanism.
+
+In the ctor case, which is only called for new slab objects(i.e allocate
+new page and call the ctor for each object) it's safe to reset the
+context/engine prior to calling into dma_fence_init, since we can be
+certain that no one is doing an RCU lookup which might depend on peeking
+at the engine/context, like in active_engine(), since the object can't
+yet be externally visible.
+
+In the recycled case(which might also be externally visible) the request
+refcount always transitions from 0->1 after we set the context/engine
+etc, which should ensure it's valid to dereference the engine for
+example, when doing an RCU list-walk, so long as we can also increment
+the refcount first. If the refcount is already zero, then the request is
+considered complete/released. If it's non-zero, then the request might
+be in the process of being re-allocated, or potentially still in flight,
+however after successfully incrementing the refcount, it's possible to
+carefully inspect the request state, to determine if the request is
+still what we were looking for. Note that all externally visible
+requests returned to the cache must have zero refcount.
+
+One possible fix then is to move dma_fence_init out from the request
+ctor. Originally this was how it was done, but it was moved in:
+
+commit 855e39e65cfc33a73724f1cc644ffc5754864a20
+Author: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Mon Feb 3 09:41:48 2020 +0000
+
+ drm/i915: Initialise basic fence before acquiring seqno
+
+where it looks like intel_timeline_get_seqno() relied on some of the
+rq->fence state, but that is no longer the case since:
+
+commit 12ca695d2c1ed26b2dcbb528b42813bd0f216cfc
+Author: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Date: Tue Mar 23 16:49:50 2021 +0100
+
+ drm/i915: Do not share hwsp across contexts any more, v8.
+
+intel_timeline_get_seqno() could also be cleaned up slightly by dropping
+the request argument.
+
+Moving dma_fence_init back out of the ctor, should ensure we have enough
+of the request initialised in case of trace_dma_fence_init.
+Functionally this should be the same, and is effectively what we were
+already open coding before, except now we also assign the fence->lock
+and fence->ops, but since these are invariant for recycled
+requests(which might be externally visible), and will therefore already
+hold the same value, it shouldn't matter.
+
+An alternative fix, since we don't yet have a fully initialised request
+when in the ctor, is just setting the context/engine as NULL, but this
+does require adding some extra handling in get_driver_name etc.
+
+v2(Daniel):
+ - Try to make the commit message less confusing
+
+Fixes: 855e39e65cfc ("drm/i915: Initialise basic fence before acquiring seqno")
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Cc: Michael Mason <michael.w.mason@intel.com>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20210921134202.3803151-1-matthew.auld@intel.com
+(cherry picked from commit be988eaee1cb208c4445db46bc3ceaf75f586f0b)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/i915_request.c | 11 ++---------
+ 1 file changed, 2 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
+index d8fef42ca38e..896389f93029 100644
+--- a/drivers/gpu/drm/i915/i915_request.c
++++ b/drivers/gpu/drm/i915/i915_request.c
+@@ -776,8 +776,6 @@ static void __i915_request_ctor(void *arg)
+ i915_sw_fence_init(&rq->submit, submit_notify);
+ i915_sw_fence_init(&rq->semaphore, semaphore_notify);
+
+- dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
+-
+ rq->capture_list = NULL;
+
+ init_llist_head(&rq->execute_cb);
+@@ -840,17 +838,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
+ rq->ring = ce->ring;
+ rq->execution_mask = ce->engine->mask;
+
+- kref_init(&rq->fence.refcount);
+- rq->fence.flags = 0;
+- rq->fence.error = 0;
+- INIT_LIST_HEAD(&rq->fence.cb_list);
+-
+ ret = intel_timeline_get_seqno(tl, rq, &seqno);
+ if (ret)
+ goto err_free;
+
+- rq->fence.context = tl->fence_context;
+- rq->fence.seqno = seqno;
++ dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
++ tl->fence_context, seqno);
+
+ RCU_INIT_POINTER(rq->timeline, tl);
+ RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
+--
+2.33.0
+
--- /dev/null
+From ac8edaa9655b67bd34138f1f43155753c853c40b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Sep 2021 19:41:24 +0200
+Subject: dsa: mv88e6xxx: 6161: Use chip wide MAX MTU
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+[ Upstream commit fe23036192c95b66e60d019d2ec1814d0d561ffd ]
+
+The datasheets suggests the 6161 uses a per port setting for jumbo
+frames. Testing has however shown this is not correct, it uses the old
+style chip wide MTU control. Change the ops in the 6161 structure to
+reflect this.
+
+Fixes: 1baf0fac10fb ("net: dsa: mv88e6xxx: Use chip-wide max frame size for MTU")
+Reported by: 曹煜 <cao88yu@gmail.com>
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mv88e6xxx/chip.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 184cbc93328c..caa3c4f30405 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3455,7 +3455,6 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_floods = mv88e6352_port_set_egress_floods,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+- .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_limit = mv88e6097_port_pause_limit,
+ .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
+@@ -3480,6 +3479,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
+ .avb_ops = &mv88e6165_avb_ops,
+ .ptp_ops = &mv88e6165_ptp_ops,
+ .phylink_validate = mv88e6185_phylink_validate,
++ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
+ };
+
+ static const struct mv88e6xxx_ops mv88e6165_ops = {
+--
+2.33.0
+
--- /dev/null
+From a43c93d884f8ee69054b7b08c38af4c9f7b8c975 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Sep 2021 19:41:25 +0200
+Subject: dsa: mv88e6xxx: Fix MTU definition
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+[ Upstream commit b92ce2f54c0f0ff781e914ec189c25f7bf1b1ec2 ]
+
+The MTU passed to the DSA driver is the payload size, typically 1500.
+However, the switch uses the frame size when applying restrictions.
+Adjust the MTU with the size of the Ethernet header and the frame
+checksum. The VLAN header also needs to be included when the frame
+size it per port, but not when it is global.
+
+Fixes: 1baf0fac10fb ("net: dsa: mv88e6xxx: Use chip-wide max frame size for MTU")
+Reported by: 曹煜 <cao88yu@gmail.com>
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mv88e6xxx/chip.c | 12 ++++++------
+ drivers/net/dsa/mv88e6xxx/global1.c | 2 ++
+ drivers/net/dsa/mv88e6xxx/port.c | 2 ++
+ 3 files changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index caa3c4f30405..50bbea220fbf 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -2613,8 +2613,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
+ if (err)
+ return err;
+
+- /* Port Control 2: don't force a good FCS, set the maximum frame size to
+- * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
++ /* Port Control 2: don't force a good FCS, set the MTU size to
++ * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
+ * untagged frames on this port, do a destination address lookup on all
+ * received packets as usual, disable ARP mirroring and don't send a
+ * copy of all transmitted/received frames on this port to the CPU.
+@@ -2633,7 +2633,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
+ return err;
+
+ if (chip->info->ops->port_set_jumbo_size) {
+- err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);
++ err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);
+ if (err)
+ return err;
+ }
+@@ -2718,10 +2718,10 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ if (chip->info->ops->port_set_jumbo_size)
+- return 10240;
++ return 10240 - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ else if (chip->info->ops->set_max_frame_size)
+- return 1632;
+- return 1522;
++ return 1632 - VLAN_ETH_HLEN - ETH_FCS_LEN;
++ return 1522 - VLAN_ETH_HLEN - ETH_FCS_LEN;
+ }
+
+ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
+index 33d443a37efc..9936ae69e5ee 100644
+--- a/drivers/net/dsa/mv88e6xxx/global1.c
++++ b/drivers/net/dsa/mv88e6xxx/global1.c
+@@ -232,6 +232,8 @@ int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu)
+ u16 val;
+ int err;
+
++ mtu += ETH_HLEN + ETH_FCS_LEN;
++
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+ if (err)
+ return err;
+diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
+index 8128dc607cf4..dfd9e8292e9a 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.c
++++ b/drivers/net/dsa/mv88e6xxx/port.c
+@@ -1082,6 +1082,8 @@ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
+ u16 reg;
+ int err;
+
++ size += VLAN_ETH_HLEN + ETH_FCS_LEN;
++
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, ®);
+ if (err)
+ return err;
+--
+2.33.0
+
--- /dev/null
+From dbf71ba4cf5a7a5f1b256a74fa50aefec5ef0e46 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Sep 2021 19:41:26 +0200
+Subject: dsa: mv88e6xxx: Include tagger overhead when setting MTU for DSA and
+ CPU ports
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Andrew Lunn <andrew@lunn.ch>
+
+[ Upstream commit b9c587fed61cf88bd45822c3159644445f6d5aa6 ]
+
+Some members of the Marvell Ethernet switches impose MTU restrictions
+on ports used for connecting to the CPU or another switch for DSA. If
+the MTU is set too low, tagged frames will be discarded. Ensure the
+worst case tagger overhead is included in setting the MTU for DSA and
+CPU ports.
+
+Fixes: 1baf0fac10fb ("net: dsa: mv88e6xxx: Use chip-wide max frame size for MTU")
+Reported by: 曹煜 <cao88yu@gmail.com>
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/mv88e6xxx/chip.c | 9 ++++++---
+ drivers/net/dsa/mv88e6xxx/chip.h | 1 +
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 50bbea220fbf..18388ea5ebd9 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -2718,10 +2718,10 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ if (chip->info->ops->port_set_jumbo_size)
+- return 10240 - VLAN_ETH_HLEN - ETH_FCS_LEN;
++ return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ else if (chip->info->ops->set_max_frame_size)
+- return 1632 - VLAN_ETH_HLEN - ETH_FCS_LEN;
+- return 1522 - VLAN_ETH_HLEN - ETH_FCS_LEN;
++ return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
++ return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ }
+
+ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+@@ -2729,6 +2729,9 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int ret = 0;
+
++ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
++ new_mtu += EDSA_HLEN;
++
+ mv88e6xxx_reg_lock(chip);
+ if (chip->info->ops->port_set_jumbo_size)
+ ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
+index 81c244fc0419..51a7ff44478e 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.h
++++ b/drivers/net/dsa/mv88e6xxx/chip.h
+@@ -18,6 +18,7 @@
+ #include <linux/timecounter.h>
+ #include <net/dsa.h>
+
++#define EDSA_HLEN 8
+ #define MV88E6XXX_N_FID 4096
+
+ /* PVT limits for 4-bit port and 5-bit switch */
+--
+2.33.0
+
--- /dev/null
+From c74505a1e9c508ba378bcd79e9d3d43f422e47ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Sep 2021 10:52:37 -0700
+Subject: e100: fix buffer overrun in e100_get_regs
+
+From: Jacob Keller <jacob.e.keller@intel.com>
+
+[ Upstream commit 51032e6f17ce990d06123ad7307f258c50d25aa7 ]
+
+The e100_get_regs function is used to implement a simple register dump
+for the e100 device. The data is broken into a couple of MAC control
+registers, and then a series of PHY registers, followed by a memory dump
+buffer.
+
+The total length of the register dump is defined as (1 + E100_PHY_REGS)
+* sizeof(u32) + sizeof(nic->mem->dump_buf).
+
+The logic for filling in the PHY registers uses a convoluted inverted
+count for loop which counts from E100_PHY_REGS (0x1C) down to 0, and
+assigns the slots 1 + E100_PHY_REGS - i. The first loop iteration will
+fill in [1] and the final loop iteration will fill in [1 + 0x1C]. This
+is actually one more than the supposed number of PHY registers.
+
+The memory dump buffer is then filled into the space at
+[2 + E100_PHY_REGS] which will cause that memcpy to assign 4 bytes past
+the total size.
+
+The end result is that we overrun the total buffer size allocated by the
+kernel, which could lead to a panic or other issues due to memory
+corruption.
+
+It is difficult to determine the actual total number of registers
+here. The only 8255x datasheet I could find indicates there are 28 total
+MDI registers. However, we're reading 29 here, and reading them in
+reverse!
+
+In addition, the ethtool e100 register dump interface appears to read
+the first PHY register to determine if the device is in MDI or MDIx
+mode. This doesn't appear to be documented anywhere within the 8255x
+datasheet. I can only assume it must be in register 28 (the extra
+register we're reading here).
+
+Lets not change any of the intended meaning of what we copy here. Just
+extend the space by 4 bytes to account for the extra register and
+continue copying the data out in the same order.
+
+Change the E100_PHY_REGS value to be the correct total (29) so that the
+total register dump size is calculated properly. Fix the offset for
+where we copy the dump buffer so that it doesn't overrun the total size.
+
+Re-write the for loop to use counting up instead of the convoluted
+down-counting. Correct the mdio_read offset to use the 0-based register
+offsets, but maintain the bizarre reverse ordering so that we have the
+ABI expected by applications like ethtool. This requires and additional
+subtraction of 1. It seems a bit odd but it makes the flow of assignment
+into the register buffer easier to follow.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: Felicitas Hetzelt <felicitashetzelt@gmail.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Jacob Keller <jacob.e.keller@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/e100.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
+index fee329d98621..ee86ea12fa37 100644
+--- a/drivers/net/ethernet/intel/e100.c
++++ b/drivers/net/ethernet/intel/e100.c
+@@ -2431,7 +2431,7 @@ static void e100_get_drvinfo(struct net_device *netdev,
+ sizeof(info->bus_info));
+ }
+
+-#define E100_PHY_REGS 0x1C
++#define E100_PHY_REGS 0x1D
+ static int e100_get_regs_len(struct net_device *netdev)
+ {
+ struct nic *nic = netdev_priv(netdev);
+@@ -2453,14 +2453,18 @@ static void e100_get_regs(struct net_device *netdev,
+ buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
+ ioread8(&nic->csr->scb.cmd_lo) << 16 |
+ ioread16(&nic->csr->scb.status);
+- for (i = E100_PHY_REGS; i >= 0; i--)
+- buff[1 + E100_PHY_REGS - i] =
+- mdio_read(netdev, nic->mii.phy_id, i);
++ for (i = 0; i < E100_PHY_REGS; i++)
++ /* Note that we read the registers in reverse order. This
++ * ordering is the ABI apparently used by ethtool and other
++ * applications.
++ */
++ buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
++ E100_PHY_REGS - 1 - i);
+ memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
+ e100_exec_cb(nic, NULL, e100_dump);
+ msleep(10);
+- memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
+- sizeof(nic->mem->dump_buf));
++ memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
++ sizeof(nic->mem->dump_buf));
+ }
+
+ static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+--
+2.33.0
+
--- /dev/null
+From ce944f78edcb92f0fdc4aebadec76c4b8c09648d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Sep 2021 10:52:36 -0700
+Subject: e100: fix length calculation in e100_get_regs_len
+
+From: Jacob Keller <jacob.e.keller@intel.com>
+
+[ Upstream commit 4329c8dc110b25d5f04ed20c6821bb60deff279f ]
+
+commit abf9b902059f ("e100: cleanup unneeded math") tried to simplify
+e100_get_regs_len and remove a double 'divide and then multiply'
+calculation that the e100_reg_regs_len function did.
+
+This change broke the size calculation entirely as it failed to account
+for the fact that the numbered registers are actually 4 bytes wide and
+not 1 byte. This resulted in a significant under allocation of the
+register buffer used by e100_get_regs.
+
+Fix this by properly multiplying the register count by u32 first before
+adding the size of the dump buffer.
+
+Fixes: abf9b902059f ("e100: cleanup unneeded math")
+Reported-by: Felicitas Hetzelt <felicitashetzelt@gmail.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/e100.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
+index 609e47b8287d..fee329d98621 100644
+--- a/drivers/net/ethernet/intel/e100.c
++++ b/drivers/net/ethernet/intel/e100.c
+@@ -2435,7 +2435,11 @@ static void e100_get_drvinfo(struct net_device *netdev,
+ static int e100_get_regs_len(struct net_device *netdev)
+ {
+ struct nic *nic = netdev_priv(netdev);
+- return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
++
++ /* We know the number of registers, and the size of the dump buffer.
++ * Calculate the total size in bytes.
++ */
++ return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
+ }
+
+ static void e100_get_regs(struct net_device *netdev,
+--
+2.33.0
+
--- /dev/null
+From c2591017553ea7f2259a7a2a5c623271d8409dfd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Sep 2021 21:31:51 +0300
+Subject: hwmon: (mlxreg-fan) Return non-zero value when fan current state is
+ enforced from sysfs
+
+From: Vadim Pasternak <vadimp@nvidia.com>
+
+[ Upstream commit e6fab7af6ba1bc77c78713a83876f60ca7a4a064 ]
+
+Fan speed minimum can be enforced from sysfs. For example, setting
+current fan speed to 20 is used to enforce fan speed to be at 100%
+speed, 19 - to be not below 90% speed, etcetera. This feature provides
+ability to limit fan speed according to some system wise
+considerations, like absence of some replaceable units or high system
+ambient temperature.
+
+Request for changing fan minimum speed is configuration request and can
+be set only through 'sysfs' write procedure. In this situation value of
+argument 'state' is above nominal fan speed maximum.
+
+Return non-zero code in this case to avoid
+thermal_cooling_device_stats_update() call, because in this case
+statistics update violates thermal statistics table range.
+The issues is observed in case kernel is configured with option
+CONFIG_THERMAL_STATISTICS.
+
+Here is the trace from KASAN:
+[ 159.506659] BUG: KASAN: slab-out-of-bounds in thermal_cooling_device_stats_update+0x7d/0xb0
+[ 159.516016] Read of size 4 at addr ffff888116163840 by task hw-management.s/7444
+[ 159.545625] Call Trace:
+[ 159.548366] dump_stack+0x92/0xc1
+[ 159.552084] ? thermal_cooling_device_stats_update+0x7d/0xb0
+[ 159.635869] thermal_zone_device_update+0x345/0x780
+[ 159.688711] thermal_zone_device_set_mode+0x7d/0xc0
+[ 159.694174] mlxsw_thermal_modules_init+0x48f/0x590 [mlxsw_core]
+[ 159.700972] ? mlxsw_thermal_set_cur_state+0x5a0/0x5a0 [mlxsw_core]
+[ 159.731827] mlxsw_thermal_init+0x763/0x880 [mlxsw_core]
+[ 160.070233] RIP: 0033:0x7fd995909970
+[ 160.074239] Code: 73 01 c3 48 8b 0d 28 d5 2b 00 f7 d8 64 89 01 48 83 c8 ff c3 66 0f 1f 44 00 00 83 3d 99 2d 2c 00 00 75 10 b8 01 00 00 00 0f 05 <48> 3d 01 f0 ff ..
+[ 160.095242] RSP: 002b:00007fff54f5d938 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+[ 160.103722] RAX: ffffffffffffffda RBX: 0000000000000013 RCX: 00007fd995909970
+[ 160.111710] RDX: 0000000000000013 RSI: 0000000001906008 RDI: 0000000000000001
+[ 160.119699] RBP: 0000000001906008 R08: 00007fd995bc9760 R09: 00007fd996210700
+[ 160.127687] R10: 0000000000000073 R11: 0000000000000246 R12: 0000000000000013
+[ 160.135673] R13: 0000000000000001 R14: 00007fd995bc8600 R15: 0000000000000013
+[ 160.143671]
+[ 160.145338] Allocated by task 2924:
+[ 160.149242] kasan_save_stack+0x19/0x40
+[ 160.153541] __kasan_kmalloc+0x7f/0xa0
+[ 160.157743] __kmalloc+0x1a2/0x2b0
+[ 160.161552] thermal_cooling_device_setup_sysfs+0xf9/0x1a0
+[ 160.167687] __thermal_cooling_device_register+0x1b5/0x500
+[ 160.173833] devm_thermal_of_cooling_device_register+0x60/0xa0
+[ 160.180356] mlxreg_fan_probe+0x474/0x5e0 [mlxreg_fan]
+[ 160.248140]
+[ 160.249807] The buggy address belongs to the object at ffff888116163400
+[ 160.249807] which belongs to the cache kmalloc-1k of size 1024
+[ 160.263814] The buggy address is located 64 bytes to the right of
+[ 160.263814] 1024-byte region [ffff888116163400, ffff888116163800)
+[ 160.277536] The buggy address belongs to the page:
+[ 160.282898] page:0000000012275840 refcount:1 mapcount:0 mapping:0000000000000000 index:0xffff888116167000 pfn:0x116160
+[ 160.294872] head:0000000012275840 order:3 compound_mapcount:0 compound_pincount:0
+[ 160.303251] flags: 0x200000000010200(slab|head|node=0|zone=2)
+[ 160.309694] raw: 0200000000010200 ffffea00046f7208 ffffea0004928208 ffff88810004dbc0
+[ 160.318367] raw: ffff888116167000 00000000000a0006 00000001ffffffff 0000000000000000
+[ 160.327033] page dumped because: kasan: bad access detected
+[ 160.333270]
+[ 160.334937] Memory state around the buggy address:
+[ 160.356469] >ffff888116163800: fc ..
+
+Fixes: 65afb4c8e7e4 ("hwmon: (mlxreg-fan) Add support for Mellanox FAN driver")
+Signed-off-by: Vadim Pasternak <vadimp@nvidia.com>
+Link: https://lore.kernel.org/r/20210916183151.869427-1-vadimp@nvidia.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/mlxreg-fan.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
+index ed8d59d4eecb..bd8f5a3aaad9 100644
+--- a/drivers/hwmon/mlxreg-fan.c
++++ b/drivers/hwmon/mlxreg-fan.c
+@@ -291,8 +291,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ {
+ struct mlxreg_fan *fan = cdev->devdata;
+ unsigned long cur_state;
++ int i, config = 0;
+ u32 regval;
+- int i;
+ int err;
+
+ /*
+@@ -305,6 +305,12 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ * overwritten.
+ */
+ if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
++ /*
++ * This is configuration change, which is only supported through sysfs.
++ * For configuration non-zero value is to be returned to avoid thermal
++ * statistics update.
++ */
++ config = 1;
+ state -= MLXREG_FAN_MAX_STATE;
+ for (i = 0; i < state; i++)
+ fan->cooling_levels[i] = state;
+@@ -319,7 +325,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+
+ cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
+ if (state < cur_state)
+- return 0;
++ return config;
+
+ state = cur_state;
+ }
+@@ -335,7 +341,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ dev_err(fan->dev, "Failed to write PWM duty\n");
+ return err;
+ }
+- return 0;
++ return config;
+ }
+
+ static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
+--
+2.33.0
+
--- /dev/null
+From 8c316488cc43fbb89f2116202d3e54b077185292 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Sep 2021 10:07:40 +0300
+Subject: hwmon: (pmbus/mp2975) Add missed POUT attribute for page 1 mp2975
+ controller
+
+From: Vadim Pasternak <vadimp@nvidia.com>
+
+[ Upstream commit 2292e2f685cd5c65e3f47bbcf9f469513acc3195 ]
+
+Add missed attribute for reading POUT from page 1.
+It is supported by device, but has been missed in initial commit.
+
+Fixes: 2c6fcbb21149 ("hwmon: (pmbus) Add support for MPS Multi-phase mp2975 controller")
+Signed-off-by: Vadim Pasternak <vadimp@nvidia.com>
+Link: https://lore.kernel.org/r/20210927070740.2149290-1-vadimp@nvidia.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/pmbus/mp2975.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
+index 1c3e2a9453b1..a41fe06e0ad4 100644
+--- a/drivers/hwmon/pmbus/mp2975.c
++++ b/drivers/hwmon/pmbus/mp2975.c
+@@ -54,7 +54,7 @@
+
+ #define MP2975_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
+- PMBUS_PHASE_VIRTUAL)
++ PMBUS_HAVE_POUT | PMBUS_PHASE_VIRTUAL)
+
+ struct mp2975_data {
+ struct pmbus_driver_info info;
+--
+2.33.0
+
--- /dev/null
+From b42016a1f859149aee1a4e560dec049ad650e748 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Sep 2021 12:30:11 +0300
+Subject: hwmon: (tmp421) fix rounding for negative values
+
+From: Paul Fertser <fercerpav@gmail.com>
+
+[ Upstream commit 724e8af85854c4d3401313b6dd7d79cf792d8990 ]
+
+Old code produces -24999 for 0b1110011100000000 input in standard format due to
+always rounding up rather than "away from zero".
+
+Use the common macro for division, unify and simplify the conversion code along
+the way.
+
+Fixes: 9410700b881f ("hwmon: Add driver for Texas Instruments TMP421/422/423 sensor chips")
+Signed-off-by: Paul Fertser <fercerpav@gmail.com>
+Link: https://lore.kernel.org/r/20210924093011.26083-3-fercerpav@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/tmp421.c | 24 ++++++++----------------
+ 1 file changed, 8 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
+index c9ef83627bb7..b963a369c5ab 100644
+--- a/drivers/hwmon/tmp421.c
++++ b/drivers/hwmon/tmp421.c
+@@ -100,23 +100,17 @@ struct tmp421_data {
+ s16 temp[4];
+ };
+
+-static int temp_from_s16(s16 reg)
++static int temp_from_raw(u16 reg, bool extended)
+ {
+ /* Mask out status bits */
+ int temp = reg & ~0xf;
+
+- return (temp * 1000 + 128) / 256;
+-}
+-
+-static int temp_from_u16(u16 reg)
+-{
+- /* Mask out status bits */
+- int temp = reg & ~0xf;
+-
+- /* Add offset for extended temperature range. */
+- temp -= 64 * 256;
++ if (extended)
++ temp = temp - 64 * 256;
++ else
++ temp = (s16)temp;
+
+- return (temp * 1000 + 128) / 256;
++ return DIV_ROUND_CLOSEST(temp * 1000, 256);
+ }
+
+ static int tmp421_update_device(struct tmp421_data *data)
+@@ -172,10 +166,8 @@ static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
+
+ switch (attr) {
+ case hwmon_temp_input:
+- if (tmp421->config & TMP421_CONFIG_RANGE)
+- *val = temp_from_u16(tmp421->temp[channel]);
+- else
+- *val = temp_from_s16(tmp421->temp[channel]);
++ *val = temp_from_raw(tmp421->temp[channel],
++ tmp421->config & TMP421_CONFIG_RANGE);
+ return 0;
+ case hwmon_temp_fault:
+ /*
+--
+2.33.0
+
--- /dev/null
+From c52de109cdc0176ce94d5e3360e6477f22998d57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Sep 2021 12:30:10 +0300
+Subject: hwmon: (tmp421) report /PVLD condition as fault
+
+From: Paul Fertser <fercerpav@gmail.com>
+
+[ Upstream commit 540effa7f283d25bcc13c0940d808002fee340b8 ]
+
+For both local and remote sensors all the supported ICs can report an
+"undervoltage lockout" condition which means the conversion wasn't
+properly performed due to insufficient power supply voltage and so the
+measurement results can't be trusted.
+
+Fixes: 9410700b881f ("hwmon: Add driver for Texas Instruments TMP421/422/423 sensor chips")
+Signed-off-by: Paul Fertser <fercerpav@gmail.com>
+Link: https://lore.kernel.org/r/20210924093011.26083-2-fercerpav@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/tmp421.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
+index 8fd8c3a94dfe..c9ef83627bb7 100644
+--- a/drivers/hwmon/tmp421.c
++++ b/drivers/hwmon/tmp421.c
+@@ -179,10 +179,10 @@ static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
+ return 0;
+ case hwmon_temp_fault:
+ /*
+- * The OPEN bit signals a fault. This is bit 0 of the temperature
+- * register (low byte).
++ * Any of OPEN or /PVLD bits indicate a hardware mulfunction
++ * and the conversion result may be incorrect
+ */
+- *val = tmp421->temp[channel] & 0x01;
++ *val = !!(tmp421->temp[channel] & 0x03);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+@@ -195,9 +195,6 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
+ {
+ switch (attr) {
+ case hwmon_temp_fault:
+- if (channel == 0)
+- return 0;
+- return 0444;
+ case hwmon_temp_input:
+ return 0444;
+ default:
+--
+2.33.0
+
--- /dev/null
+From eab53982661841d2b981130b307531708c637ea5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Sep 2021 13:43:28 +0200
+Subject: IB/cma: Do not send IGMP leaves for sendonly Multicast groups
+
+From: Christoph Lameter <cl@gentwo.de>
+
+[ Upstream commit 2cc74e1ee31d00393b6698ec80b322fd26523da4 ]
+
+ROCE uses IGMP for Multicast instead of the native Infiniband system where
+joins are required in order to post messages on the Multicast group. On
+Ethernet one can send Multicast messages to arbitrary addresses without
+the need to subscribe to a group.
+
+So ROCE correctly does not send IGMP joins during rdma_join_multicast().
+
+F.e. in cma_iboe_join_multicast() we see:
+
+ if (addr->sa_family == AF_INET) {
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+ ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+ if (!send_only) {
+ err = cma_igmp_send(ndev, &ib.rec.mgid,
+ true);
+ }
+ }
+ } else {
+
+So the IGMP join is suppressed as it is unnecessary.
+
+However no such check is done in destroy_mc(). And therefore leaving a
+sendonly multicast group will send an IGMP leave.
+
+This means that the following scenario can lead to a multicast receiver
+unexpectedly being unsubscribed from a MC group:
+
+1. Sender thread does a sendonly join on MC group X. No IGMP join
+ is sent.
+
+2. Receiver thread does a regular join on the same MC Group x.
+ IGMP join is sent and the receiver begins to get messages.
+
+3. Sender thread terminates and destroys MC group X.
+ IGMP leave is sent and the receiver no longer receives data.
+
+This patch adds the same logic for sendonly joins to destroy_mc() that is
+also used in cma_iboe_join_multicast().
+
+Fixes: ab15c95a17b3 ("IB/core: Support for CMA multicast join flags")
+Link: https://lore.kernel.org/r/alpine.DEB.2.22.394.2109081340540.668072@gentwo.de
+Signed-off-by: Christoph Lameter <cl@linux.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cma.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index a4962b499b61..3029e96161b5 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1814,6 +1814,8 @@ static void cma_release_port(struct rdma_id_private *id_priv)
+ static void destroy_mc(struct rdma_id_private *id_priv,
+ struct cma_multicast *mc)
+ {
++ bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
++
+ if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
+ ib_sa_free_multicast(mc->sa_mc);
+
+@@ -1830,7 +1832,10 @@ static void destroy_mc(struct rdma_id_private *id_priv,
+
+ cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
+ &mgid);
+- cma_igmp_send(ndev, &mgid, false);
++
++ if (!send_only)
++ cma_igmp_send(ndev, &mgid, false);
++
+ dev_put(ndev);
+ }
+
+--
+2.33.0
+
--- /dev/null
+From 84521a713e917d086e1749cab0369e74b8b79a4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Sep 2021 18:08:39 +0200
+Subject: ipvs: check that ip_vs_conn_tab_bits is between 8 and 20
+
+From: Andrea Claudi <aclaudi@redhat.com>
+
+[ Upstream commit 69e73dbfda14fbfe748d3812da1244cce2928dcb ]
+
+ip_vs_conn_tab_bits may be provided by the user through the
+conn_tab_bits module parameter. If this value is greater than 31, or
+less than 0, the shift operator used to derive tab_size causes undefined
+behaviour.
+
+Fix this checking ip_vs_conn_tab_bits value to be in the range specified
+in ipvs Kconfig. If not, simply use default value.
+
+Fixes: 6f7edb4881bf ("IPVS: Allow boot time change of hash size")
+Reported-by: Yi Chen <yiche@redhat.com>
+Signed-off-by: Andrea Claudi <aclaudi@redhat.com>
+Acked-by: Julian Anastasov <ja@ssi.bg>
+Acked-by: Simon Horman <horms@verge.net.au>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipvs/ip_vs_conn.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
+index c100c6b112c8..2c467c422dc6 100644
+--- a/net/netfilter/ipvs/ip_vs_conn.c
++++ b/net/netfilter/ipvs/ip_vs_conn.c
+@@ -1468,6 +1468,10 @@ int __init ip_vs_conn_init(void)
+ int idx;
+
+ /* Compute size and mask */
++ if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) {
++ pr_info("conn_tab_bits not in [8, 20]. Using default value\n");
++ ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
++ }
+ ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
+ ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
+
+--
+2.33.0
+
--- /dev/null
+From eedc7caba683d487144f35409d543e25cc31403c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Sep 2021 15:23:59 -0700
+Subject: ixgbe: Fix NULL pointer dereference in ixgbe_xdp_setup
+
+From: Feng Zhou <zhoufeng.zf@bytedance.com>
+
+[ Upstream commit 513e605d7a9ce136886cb42ebb2c40e9a6eb6333 ]
+
+The ixgbe driver currently generates a NULL pointer dereference with
+some machine (online cpus < 63). This is due to the fact that the
+maximum value of num_xdp_queues is nr_cpu_ids. Code is in
+"ixgbe_set_rss_queues"".
+
+Here's how the problem repeats itself:
+Some machine (online cpus < 63), And user set num_queues to 63 through
+ethtool. Code is in the "ixgbe_set_channels",
+ adapter->ring_feature[RING_F_FDIR].limit = count;
+
+It becomes 63.
+
+When user use xdp, "ixgbe_set_rss_queues" will set queues num.
+ adapter->num_rx_queues = rss_i;
+ adapter->num_tx_queues = rss_i;
+ adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);
+
+And rss_i's value is from
+ f = &adapter->ring_feature[RING_F_FDIR];
+ rss_i = f->indices = f->limit;
+
+So "num_rx_queues" > "num_xdp_queues", when run to "ixgbe_xdp_setup",
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->xdp_ring[i]->xsk_umem)
+
+It leads to panic.
+
+Call trace:
+[exception RIP: ixgbe_xdp+368]
+RIP: ffffffffc02a76a0 RSP: ffff9fe16202f8d0 RFLAGS: 00010297
+RAX: 0000000000000000 RBX: 0000000000000020 RCX: 0000000000000000
+RDX: 0000000000000000 RSI: 000000000000001c RDI: ffffffffa94ead90
+RBP: ffff92f8f24c0c18 R8: 0000000000000000 R9: 0000000000000000
+R10: ffff9fe16202f830 R11: 0000000000000000 R12: ffff92f8f24c0000
+R13: ffff9fe16202fc01 R14: 000000000000000a R15: ffffffffc02a7530
+ORIG_RAX: ffffffffffffffff CS: 0010 SS: 0018
+ 7 [ffff9fe16202f8f0] dev_xdp_install at ffffffffa89fbbcc
+ 8 [ffff9fe16202f920] dev_change_xdp_fd at ffffffffa8a08808
+ 9 [ffff9fe16202f960] do_setlink at ffffffffa8a20235
+10 [ffff9fe16202fa88] rtnl_setlink at ffffffffa8a20384
+11 [ffff9fe16202fc78] rtnetlink_rcv_msg at ffffffffa8a1a8dd
+12 [ffff9fe16202fcf0] netlink_rcv_skb at ffffffffa8a717eb
+13 [ffff9fe16202fd40] netlink_unicast at ffffffffa8a70f88
+14 [ffff9fe16202fd80] netlink_sendmsg at ffffffffa8a71319
+15 [ffff9fe16202fdf0] sock_sendmsg at ffffffffa89df290
+16 [ffff9fe16202fe08] __sys_sendto at ffffffffa89e19c8
+17 [ffff9fe16202ff30] __x64_sys_sendto at ffffffffa89e1a64
+18 [ffff9fe16202ff38] do_syscall_64 at ffffffffa84042b9
+19 [ffff9fe16202ff50] entry_SYSCALL_64_after_hwframe at ffffffffa8c0008c
+
+So I fix ixgbe_max_channels so that it will not allow a setting of queues
+to be higher than the num_online_cpus(). And when run to ixgbe_xdp_setup,
+take the smaller value of num_rx_queues and num_xdp_queues.
+
+Fixes: 4a9b32f30f80 ("ixgbe: fix potential RX buffer starvation for AF_XDP")
+Signed-off-by: Feng Zhou <zhoufeng.zf@bytedance.com>
+Tested-by: Sandeep Penigalapati <sandeep.penigalapati@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 2 +-
+ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8 ++++++--
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+index a280aa34ca1d..55983904b6df 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+@@ -3216,7 +3216,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
+ max_combined = ixgbe_max_rss_indices(adapter);
+ }
+
+- return max_combined;
++ return min_t(int, max_combined, num_online_cpus());
+ }
+
+ static void ixgbe_get_channels(struct net_device *dev,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 37439b76fcb5..ffe322136c58 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -10123,6 +10123,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct bpf_prog *old_prog;
+ bool need_reset;
++ int num_queues;
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ return -EINVAL;
+@@ -10172,11 +10173,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+ /* Kick start the NAPI context if there is an AF_XDP socket open
+ * on that queue id. This so that receiving will start.
+ */
+- if (need_reset && prog)
+- for (i = 0; i < adapter->num_rx_queues; i++)
++ if (need_reset && prog) {
++ num_queues = min_t(int, adapter->num_rx_queues,
++ adapter->num_xdp_queues);
++ for (i = 0; i < num_queues; i++)
+ if (adapter->xdp_ring[i]->xsk_pool)
+ (void)ixgbe_xsk_wakeup(adapter->netdev, i,
+ XDP_WAKEUP_RX);
++ }
+
+ return 0;
+ }
+--
+2.33.0
+
--- /dev/null
+From d66e9fe71c0bccd3202a8801d22e2b5c706e265e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Aug 2021 15:32:40 +0800
+Subject: mac80211: Fix ieee80211_amsdu_aggregate frag_tail bug
+
+From: Chih-Kang Chang <gary.chang@realtek.com>
+
+[ Upstream commit fe94bac626d9c1c5bc98ab32707be8a9d7f8adba ]
+
+In ieee80211_amsdu_aggregate(), a pointer frag_tail is set to point to
+the end of skb_shinfo(head)->frag_list, and is used to bind another skb
+at the end of this function. But when ieee80211_amsdu_aggregate()->
+ieee80211_amsdu_realloc_pad()->pskb_expand_head() executes, the address
+of skb_shinfo(head)->frag_list will be changed. However,
+ieee80211_amsdu_aggregate() does not update frag_tail after calling
+pskb_expand_head(). That prevents the second skb from being bound to the
+head skb appropriately. So we update the address of frag_tail to fix it.
+
+Fixes: 6e0456b54545 ("mac80211: add A-MSDU tx support")
+Signed-off-by: Chih-Kang Chang <gary.chang@realtek.com>
+Signed-off-by: Zong-Zhe Yang <kevin_yang@realtek.com>
+Signed-off-by: Ping-Ke Shih <pkshih@realtek.com>
+Link: https://lore.kernel.org/r/20210830073240.12736-1-pkshih@realtek.com
+[reword comment]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/tx.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 673ad3cf2c3a..bef517ccdecb 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -3365,6 +3365,14 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
+ if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
+ goto out;
+
++ /* If n == 2, the "while (*frag_tail)" loop above didn't execute
++ * and frag_tail should be &skb_shinfo(head)->frag_list.
++ * However, ieee80211_amsdu_prepare_head() can reallocate it.
++ * Reload frag_tail to have it pointing to the correct place.
++ */
++ if (n == 2)
++ frag_tail = &skb_shinfo(head)->frag_list;
++
+ /*
+ * Pad out the previous subframe to a multiple of 4 by adding the
+ * padding to the next one, that's being added. Note that head->len
+--
+2.33.0
+
--- /dev/null
+From b19ec117ebeaff3a852abb1a56dbceb5b8d882ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Sep 2021 11:29:37 +0200
+Subject: mac80211-hwsim: fix late beacon hrtimer handling
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 313bbd1990b6ddfdaa7da098d0c56b098a833572 ]
+
+Thomas explained in https://lore.kernel.org/r/87mtoeb4hb.ffs@tglx
+that our handling of the hrtimer here is wrong: If the timer fires
+late (e.g. due to vCPU scheduling, as reported by Dmitry/syzbot)
+then it tries to actually rearm the timer at the next deadline,
+which might be in the past already:
+
+ 1 2 3 N N+1
+ | | | ... | |
+
+ ^ intended to fire here (1)
+ ^ next deadline here (2)
+ ^ actually fired here
+
+The next time it fires, it's later, but will still try to schedule
+for the next deadline (now 3), etc. until it catches up with N,
+but that might take a long time, causing stalls etc.
+
+Now, all of this is simulation, so we just have to fix it, but
+note that the behaviour is wrong even per spec, since there's no
+value then in sending all those beacons unaligned - they should be
+aligned to the TBTT (1, 2, 3, ... in the picture), and if we're a
+bit (or a lot) late, then just resume at that point.
+
+Therefore, change the code to use hrtimer_forward_now() which will
+ensure that the next firing of the timer would be at N+1 (in the
+picture), i.e. the next interval point after the current time.
+
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
+Reported-by: Dmitry Vyukov <dvyukov@google.com>
+Reported-by: syzbot+0e964fad69a9c462bc1e@syzkaller.appspotmail.com
+Fixes: 01e59e467ecf ("mac80211_hwsim: hrtimer beacon")
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/r/20210915112936.544f383472eb.I3f9712009027aa09244b65399bf18bf482a8c4f1@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mac80211_hwsim.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 4ca0b06d09ad..b793d61d15d2 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -1795,8 +1795,8 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
+ bcn_int -= data->bcn_delta;
+ data->bcn_delta = 0;
+ }
+- hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer),
+- ns_to_ktime(bcn_int * NSEC_PER_USEC));
++ hrtimer_forward_now(&data->beacon_timer,
++ ns_to_ktime(bcn_int * NSEC_PER_USEC));
+ return HRTIMER_RESTART;
+ }
+
+--
+2.33.0
+
--- /dev/null
+From c030c3e86acb9d75e35bdf00e2378e6c40177971 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Sep 2021 14:45:22 +0200
+Subject: mac80211: limit injected vht mcs/nss in ieee80211_parse_tx_radiotap
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 13cb6d826e0ac0d144b0d48191ff1a111d32f0c6 ]
+
+Limit max values for vht mcs and nss in ieee80211_parse_tx_radiotap
+routine in order to fix the following warning reported by syzbot:
+
+WARNING: CPU: 0 PID: 10717 at include/net/mac80211.h:989 ieee80211_rate_set_vht include/net/mac80211.h:989 [inline]
+WARNING: CPU: 0 PID: 10717 at include/net/mac80211.h:989 ieee80211_parse_tx_radiotap+0x101e/0x12d0 net/mac80211/tx.c:2244
+Modules linked in:
+CPU: 0 PID: 10717 Comm: syz-executor.5 Not tainted 5.14.0-syzkaller #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+RIP: 0010:ieee80211_rate_set_vht include/net/mac80211.h:989 [inline]
+RIP: 0010:ieee80211_parse_tx_radiotap+0x101e/0x12d0 net/mac80211/tx.c:2244
+RSP: 0018:ffffc9000186f3e8 EFLAGS: 00010216
+RAX: 0000000000000618 RBX: ffff88804ef76500 RCX: ffffc900143a5000
+RDX: 0000000000040000 RSI: ffffffff888f478e RDI: 0000000000000003
+RBP: 00000000ffffffff R08: 0000000000000000 R09: 0000000000000100
+R10: ffffffff888f46f9 R11: 0000000000000000 R12: 00000000fffffff8
+R13: ffff88804ef7653c R14: 0000000000000001 R15: 0000000000000004
+FS: 00007fbf5718f700(0000) GS:ffff8880b9c00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000001b2de23000 CR3: 000000006a671000 CR4: 00000000001506f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000600
+Call Trace:
+ ieee80211_monitor_select_queue+0xa6/0x250 net/mac80211/iface.c:740
+ netdev_core_pick_tx+0x169/0x2e0 net/core/dev.c:4089
+ __dev_queue_xmit+0x6f9/0x3710 net/core/dev.c:4165
+ __bpf_tx_skb net/core/filter.c:2114 [inline]
+ __bpf_redirect_no_mac net/core/filter.c:2139 [inline]
+ __bpf_redirect+0x5ba/0xd20 net/core/filter.c:2162
+ ____bpf_clone_redirect net/core/filter.c:2429 [inline]
+ bpf_clone_redirect+0x2ae/0x420 net/core/filter.c:2401
+ bpf_prog_eeb6f53a69e5c6a2+0x59/0x234
+ bpf_dispatcher_nop_func include/linux/bpf.h:717 [inline]
+ __bpf_prog_run include/linux/filter.h:624 [inline]
+ bpf_prog_run include/linux/filter.h:631 [inline]
+ bpf_test_run+0x381/0xa30 net/bpf/test_run.c:119
+ bpf_prog_test_run_skb+0xb84/0x1ee0 net/bpf/test_run.c:663
+ bpf_prog_test_run kernel/bpf/syscall.c:3307 [inline]
+ __sys_bpf+0x2137/0x5df0 kernel/bpf/syscall.c:4605
+ __do_sys_bpf kernel/bpf/syscall.c:4691 [inline]
+ __se_sys_bpf kernel/bpf/syscall.c:4689 [inline]
+ __x64_sys_bpf+0x75/0xb0 kernel/bpf/syscall.c:4689
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+RIP: 0033:0x4665f9
+
+Reported-by: syzbot+0196ac871673f0c20f68@syzkaller.appspotmail.com
+Fixes: 646e76bb5daf4 ("mac80211: parse VHT info in injected frames")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/c26c3f02dcb38ab63b2f2534cb463d95ee81bb13.1632141760.git.lorenzo@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/tx.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index bef517ccdecb..bbbcc678c655 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2177,7 +2177,11 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
+ }
+
+ vht_mcs = iterator.this_arg[4] >> 4;
++ if (vht_mcs > 11)
++ vht_mcs = 0;
+ vht_nss = iterator.this_arg[4] & 0xF;
++ if (!vht_nss || vht_nss > 8)
++ vht_nss = 1;
+ break;
+
+ /*
+--
+2.33.0
+
--- /dev/null
+From 0cefbc2ff0769c7a203f18732ccada715b45d32e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Sep 2021 15:40:05 +0200
+Subject: mac80211: mesh: fix potentially unaligned access
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit b9731062ce8afd35cf723bf3a8ad55d208f915a5 ]
+
+The pointer here points directly into the frame, so the
+access is potentially unaligned. Use get_unaligned_le16
+to avoid that.
+
+Fixes: 3f52b7e328c5 ("mac80211: mesh power save basics")
+Link: https://lore.kernel.org/r/20210920154009.3110ff75be0c.Ib6a2ff9e9cc9bc6fca50fce631ec1ce725cc926b@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/mesh_ps.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
+index 204830a55240..3fbd0b9ff913 100644
+--- a/net/mac80211/mesh_ps.c
++++ b/net/mac80211/mesh_ps.c
+@@ -2,6 +2,7 @@
+ /*
+ * Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
+ * Copyright 2012-2013, cozybit Inc.
++ * Copyright (C) 2021 Intel Corporation
+ */
+
+ #include "mesh.h"
+@@ -588,7 +589,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
+
+ /* only transmit to PS STA with announced, non-zero awake window */
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
+- (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))
++ (!elems->awake_window || !get_unaligned_le16(elems->awake_window)))
+ return;
+
+ if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
+--
+2.33.0
+
--- /dev/null
+From 9d9fb85241e19436af487258b577188e828cdb51 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Sep 2021 17:04:11 -0700
+Subject: mptcp: don't return sockets in foreign netns
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit ea1300b9df7c8e8b65695a08b8f6aaf4b25fec9c ]
+
+mptcp_token_get_sock() may return a mptcp socket that is in
+a different net namespace than the socket that received the token value.
+
+The mptcp syncookie code path had an explicit check for this,
+this moves the test into mptcp_token_get_sock() function.
+
+Eventually token.c should be converted to pernet storage, but
+such change is not suitable for net tree.
+
+Fixes: 2c5ebd001d4f0 ("mptcp: refactor token container")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/mptcp_diag.c | 2 +-
+ net/mptcp/protocol.h | 2 +-
+ net/mptcp/subflow.c | 2 +-
+ net/mptcp/syncookies.c | 13 +------------
+ net/mptcp/token.c | 11 ++++++++---
+ net/mptcp/token_test.c | 14 ++++++++------
+ 6 files changed, 20 insertions(+), 24 deletions(-)
+
+diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
+index 5f390a97f556..f1af3f44875e 100644
+--- a/net/mptcp/mptcp_diag.c
++++ b/net/mptcp/mptcp_diag.c
+@@ -36,7 +36,7 @@ static int mptcp_diag_dump_one(struct netlink_callback *cb,
+ struct sock *sk;
+
+ net = sock_net(in_skb->sk);
+- msk = mptcp_token_get_sock(req->id.idiag_cookie[0]);
++ msk = mptcp_token_get_sock(net, req->id.idiag_cookie[0]);
+ if (!msk)
+ goto out_nosk;
+
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 13ab89dc1914..3e5af8397434 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -424,7 +424,7 @@ int mptcp_token_new_connect(struct sock *sk);
+ void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
+ struct mptcp_sock *msk);
+ bool mptcp_token_exists(u32 token);
+-struct mptcp_sock *mptcp_token_get_sock(u32 token);
++struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token);
+ struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
+ long *s_num);
+ void mptcp_token_destroy(struct mptcp_sock *msk);
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index bba5696fee36..2e9238490924 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -69,7 +69,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
+ struct mptcp_sock *msk;
+ int local_id;
+
+- msk = mptcp_token_get_sock(subflow_req->token);
++ msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
+ if (!msk) {
+ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
+ return NULL;
+diff --git a/net/mptcp/syncookies.c b/net/mptcp/syncookies.c
+index 37127781aee9..7f22526346a7 100644
+--- a/net/mptcp/syncookies.c
++++ b/net/mptcp/syncookies.c
+@@ -108,18 +108,12 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
+
+ e->valid = 0;
+
+- msk = mptcp_token_get_sock(e->token);
++ msk = mptcp_token_get_sock(net, e->token);
+ if (!msk) {
+ spin_unlock_bh(&join_entry_locks[i]);
+ return false;
+ }
+
+- /* If this fails, the token got re-used in the mean time by another
+- * mptcp socket in a different netns, i.e. entry is outdated.
+- */
+- if (!net_eq(sock_net((struct sock *)msk), net))
+- goto err_put;
+-
+ subflow_req->remote_nonce = e->remote_nonce;
+ subflow_req->local_nonce = e->local_nonce;
+ subflow_req->backup = e->backup;
+@@ -128,11 +122,6 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
+ subflow_req->msk = msk;
+ spin_unlock_bh(&join_entry_locks[i]);
+ return true;
+-
+-err_put:
+- spin_unlock_bh(&join_entry_locks[i]);
+- sock_put((struct sock *)msk);
+- return false;
+ }
+
+ void __init mptcp_join_cookie_init(void)
+diff --git a/net/mptcp/token.c b/net/mptcp/token.c
+index 0691a4883f3a..f0d656bf27ad 100644
+--- a/net/mptcp/token.c
++++ b/net/mptcp/token.c
+@@ -232,6 +232,7 @@ bool mptcp_token_exists(u32 token)
+
+ /**
+ * mptcp_token_get_sock - retrieve mptcp connection sock using its token
++ * @net: restrict to this namespace
+ * @token: token of the mptcp connection to retrieve
+ *
+ * This function returns the mptcp connection structure with the given token.
+@@ -239,7 +240,7 @@ bool mptcp_token_exists(u32 token)
+ *
+ * returns NULL if no connection with the given token value exists.
+ */
+-struct mptcp_sock *mptcp_token_get_sock(u32 token)
++struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token)
+ {
+ struct hlist_nulls_node *pos;
+ struct token_bucket *bucket;
+@@ -252,11 +253,15 @@ struct mptcp_sock *mptcp_token_get_sock(u32 token)
+ again:
+ sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) {
+ msk = mptcp_sk(sk);
+- if (READ_ONCE(msk->token) != token)
++ if (READ_ONCE(msk->token) != token ||
++ !net_eq(sock_net(sk), net))
+ continue;
++
+ if (!refcount_inc_not_zero(&sk->sk_refcnt))
+ goto not_found;
+- if (READ_ONCE(msk->token) != token) {
++
++ if (READ_ONCE(msk->token) != token ||
++ !net_eq(sock_net(sk), net)) {
+ sock_put(sk);
+ goto again;
+ }
+diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c
+index e1bd6f0a0676..5d984bec1cd8 100644
+--- a/net/mptcp/token_test.c
++++ b/net/mptcp/token_test.c
+@@ -11,6 +11,7 @@ static struct mptcp_subflow_request_sock *build_req_sock(struct kunit *test)
+ GFP_USER);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, req);
+ mptcp_token_init_request((struct request_sock *)req);
++ sock_net_set((struct sock *)req, &init_net);
+ return req;
+ }
+
+@@ -22,7 +23,7 @@ static void mptcp_token_test_req_basic(struct kunit *test)
+ KUNIT_ASSERT_EQ(test, 0,
+ mptcp_token_new_request((struct request_sock *)req));
+ KUNIT_EXPECT_NE(test, 0, (int)req->token);
+- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(req->token));
++ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, req->token));
+
+ /* cleanup */
+ mptcp_token_destroy_request((struct request_sock *)req);
+@@ -55,6 +56,7 @@ static struct mptcp_sock *build_msk(struct kunit *test)
+ msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
+ refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
++ sock_net_set((struct sock *)msk, &init_net);
+ return msk;
+ }
+
+@@ -74,11 +76,11 @@ static void mptcp_token_test_msk_basic(struct kunit *test)
+ mptcp_token_new_connect((struct sock *)icsk));
+ KUNIT_EXPECT_NE(test, 0, (int)ctx->token);
+ KUNIT_EXPECT_EQ(test, ctx->token, msk->token);
+- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(ctx->token));
++ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, ctx->token));
+ KUNIT_EXPECT_EQ(test, 2, (int)refcount_read(&sk->sk_refcnt));
+
+ mptcp_token_destroy(msk);
+- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(ctx->token));
++ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, ctx->token));
+ }
+
+ static void mptcp_token_test_accept(struct kunit *test)
+@@ -90,11 +92,11 @@ static void mptcp_token_test_accept(struct kunit *test)
+ mptcp_token_new_request((struct request_sock *)req));
+ msk->token = req->token;
+ mptcp_token_accept(req, msk);
+- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
++ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
+
+ /* this is now a no-op */
+ mptcp_token_destroy_request((struct request_sock *)req);
+- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
++ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
+
+ /* cleanup */
+ mptcp_token_destroy(msk);
+@@ -116,7 +118,7 @@ static void mptcp_token_test_destroyed(struct kunit *test)
+
+ /* simulate race on removal */
+ refcount_set(&sk->sk_refcnt, 0);
+- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(msk->token));
++ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, msk->token));
+
+ /* cleanup */
+ mptcp_token_destroy(msk);
+--
+2.33.0
+
--- /dev/null
+From ebf665f291bd765448dff2e285fff37602b0c32d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Sep 2021 16:23:33 +0300
+Subject: net: enetc: fix the incorrect clearing of IF_MODE bits
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 325fd36ae76a6d089983b2d2eccb41237d35b221 ]
+
+The enetc phylink .mac_config handler intends to clear the IFMODE field
+(bits 1:0) of the PM0_IF_MODE register, but incorrectly clears all the
+other fields instead.
+
+For normal operation, the bug was inconsequential, due to the fact that
+we write the PM0_IF_MODE register in two stages, first in
+phylink .mac_config (which incorrectly cleared out a bunch of stuff),
+then we update the speed and duplex to the correct values in
+phylink .mac_link_up.
+
+Judging by the code (not tested), it looks like maybe loopback mode was
+broken, since this is one of the settings in PM0_IF_MODE which is
+incorrectly cleared.
+
+Fixes: c76a97218dcb ("net: enetc: force the RGMII speed and duplex instead of operating in inband mode")
+Reported-by: Pavel Machek (CIP) <pavel@denx.de>
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc_pf.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+index 68133563a40c..716b396bf094 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+@@ -504,8 +504,7 @@ static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
+
+ if (phy_interface_mode_is_rgmii(phy_mode)) {
+ val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+- val &= ~ENETC_PM0_IFM_EN_AUTO;
+- val &= ENETC_PM0_IFM_IFMODE_MASK;
++ val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
+ val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
+ enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ }
+--
+2.33.0
+
--- /dev/null
+From 78b7c1b24010d3bb316ad047069a1e661e8db3dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Sep 2021 17:35:49 +0800
+Subject: net: hns3: do not allow call hns3_nic_net_open repeatedly
+
+From: Jian Shen <shenjian15@huawei.com>
+
+[ Upstream commit 5b09e88e1bf7fe86540fab4b5f3eece8abead39e ]
+
+hns3_nic_net_open() is not allowed to be called repeatedly, but there
+is no check for this. When doing device reset and setup tc
+concurrently, there is a small opportunity to call hns3_nic_net_open()
+repeatedly, causing a kernel bug by calling napi_enable() twice.
+
+The calltrace information is like below:
+[ 3078.222780] ------------[ cut here ]------------
+[ 3078.230255] kernel BUG at net/core/dev.c:6991!
+[ 3078.236224] Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
+[ 3078.243431] Modules linked in: hns3 hclgevf hclge hnae3 vfio_iommu_type1 vfio_pci vfio_virqfd vfio pv680_mii(O)
+[ 3078.258880] CPU: 0 PID: 295 Comm: kworker/u8:5 Tainted: G O 5.14.0-rc4+ #1
+[ 3078.269102] Hardware name: , BIOS KpxxxFPGA 1P B600 V181 08/12/2021
+[ 3078.276801] Workqueue: hclge hclge_service_task [hclge]
+[ 3078.288774] pstate: 60400009 (nZCv daif +PAN -UAO -TCO BTYPE=--)
+[ 3078.296168] pc : napi_enable+0x80/0x84
+tc qdisc sho[w 3d0e7v8 .e3t0h218 79] lr : hns3_nic_net_open+0x138/0x510 [hns3]
+
+[ 3078.314771] sp : ffff8000108abb20
+[ 3078.319099] x29: ffff8000108abb20 x28: 0000000000000000 x27: ffff0820a8490300
+[ 3078.329121] x26: 0000000000000001 x25: ffff08209cfc6200 x24: 0000000000000000
+[ 3078.339044] x23: ffff0820a8490300 x22: ffff08209cd76000 x21: ffff0820abfe3880
+[ 3078.349018] x20: 0000000000000000 x19: ffff08209cd76900 x18: 0000000000000000
+[ 3078.358620] x17: 0000000000000000 x16: ffffc816e1727a50 x15: 0000ffff8f4ff930
+[ 3078.368895] x14: 0000000000000000 x13: 0000000000000000 x12: 0000259e9dbeb6b4
+[ 3078.377987] x11: 0096a8f7e764eb40 x10: 634615ad28d3eab5 x9 : ffffc816ad8885b8
+[ 3078.387091] x8 : ffff08209cfc6fb8 x7 : ffff0820ac0da058 x6 : ffff0820a8490344
+[ 3078.396356] x5 : 0000000000000140 x4 : 0000000000000003 x3 : ffff08209cd76938
+[ 3078.405365] x2 : 0000000000000000 x1 : 0000000000000010 x0 : ffff0820abfe38a0
+[ 3078.414657] Call trace:
+[ 3078.418517] napi_enable+0x80/0x84
+[ 3078.424626] hns3_reset_notify_up_enet+0x78/0xd0 [hns3]
+[ 3078.433469] hns3_reset_notify+0x64/0x80 [hns3]
+[ 3078.441430] hclge_notify_client+0x68/0xb0 [hclge]
+[ 3078.450511] hclge_reset_rebuild+0x524/0x884 [hclge]
+[ 3078.458879] hclge_reset_service_task+0x3c4/0x680 [hclge]
+[ 3078.467470] hclge_service_task+0xb0/0xb54 [hclge]
+[ 3078.475675] process_one_work+0x1dc/0x48c
+[ 3078.481888] worker_thread+0x15c/0x464
+[ 3078.487104] kthread+0x160/0x170
+[ 3078.492479] ret_from_fork+0x10/0x18
+[ 3078.498785] Code: c8027c81 35ffffa2 d50323bf d65f03c0 (d4210000)
+[ 3078.506889] ---[ end trace 8ebe0340a1b0fb44 ]---
+
+Once hns3_nic_net_open() executes successfully, the flag
+HNS3_NIC_STATE_DOWN will be cleared. So add a check for this
+flag, and return directly when HNS3_NIC_STATE_DOWN is not set.
+
+Fixes: e888402789b9 ("net: hns3: call hns3_nic_net_open() while doing HNAE3_UP_CLIENT")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 936b9cfe1a62..4777db2623cf 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -444,6 +444,11 @@ static int hns3_nic_net_open(struct net_device *netdev)
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
++ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
++ netdev_warn(netdev, "net open repeatedly!\n");
++ return 0;
++ }
++
+ netif_carrier_off(netdev);
+
+ ret = hns3_nic_set_real_num_queue(netdev);
+--
+2.33.0
+
--- /dev/null
+From 162ed1717319c5fa1512fd7ddf29cbb98e5d3046 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Sep 2021 17:35:55 +0800
+Subject: net: hns3: fix always enable rx vlan filter problem after selftest
+
+From: Guangbin Huang <huangguangbin2@huawei.com>
+
+[ Upstream commit 27bf4af69fcb9845fb2f0076db5d562ec072e70f ]
+
+Currently, the rx vlan filter will always be disabled before selftest and
+enabled after selftest, as the rx vlan filter feature is fixed on in
+old devices earlier than V3.
+
+However, this feature is not fixed in some new devices and it can be
+disabled by the user. In this case, it is wrong if the rx vlan filter is
+enabled after selftest. So fix it.
+
+Fixes: bcc26e8dc432 ("net: hns3: remove unused code in hns3_self_test()")
+Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index 436d777cce06..cd0d7a546957 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -336,7 +336,8 @@ static void hns3_selftest_prepare(struct net_device *ndev,
+
+ #if IS_ENABLED(CONFIG_VLAN_8021Q)
+ /* Disable the vlan filter for selftest does not support it */
+- if (h->ae_algo->ops->enable_vlan_filter)
++ if (h->ae_algo->ops->enable_vlan_filter &&
++ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, false);
+ #endif
+
+@@ -361,7 +362,8 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
+ h->ae_algo->ops->halt_autoneg(h, false);
+
+ #if IS_ENABLED(CONFIG_VLAN_8021Q)
+- if (h->ae_algo->ops->enable_vlan_filter)
++ if (h->ae_algo->ops->enable_vlan_filter &&
++ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+ #endif
+
+--
+2.33.0
+
--- /dev/null
+From 5150dd548c50e1a5b066d8285a0e597cd71418f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Sep 2021 17:35:52 +0800
+Subject: net: hns3: fix mixed flag HCLGE_FLAG_MQPRIO_ENABLE and
+ HCLGE_FLAG_DCB_ENABLE
+
+From: Jian Shen <shenjian15@huawei.com>
+
+[ Upstream commit 0472e95ffeac8e61259eec17ab61608c6b35599d ]
+
+HCLGE_FLAG_MQPRIO_ENABLE is supposed to be set when enabling
+multiple TCs with tc mqprio, and HCLGE_FLAG_DCB_ENABLE is
+supposed to be set when enabling multiple TCs with ets. But
+the driver mixed up the flags when updating the tm configuration.
+
+Furthermore, PFC should also be available when
+HCLGE_FLAG_MQPRIO_ENABLE is set, so remove the unnecessary
+limitation.
+
+Fixes: 5a5c90917467 ("net: hns3: add support for tc mqprio offload")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../hisilicon/hns3/hns3pf/hclge_dcb.c | 7 +++--
+ .../ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 31 +++----------------
+ 2 files changed, 10 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+index a93c7eb4e7cb..28a90ead4795 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+@@ -248,6 +248,10 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
+ }
+
+ hclge_tm_schd_info_update(hdev, num_tc);
++ if (num_tc > 1)
++ hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
++ else
++ hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+
+ ret = hclge_ieee_ets_to_tm_info(hdev, ets);
+ if (ret)
+@@ -313,8 +317,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
+ u8 i, j, pfc_map, *prio_tc;
+ int ret;
+
+- if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
++ if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ if (pfc->pfc_en == hdev->tm_info.pfc_en)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+index 42e82bf69b8e..69d081515c60 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -646,14 +646,6 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ hdev->tm_info.prio_tc[i] =
+ (i >= hdev->tm_info.num_tc) ? 0 : i;
+-
+- /* DCB is enabled if we have more than 1 TC or pfc_en is
+- * non-zero.
+- */
+- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
+- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+- else
+- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+ }
+
+ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
+@@ -684,10 +676,10 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
+
+ static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
+ {
+- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
++ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
+ if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
+ dev_warn(&hdev->pdev->dev,
+- "DCB is disable, but last mode is FC_PFC\n");
++ "Only 1 tc used, but last mode is FC_PFC\n");
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+@@ -713,7 +705,7 @@ static void hclge_update_fc_mode(struct hclge_dev *hdev)
+ }
+ }
+
+-static void hclge_pfc_info_init(struct hclge_dev *hdev)
++void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
+ {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ hclge_update_fc_mode(hdev);
+@@ -729,7 +721,7 @@ static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
+
+ hclge_tm_vport_info_update(hdev);
+
+- hclge_pfc_info_init(hdev);
++ hclge_tm_pfc_info_update(hdev);
+ }
+
+ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
+@@ -1465,19 +1457,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
+ hclge_tm_schd_info_init(hdev);
+ }
+
+-void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
+-{
+- /* DCB is enabled if we have more than 1 TC or pfc_en is
+- * non-zero.
+- */
+- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
+- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+- else
+- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+-
+- hclge_pfc_info_init(hdev);
+-}
+-
+ int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
+ {
+ int ret;
+@@ -1523,7 +1502,7 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
+ if (ret)
+ return ret;
+
+- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
++ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
+ return 0;
+
+ return hclge_tm_bp_setup(hdev);
+--
+2.33.0
+
--- /dev/null
+From 30c7de28ab56e36c2c9a4c128c612dbfb27ab7de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Mar 2021 09:36:25 +0800
+Subject: net: hns3: fix prototype warning
+
+From: Huazhong Tan <tanhuazhong@huawei.com>
+
+[ Upstream commit a1e144d7dc3c55aa4d451e3a23cd8f34cd65ee01 ]
+
+Correct a report warning in hns3_ethtool.c
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index c0aa3be0cdfb..0aee100902ff 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -301,7 +301,7 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
+ }
+
+ /**
+- * hns3_nic_self_test - self test
++ * hns3_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+--
+2.33.0
+
--- /dev/null
+From 80ef91634abe73ae95b79e289ef4fc28b774300d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Sep 2021 17:35:53 +0800
+Subject: net: hns3: fix show wrong state when add existing uc mac address
+
+From: Jian Shen <shenjian15@huawei.com>
+
+[ Upstream commit 108b3c7810e14892c4a1819b1d268a2c785c087c ]
+
+Currently, if a function adds an existing unicast mac address, even though
+driver will not add this address into hardware, but it will return 0 in
+function hclge_add_uc_addr_common(). It will cause the state of this
+unicast mac address is ACTIVE in driver, but it should be in TO-ADD state.
+
+To fix this problem, function hclge_add_uc_addr_common() returns -EEXIST
+if mac address is existing, and delete two error log to avoid printing
+them all the time after this modification.
+
+Fixes: 72110b567479 ("net: hns3: return 0 and print warning when hit duplicate MAC")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../hisilicon/hns3/hns3pf/hclge_main.c | 19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index 24357e907155..0e869f449f12 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -7581,15 +7581,8 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
+ }
+
+ /* check if we just hit the duplicate */
+- if (!ret) {
+- dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
+- vport->vport_id, addr);
+- return 0;
+- }
+-
+- dev_err(&hdev->pdev->dev,
+- "PF failed to add unicast entry(%pM) in the MAC table\n",
+- addr);
++ if (!ret)
++ return -EEXIST;
+
+ return ret;
+ }
+@@ -7743,7 +7736,13 @@ static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+- break;
++
++ /* If one unicast mac address is existing in hardware,
++ * we need to try whether other unicast mac addresses
++ * are new addresses that can be added.
++ */
++ if (ret != -EEXIST)
++ break;
+ }
+ }
+ }
+--
+2.33.0
+
--- /dev/null
+From c80111b7b83459c697b2001fe4346e0be246e896 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 28 Nov 2020 11:51:50 +0800
+Subject: net: hns3: keep MAC pause mode when multiple TCs are enabled
+
+From: Yonglong Liu <liuyonglong@huawei.com>
+
+[ Upstream commit d78e5b6a6764cb6e83668806b63d74566db36399 ]
+
+Below HNAE3_DEVICE_VERSION_V3, MAC pause mode only supports one
+TC, when enabled multiple TCs, force enable PFC mode.
+
+HNAE3_DEVICE_VERSION_V3 can support MAC pause mode on multiple
+TCs, so when enable multiple TCs, just keep MAC pause mode,
+and enable PFC mode just according to the user settings.
+
+Signed-off-by: Yonglong Liu <liuyonglong@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 23 ++++++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+index e8495f58a1a8..42e82bf69b8e 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -682,7 +682,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
+ }
+ }
+
+-static void hclge_pfc_info_init(struct hclge_dev *hdev)
++static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
+ {
+ if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
+ if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
+@@ -700,6 +700,27 @@ static void hclge_pfc_info_init(struct hclge_dev *hdev)
+ }
+ }
+
++static void hclge_update_fc_mode(struct hclge_dev *hdev)
++{
++ if (!hdev->tm_info.pfc_en) {
++ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
++ return;
++ }
++
++ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
++ hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
++ hdev->tm_info.fc_mode = HCLGE_FC_PFC;
++ }
++}
++
++static void hclge_pfc_info_init(struct hclge_dev *hdev)
++{
++ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
++ hclge_update_fc_mode(hdev);
++ else
++ hclge_update_fc_mode_by_dcb_flag(hdev);
++}
++
+ static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
+ {
+ hclge_tm_pg_info_init(hdev);
+--
+2.33.0
+
--- /dev/null
+From 7b3fd1f063c6ede79a5d74a401ac6db014f73651 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Aug 2021 14:06:37 +0800
+Subject: net: hns3: reconstruct function hns3_self_test
+
+From: Peng Li <lipeng321@huawei.com>
+
+[ Upstream commit 4c8dab1c709c5a715bce14efdb8f4e889d86aa04 ]
+
+This patch reconstructs function hns3_self_test to reduce the code
+cycle complexity and make code more concise.
+
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/hisilicon/hns3/hns3_ethtool.c | 101 +++++++++++-------
+ 1 file changed, 64 insertions(+), 37 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+index 0aee100902ff..436d777cce06 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -300,33 +300,8 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
+ return ret_val;
+ }
+
+-/**
+- * hns3_self_test - self test
+- * @ndev: net device
+- * @eth_test: test cmd
+- * @data: test result
+- */
+-static void hns3_self_test(struct net_device *ndev,
+- struct ethtool_test *eth_test, u64 *data)
++static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
+ {
+- struct hns3_nic_priv *priv = netdev_priv(ndev);
+- struct hnae3_handle *h = priv->ae_handle;
+- int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+- bool if_running = netif_running(ndev);
+- int test_index = 0;
+- u32 i;
+-
+- if (hns3_nic_resetting(ndev)) {
+- netdev_err(ndev, "dev resetting!");
+- return;
+- }
+-
+- /* Only do offline selftest, or pass by default */
+- if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+- return;
+-
+- netif_dbg(h, drv, ndev, "self test start");
+-
+ st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
+ st_param[HNAE3_LOOP_APP][1] =
+ h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
+@@ -343,6 +318,18 @@ static void hns3_self_test(struct net_device *ndev,
+ st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
+ st_param[HNAE3_LOOP_PHY][1] =
+ h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
++}
++
++static void hns3_selftest_prepare(struct net_device *ndev,
++ bool if_running, int (*st_param)[2])
++{
++ struct hns3_nic_priv *priv = netdev_priv(ndev);
++ struct hnae3_handle *h = priv->ae_handle;
++
++ if (netif_msg_ifdown(h))
++ netdev_info(ndev, "self test start\n");
++
++ hns3_set_selftest_param(h, st_param);
+
+ if (if_running)
+ ndev->netdev_ops->ndo_stop(ndev);
+@@ -361,6 +348,35 @@ static void hns3_self_test(struct net_device *ndev,
+ h->ae_algo->ops->halt_autoneg(h, true);
+
+ set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
++}
++
++static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
++{
++ struct hns3_nic_priv *priv = netdev_priv(ndev);
++ struct hnae3_handle *h = priv->ae_handle;
++
++ clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
++
++ if (h->ae_algo->ops->halt_autoneg)
++ h->ae_algo->ops->halt_autoneg(h, false);
++
++#if IS_ENABLED(CONFIG_VLAN_8021Q)
++ if (h->ae_algo->ops->enable_vlan_filter)
++ h->ae_algo->ops->enable_vlan_filter(h, true);
++#endif
++
++ if (if_running)
++ ndev->netdev_ops->ndo_open(ndev);
++
++ if (netif_msg_ifdown(h))
++ netdev_info(ndev, "self test end\n");
++}
++
++static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
++ struct ethtool_test *eth_test, u64 *data)
++{
++ int test_index = 0;
++ u32 i;
+
+ for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
+ enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
+@@ -379,21 +395,32 @@ static void hns3_self_test(struct net_device *ndev,
+
+ test_index++;
+ }
++}
+
+- clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+-
+- if (h->ae_algo->ops->halt_autoneg)
+- h->ae_algo->ops->halt_autoneg(h, false);
++/**
++ * hns3_nic_self_test - self test
++ * @ndev: net device
++ * @eth_test: test cmd
++ * @data: test result
++ */
++static void hns3_self_test(struct net_device *ndev,
++ struct ethtool_test *eth_test, u64 *data)
++{
++ int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
++ bool if_running = netif_running(ndev);
+
+-#if IS_ENABLED(CONFIG_VLAN_8021Q)
+- if (h->ae_algo->ops->enable_vlan_filter)
+- h->ae_algo->ops->enable_vlan_filter(h, true);
+-#endif
++ if (hns3_nic_resetting(ndev)) {
++ netdev_err(ndev, "dev resetting!");
++ return;
++ }
+
+- if (if_running)
+- ndev->netdev_ops->ndo_open(ndev);
++ /* Only do offline selftest, or pass by default */
++ if (eth_test->flags != ETH_TEST_FL_OFFLINE)
++ return;
+
+- netif_dbg(h, drv, ndev, "self test end\n");
++ hns3_selftest_prepare(ndev, if_running, st_param);
++ hns3_do_selftest(ndev, st_param, eth_test, data);
++ hns3_selftest_restore(ndev, if_running);
+ }
+
+ static int hns3_get_sset_count(struct net_device *netdev, int stringset)
+--
+2.33.0
+
--- /dev/null
+From fc863f52a731724f375a77bf82a491a1eef2bfd5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Sep 2021 23:03:19 +0800
+Subject: net: ipv4: Fix rtnexthop len when RTA_FLOW is present
+
+From: Xiao Liang <shaw.leon@gmail.com>
+
+[ Upstream commit 597aa16c782496bf74c5dc3b45ff472ade6cee64 ]
+
+Multipath RTA_FLOW is embedded in nexthop. Dump it in fib_add_nexthop()
+to get the length of rtnexthop correct.
+
+Fixes: b0f60193632e ("ipv4: Refactor nexthop attributes in fib_dump_info")
+Signed-off-by: Xiao Liang <shaw.leon@gmail.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/ip_fib.h | 2 +-
+ include/net/nexthop.h | 2 +-
+ net/ipv4/fib_semantics.c | 16 +++++++++-------
+ net/ipv6/route.c | 5 +++--
+ 4 files changed, 14 insertions(+), 11 deletions(-)
+
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 2ec062aaa978..4d431d7b4415 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -553,5 +553,5 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
+ int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
+ u8 rt_family, unsigned char *flags, bool skip_oif);
+ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
+- int nh_weight, u8 rt_family);
++ int nh_weight, u8 rt_family, u32 nh_tclassid);
+ #endif /* _NET_FIB_H */
+diff --git a/include/net/nexthop.h b/include/net/nexthop.h
+index 4c8c9fe9a3f0..fd87d727aa21 100644
+--- a/include/net/nexthop.h
++++ b/include/net/nexthop.h
+@@ -211,7 +211,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
+ struct fib_nh_common *nhc = &nhi->fib_nhc;
+ int weight = nhg->nh_entries[i].weight;
+
+- if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
++ if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
+ return -EMSGSIZE;
+ }
+
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index 1f75dc686b6b..642503e89924 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -1663,7 +1663,7 @@ EXPORT_SYMBOL_GPL(fib_nexthop_info);
+
+ #if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
+ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
+- int nh_weight, u8 rt_family)
++ int nh_weight, u8 rt_family, u32 nh_tclassid)
+ {
+ const struct net_device *dev = nhc->nhc_dev;
+ struct rtnexthop *rtnh;
+@@ -1681,6 +1681,9 @@ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
+
+ rtnh->rtnh_flags = flags;
+
++ if (nh_tclassid && nla_put_u32(skb, RTA_FLOW, nh_tclassid))
++ goto nla_put_failure;
++
+ /* length of rtnetlink header + attributes */
+ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
+
+@@ -1708,14 +1711,13 @@ static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
+ }
+
+ for_nexthops(fi) {
+- if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
+- AF_INET) < 0)
+- goto nla_put_failure;
++ u32 nh_tclassid = 0;
+ #ifdef CONFIG_IP_ROUTE_CLASSID
+- if (nh->nh_tclassid &&
+- nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
+- goto nla_put_failure;
++ nh_tclassid = nh->nh_tclassid;
+ #endif
++ if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
++ AF_INET, nh_tclassid) < 0)
++ goto nla_put_failure;
+ } endfor_nexthops(fi);
+
+ mp_end:
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 168a7b4d957a..a68a7d7c0728 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -5566,14 +5566,15 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
+ goto nla_put_failure;
+
+ if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
+- rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
++ rt->fib6_nh->fib_nh_weight, AF_INET6,
++ 0) < 0)
+ goto nla_put_failure;
+
+ list_for_each_entry_safe(sibling, next_sibling,
+ &rt->fib6_siblings, fib6_siblings) {
+ if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
+ sibling->fib6_nh->fib_nh_weight,
+- AF_INET6) < 0)
++ AF_INET6, 0) < 0)
+ goto nla_put_failure;
+ }
+
+--
+2.33.0
+
--- /dev/null
+From 9bb966df1d03e27acd0c71167a099f05e5d868b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Sep 2021 16:13:02 +0200
+Subject: net: ks8851: fix link error
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 51bb08dd04a05035a64504faa47651d36b0f3125 ]
+
+An object file cannot be built for both loadable module and built-in
+use at the same time:
+
+arm-linux-gnueabi-ld: drivers/net/ethernet/micrel/ks8851_common.o: in function `ks8851_probe_common':
+ks8851_common.c:(.text+0xf80): undefined reference to `__this_module'
+
+Change the ks8851_common code to be a standalone module instead,
+and use Makefile logic to ensure this is built-in if at least one
+of its two users is.
+
+Fixes: 797047f875b5 ("net: ks8851: Implement Parallel bus operations")
+Link: https://lore.kernel.org/netdev/20210125121937.3900988-1-arnd@kernel.org/
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Acked-by: Marek Vasut <marex@denx.de>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/micrel/Makefile | 6 ++----
+ drivers/net/ethernet/micrel/ks8851_common.c | 8 ++++++++
+ 2 files changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile
+index 5cc00d22c708..6ecc4eb30e74 100644
+--- a/drivers/net/ethernet/micrel/Makefile
++++ b/drivers/net/ethernet/micrel/Makefile
+@@ -4,8 +4,6 @@
+ #
+
+ obj-$(CONFIG_KS8842) += ks8842.o
+-obj-$(CONFIG_KS8851) += ks8851.o
+-ks8851-objs = ks8851_common.o ks8851_spi.o
+-obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
+-ks8851_mll-objs = ks8851_common.o ks8851_par.o
++obj-$(CONFIG_KS8851) += ks8851_common.o ks8851_spi.o
++obj-$(CONFIG_KS8851_MLL) += ks8851_common.o ks8851_par.o
+ obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
+diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
+index d65872172229..f74eae8eed02 100644
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -1031,6 +1031,7 @@ int ks8851_suspend(struct device *dev)
+
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(ks8851_suspend);
+
+ int ks8851_resume(struct device *dev)
+ {
+@@ -1044,6 +1045,7 @@ int ks8851_resume(struct device *dev)
+
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(ks8851_resume);
+ #endif
+
+ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
+@@ -1175,6 +1177,7 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
+ err_reg_io:
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(ks8851_probe_common);
+
+ int ks8851_remove_common(struct device *dev)
+ {
+@@ -1191,3 +1194,8 @@ int ks8851_remove_common(struct device *dev)
+
+ return 0;
+ }
++EXPORT_SYMBOL_GPL(ks8851_remove_common);
++
++MODULE_DESCRIPTION("KS8851 Network driver");
++MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
++MODULE_LICENSE("GPL");
+--
+2.33.0
+
--- /dev/null
+From 3b2629e0ea77c2572c5c8959f1e9bae5e424bd79 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Sep 2021 13:32:33 -0700
+Subject: net: phy: bcm7xxx: Fixed indirect MMD operations
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+[ Upstream commit d88fd1b546ff19c8040cfaea76bf16aed1c5a0bb ]
+
+When EEE support was added to the 28nm EPHY it was assumed that it would
+be able to support the standard clause 45 over clause 22 register access
+method. It turns out that the PHY does not support that, which is the
+very reason for using the indirect shadow mode 2 bank 3 access method.
+
+Implement {read,write}_mmd to allow the standard PHY library routines
+pertaining to EEE querying and configuration to work correctly on these
+PHYs. This forces us to implement a __phy_set_clr_bits() function that
+does not grab the MDIO bus lock since the PHY driver's {read,write}_mmd
+functions are always called with that lock held.
+
+Fixes: 83ee102a6998 ("net: phy: bcm7xxx: add support for 28nm EPHY")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/bcm7xxx.c | 114 ++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 110 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
+index 15812001b3ff..115044e21c74 100644
+--- a/drivers/net/phy/bcm7xxx.c
++++ b/drivers/net/phy/bcm7xxx.c
+@@ -27,7 +27,12 @@
+ #define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe
+ #define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf
+ #define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a
++#define MII_BCM7XXX_SHD_3_PCS_CTRL 0x0
++#define MII_BCM7XXX_SHD_3_PCS_STATUS 0x1
++#define MII_BCM7XXX_SHD_3_EEE_CAP 0x2
+ #define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3
++#define MII_BCM7XXX_SHD_3_EEE_LP 0x4
++#define MII_BCM7XXX_SHD_3_EEE_WK_ERR 0x5
+ #define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6
+ #define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400
+ #define MII_BCM7XXX_SHD_3_AN_STAT 0xb
+@@ -216,25 +221,37 @@ static int bcm7xxx_28nm_resume(struct phy_device *phydev)
+ return genphy_config_aneg(phydev);
+ }
+
+-static int phy_set_clr_bits(struct phy_device *dev, int location,
+- int set_mask, int clr_mask)
++static int __phy_set_clr_bits(struct phy_device *dev, int location,
++ int set_mask, int clr_mask)
+ {
+ int v, ret;
+
+- v = phy_read(dev, location);
++ v = __phy_read(dev, location);
+ if (v < 0)
+ return v;
+
+ v &= ~clr_mask;
+ v |= set_mask;
+
+- ret = phy_write(dev, location, v);
++ ret = __phy_write(dev, location, v);
+ if (ret < 0)
+ return ret;
+
+ return v;
+ }
+
++static int phy_set_clr_bits(struct phy_device *dev, int location,
++ int set_mask, int clr_mask)
++{
++ int ret;
++
++ mutex_lock(&dev->mdio.bus->mdio_lock);
++ ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask);
++ mutex_unlock(&dev->mdio.bus->mdio_lock);
++
++ return ret;
++}
++
+ static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
+ {
+ int ret;
+@@ -398,6 +415,93 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
+ return bcm7xxx_28nm_ephy_apd_enable(phydev);
+ }
+
++#define MII_BCM7XXX_REG_INVALID 0xff
++
++static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
++{
++ switch (regnum) {
++ case MDIO_CTRL1:
++ return MII_BCM7XXX_SHD_3_PCS_CTRL;
++ case MDIO_STAT1:
++ return MII_BCM7XXX_SHD_3_PCS_STATUS;
++ case MDIO_PCS_EEE_ABLE:
++ return MII_BCM7XXX_SHD_3_EEE_CAP;
++ case MDIO_AN_EEE_ADV:
++ return MII_BCM7XXX_SHD_3_AN_EEE_ADV;
++ case MDIO_AN_EEE_LPABLE:
++ return MII_BCM7XXX_SHD_3_EEE_LP;
++ case MDIO_PCS_EEE_WK_ERR:
++ return MII_BCM7XXX_SHD_3_EEE_WK_ERR;
++ default:
++ return MII_BCM7XXX_REG_INVALID;
++ }
++}
++
++static bool bcm7xxx_28nm_ephy_dev_valid(int devnum)
++{
++ return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS;
++}
++
++static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev,
++ int devnum, u16 regnum)
++{
++ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
++ int ret;
++
++ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
++ shd == MII_BCM7XXX_REG_INVALID)
++ return -EOPNOTSUPP;
++
++ /* set shadow mode 2 */
++ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
++ MII_BCM7XXX_SHD_MODE_2, 0);
++ if (ret < 0)
++ return ret;
++
++ /* Access the desired shadow register address */
++ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
++ if (ret < 0)
++ goto reset_shadow_mode;
++
++ ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT);
++
++reset_shadow_mode:
++ /* reset shadow mode 2 */
++ __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
++ MII_BCM7XXX_SHD_MODE_2);
++ return ret;
++}
++
++static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev,
++ int devnum, u16 regnum, u16 val)
++{
++ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
++ int ret;
++
++ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
++ shd == MII_BCM7XXX_REG_INVALID)
++ return -EOPNOTSUPP;
++
++ /* set shadow mode 2 */
++ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
++ MII_BCM7XXX_SHD_MODE_2, 0);
++ if (ret < 0)
++ return ret;
++
++ /* Access the desired shadow register address */
++ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
++ if (ret < 0)
++ goto reset_shadow_mode;
++
++ /* Write the desired value in the shadow register */
++ __phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);
++
++reset_shadow_mode:
++ /* reset shadow mode 2 */
++ return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
++ MII_BCM7XXX_SHD_MODE_2);
++}
++
+ static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
+ {
+ int ret;
+@@ -595,6 +699,8 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
+ .get_stats = bcm7xxx_28nm_get_phy_stats, \
+ .probe = bcm7xxx_28nm_probe, \
+ .remove = bcm7xxx_28nm_remove, \
++ .read_mmd = bcm7xxx_28nm_ephy_read_mmd, \
++ .write_mmd = bcm7xxx_28nm_ephy_write_mmd, \
+ }
+
+ #define BCM7XXX_40NM_EPHY(_oui, _name) \
+--
+2.33.0
+
--- /dev/null
+From d3058774d5c572ff901a10e9d82355f500171275 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Sep 2021 18:08:49 +0300
+Subject: net: sched: flower: protect fl_walk() with rcu
+
+From: Vlad Buslov <vladbu@nvidia.com>
+
+[ Upstream commit d5ef190693a7d76c5c192d108e8dec48307b46ee ]
+
+Patch that refactored fl_walk() to use idr_for_each_entry_continue_ul()
+also removed rcu protection of individual filters which causes following
+use-after-free when filter is deleted concurrently. Fix fl_walk() to obtain
+rcu read lock while iterating and taking the filter reference and temporary
+release the lock while calling arg->fn() callback that can sleep.
+
+KASAN trace:
+
+[ 352.773640] ==================================================================
+[ 352.775041] BUG: KASAN: use-after-free in fl_walk+0x159/0x240 [cls_flower]
+[ 352.776304] Read of size 4 at addr ffff8881c8251480 by task tc/2987
+
+[ 352.777862] CPU: 3 PID: 2987 Comm: tc Not tainted 5.15.0-rc2+ #2
+[ 352.778980] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014
+[ 352.781022] Call Trace:
+[ 352.781573] dump_stack_lvl+0x46/0x5a
+[ 352.782332] print_address_description.constprop.0+0x1f/0x140
+[ 352.783400] ? fl_walk+0x159/0x240 [cls_flower]
+[ 352.784292] ? fl_walk+0x159/0x240 [cls_flower]
+[ 352.785138] kasan_report.cold+0x83/0xdf
+[ 352.785851] ? fl_walk+0x159/0x240 [cls_flower]
+[ 352.786587] kasan_check_range+0x145/0x1a0
+[ 352.787337] fl_walk+0x159/0x240 [cls_flower]
+[ 352.788163] ? fl_put+0x10/0x10 [cls_flower]
+[ 352.789007] ? __mutex_unlock_slowpath.constprop.0+0x220/0x220
+[ 352.790102] tcf_chain_dump+0x231/0x450
+[ 352.790878] ? tcf_chain_tp_delete_empty+0x170/0x170
+[ 352.791833] ? __might_sleep+0x2e/0xc0
+[ 352.792594] ? tfilter_notify+0x170/0x170
+[ 352.793400] ? __mutex_unlock_slowpath.constprop.0+0x220/0x220
+[ 352.794477] tc_dump_tfilter+0x385/0x4b0
+[ 352.795262] ? tc_new_tfilter+0x1180/0x1180
+[ 352.796103] ? __mod_node_page_state+0x1f/0xc0
+[ 352.796974] ? __build_skb_around+0x10e/0x130
+[ 352.797826] netlink_dump+0x2c0/0x560
+[ 352.798563] ? netlink_getsockopt+0x430/0x430
+[ 352.799433] ? __mutex_unlock_slowpath.constprop.0+0x220/0x220
+[ 352.800542] __netlink_dump_start+0x356/0x440
+[ 352.801397] rtnetlink_rcv_msg+0x3ff/0x550
+[ 352.802190] ? tc_new_tfilter+0x1180/0x1180
+[ 352.802872] ? rtnl_calcit.isra.0+0x1f0/0x1f0
+[ 352.803668] ? tc_new_tfilter+0x1180/0x1180
+[ 352.804344] ? _copy_from_iter_nocache+0x800/0x800
+[ 352.805202] ? kasan_set_track+0x1c/0x30
+[ 352.805900] netlink_rcv_skb+0xc6/0x1f0
+[ 352.806587] ? rht_deferred_worker+0x6b0/0x6b0
+[ 352.807455] ? rtnl_calcit.isra.0+0x1f0/0x1f0
+[ 352.808324] ? netlink_ack+0x4d0/0x4d0
+[ 352.809086] ? netlink_deliver_tap+0x62/0x3d0
+[ 352.809951] netlink_unicast+0x353/0x480
+[ 352.810744] ? netlink_attachskb+0x430/0x430
+[ 352.811586] ? __alloc_skb+0xd7/0x200
+[ 352.812349] netlink_sendmsg+0x396/0x680
+[ 352.813132] ? netlink_unicast+0x480/0x480
+[ 352.813952] ? __import_iovec+0x192/0x210
+[ 352.814759] ? netlink_unicast+0x480/0x480
+[ 352.815580] sock_sendmsg+0x6c/0x80
+[ 352.816299] ____sys_sendmsg+0x3a5/0x3c0
+[ 352.817096] ? kernel_sendmsg+0x30/0x30
+[ 352.817873] ? __ia32_sys_recvmmsg+0x150/0x150
+[ 352.818753] ___sys_sendmsg+0xd8/0x140
+[ 352.819518] ? sendmsg_copy_msghdr+0x110/0x110
+[ 352.820402] ? ___sys_recvmsg+0xf4/0x1a0
+[ 352.821110] ? __copy_msghdr_from_user+0x260/0x260
+[ 352.821934] ? _raw_spin_lock+0x81/0xd0
+[ 352.822680] ? __handle_mm_fault+0xef3/0x1b20
+[ 352.823549] ? rb_insert_color+0x2a/0x270
+[ 352.824373] ? copy_page_range+0x16b0/0x16b0
+[ 352.825209] ? perf_event_update_userpage+0x2d0/0x2d0
+[ 352.826190] ? __fget_light+0xd9/0xf0
+[ 352.826941] __sys_sendmsg+0xb3/0x130
+[ 352.827613] ? __sys_sendmsg_sock+0x20/0x20
+[ 352.828377] ? do_user_addr_fault+0x2c5/0x8a0
+[ 352.829184] ? fpregs_assert_state_consistent+0x52/0x60
+[ 352.830001] ? exit_to_user_mode_prepare+0x32/0x160
+[ 352.830845] do_syscall_64+0x35/0x80
+[ 352.831445] entry_SYSCALL_64_after_hwframe+0x44/0xae
+[ 352.832331] RIP: 0033:0x7f7bee973c17
+[ 352.833078] Code: 0c 00 f7 d8 64 89 02 48 c7 c0 ff ff ff ff eb b7 0f 1f 00 f3 0f 1e fa 64 8b 04 25 18 00 00 00 85 c0 75 10 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 51 c3 48 83 ec 28 89 54 24 1c 48 89 74 24 10
+[ 352.836202] RSP: 002b:00007ffcbb368e28 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+[ 352.837524] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f7bee973c17
+[ 352.838715] RDX: 0000000000000000 RSI: 00007ffcbb368e50 RDI: 0000000000000003
+[ 352.839838] RBP: 00007ffcbb36d090 R08: 00000000cea96d79 R09: 00007f7beea34a40
+[ 352.841021] R10: 00000000004059bb R11: 0000000000000246 R12: 000000000046563f
+[ 352.842208] R13: 0000000000000000 R14: 0000000000000000 R15: 00007ffcbb36d088
+
+[ 352.843784] Allocated by task 2960:
+[ 352.844451] kasan_save_stack+0x1b/0x40
+[ 352.845173] __kasan_kmalloc+0x7c/0x90
+[ 352.845873] fl_change+0x282/0x22db [cls_flower]
+[ 352.846696] tc_new_tfilter+0x6cf/0x1180
+[ 352.847493] rtnetlink_rcv_msg+0x471/0x550
+[ 352.848323] netlink_rcv_skb+0xc6/0x1f0
+[ 352.849097] netlink_unicast+0x353/0x480
+[ 352.849886] netlink_sendmsg+0x396/0x680
+[ 352.850678] sock_sendmsg+0x6c/0x80
+[ 352.851398] ____sys_sendmsg+0x3a5/0x3c0
+[ 352.852202] ___sys_sendmsg+0xd8/0x140
+[ 352.852967] __sys_sendmsg+0xb3/0x130
+[ 352.853718] do_syscall_64+0x35/0x80
+[ 352.854457] entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+[ 352.855830] Freed by task 7:
+[ 352.856421] kasan_save_stack+0x1b/0x40
+[ 352.857139] kasan_set_track+0x1c/0x30
+[ 352.857854] kasan_set_free_info+0x20/0x30
+[ 352.858609] __kasan_slab_free+0xed/0x130
+[ 352.859348] kfree+0xa7/0x3c0
+[ 352.859951] process_one_work+0x44d/0x780
+[ 352.860685] worker_thread+0x2e2/0x7e0
+[ 352.861390] kthread+0x1f4/0x220
+[ 352.862022] ret_from_fork+0x1f/0x30
+
+[ 352.862955] Last potentially related work creation:
+[ 352.863758] kasan_save_stack+0x1b/0x40
+[ 352.864378] kasan_record_aux_stack+0xab/0xc0
+[ 352.865028] insert_work+0x30/0x160
+[ 352.865617] __queue_work+0x351/0x670
+[ 352.866261] rcu_work_rcufn+0x30/0x40
+[ 352.866917] rcu_core+0x3b2/0xdb0
+[ 352.867561] __do_softirq+0xf6/0x386
+
+[ 352.868708] Second to last potentially related work creation:
+[ 352.869779] kasan_save_stack+0x1b/0x40
+[ 352.870560] kasan_record_aux_stack+0xab/0xc0
+[ 352.871426] call_rcu+0x5f/0x5c0
+[ 352.872108] queue_rcu_work+0x44/0x50
+[ 352.872855] __fl_put+0x17c/0x240 [cls_flower]
+[ 352.873733] fl_delete+0xc7/0x100 [cls_flower]
+[ 352.874607] tc_del_tfilter+0x510/0xb30
+[ 352.886085] rtnetlink_rcv_msg+0x471/0x550
+[ 352.886875] netlink_rcv_skb+0xc6/0x1f0
+[ 352.887636] netlink_unicast+0x353/0x480
+[ 352.888285] netlink_sendmsg+0x396/0x680
+[ 352.888942] sock_sendmsg+0x6c/0x80
+[ 352.889583] ____sys_sendmsg+0x3a5/0x3c0
+[ 352.890311] ___sys_sendmsg+0xd8/0x140
+[ 352.891019] __sys_sendmsg+0xb3/0x130
+[ 352.891716] do_syscall_64+0x35/0x80
+[ 352.892395] entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+[ 352.893666] The buggy address belongs to the object at ffff8881c8251000
+ which belongs to the cache kmalloc-2k of size 2048
+[ 352.895696] The buggy address is located 1152 bytes inside of
+ 2048-byte region [ffff8881c8251000, ffff8881c8251800)
+[ 352.897640] The buggy address belongs to the page:
+[ 352.898492] page:00000000213bac35 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x1c8250
+[ 352.900110] head:00000000213bac35 order:3 compound_mapcount:0 compound_pincount:0
+[ 352.901541] flags: 0x2ffff800010200(slab|head|node=0|zone=2|lastcpupid=0x1ffff)
+[ 352.902908] raw: 002ffff800010200 0000000000000000 dead000000000122 ffff888100042f00
+[ 352.904391] raw: 0000000000000000 0000000000080008 00000001ffffffff 0000000000000000
+[ 352.905861] page dumped because: kasan: bad access detected
+
+[ 352.907323] Memory state around the buggy address:
+[ 352.908218] ffff8881c8251380: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 352.909471] ffff8881c8251400: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 352.910735] >ffff8881c8251480: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 352.912012] ^
+[ 352.912642] ffff8881c8251500: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 352.913919] ffff8881c8251580: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+[ 352.915185] ==================================================================
+
+Fixes: d39d714969cd ("idr: introduce idr_for_each_entry_continue_ul()")
+Signed-off-by: Vlad Buslov <vladbu@nvidia.com>
+Acked-by: Cong Wang <cong.wang@bytedance.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/cls_flower.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
+index a5212a3f86e2..8ff6945b9f8f 100644
+--- a/net/sched/cls_flower.c
++++ b/net/sched/cls_flower.c
+@@ -2169,18 +2169,24 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
+
+ arg->count = arg->skip;
+
++ rcu_read_lock();
+ idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
+ /* don't return filters that are being deleted */
+ if (!refcount_inc_not_zero(&f->refcnt))
+ continue;
++ rcu_read_unlock();
++
+ if (arg->fn(tp, f, arg) < 0) {
+ __fl_put(f);
+ arg->stop = 1;
++ rcu_read_lock();
+ break;
+ }
+ __fl_put(f);
+ arg->count++;
++ rcu_read_lock();
+ }
++ rcu_read_unlock();
+ arg->cookie = id;
+ }
+
+--
+2.33.0
+
--- /dev/null
+From d1af538f41117bf9646e6eae2dfe8779df1ec8b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Sep 2021 08:19:03 -0700
+Subject: perf/x86/intel: Update event constraints for ICX
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+[ Upstream commit ecc2123e09f9e71ddc6c53d71e283b8ada685fe2 ]
+
+According to the latest event list, the event encoding 0xEF is only
+available on the first 4 counters. Add it into the event constraints
+table.
+
+Fixes: 6017608936c1 ("perf/x86/intel: Add Icelake support")
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/1632842343-25862-1-git-send-email-kan.liang@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/intel/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 3b8b8eede1a8..4684bf9fcc42 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -263,6 +263,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
+ INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
++ INTEL_EVENT_CONSTRAINT(0xef, 0xf),
+ INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
+ EVENT_CONSTRAINT_END
+ };
+--
+2.33.0
+
--- /dev/null
+From dea3a6a92c8a41463cb7aa76db9fc707ec2849a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Sep 2021 17:33:44 +0800
+Subject: RDMA/cma: Fix listener leak in rdma_cma_listen_on_all() failure
+
+From: Tao Liu <thomas.liu@ucloud.cn>
+
+[ Upstream commit ca465e1f1f9b38fe916a36f7d80c5d25f2337c81 ]
+
+If cma_listen_on_all() fails it leaves the per-device ID still on the
+listen_list but the state is not set to RDMA_CM_ADDR_BOUND.
+
+When the cmid is eventually destroyed cma_cancel_listens() is not called
+due to the wrong state, however the per-device IDs are still holding the
+refcount preventing the ID from being destroyed, thus deadlocking:
+
+ task:rping state:D stack: 0 pid:19605 ppid: 47036 flags:0x00000084
+ Call Trace:
+ __schedule+0x29a/0x780
+ ? free_unref_page_commit+0x9b/0x110
+ schedule+0x3c/0xa0
+ schedule_timeout+0x215/0x2b0
+ ? __flush_work+0x19e/0x1e0
+ wait_for_completion+0x8d/0xf0
+ _destroy_id+0x144/0x210 [rdma_cm]
+ ucma_close_id+0x2b/0x40 [rdma_ucm]
+ __destroy_id+0x93/0x2c0 [rdma_ucm]
+ ? __xa_erase+0x4a/0xa0
+ ucma_destroy_id+0x9a/0x120 [rdma_ucm]
+ ucma_write+0xb8/0x130 [rdma_ucm]
+ vfs_write+0xb4/0x250
+ ksys_write+0xb5/0xd0
+ ? syscall_trace_enter.isra.19+0x123/0x190
+ do_syscall_64+0x33/0x40
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Ensure that cma_listen_on_all() atomically unwinds its action under the
+lock during error.
+
+Fixes: c80a0c52d85c ("RDMA/cma: Add missing error handling of listen_id")
+Link: https://lore.kernel.org/r/20210913093344.17230-1-thomas.liu@ucloud.cn
+Signed-off-by: Tao Liu <thomas.liu@ucloud.cn>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cma.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 3029e96161b5..8e54184566f7 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -1750,15 +1750,16 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
+ }
+ }
+
+-static void cma_cancel_listens(struct rdma_id_private *id_priv)
++static void _cma_cancel_listens(struct rdma_id_private *id_priv)
+ {
+ struct rdma_id_private *dev_id_priv;
+
++ lockdep_assert_held(&lock);
++
+ /*
+ * Remove from listen_any_list to prevent added devices from spawning
+ * additional listen requests.
+ */
+- mutex_lock(&lock);
+ list_del(&id_priv->list);
+
+ while (!list_empty(&id_priv->listen_list)) {
+@@ -1772,6 +1773,12 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
+ rdma_destroy_id(&dev_id_priv->id);
+ mutex_lock(&lock);
+ }
++}
++
++static void cma_cancel_listens(struct rdma_id_private *id_priv)
++{
++ mutex_lock(&lock);
++ _cma_cancel_listens(id_priv);
+ mutex_unlock(&lock);
+ }
+
+@@ -2582,7 +2589,7 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
+ return 0;
+
+ err_listen:
+- list_del(&id_priv->list);
++ _cma_cancel_listens(id_priv);
+ mutex_unlock(&lock);
+ if (to_destroy)
+ rdma_destroy_id(&to_destroy->id);
+--
+2.33.0
+
--- /dev/null
+From ef11d67f5ce52731a68a00a7b2c3f992673194ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Dec 2020 09:37:36 +0800
+Subject: RDMA/hns: Fix inaccurate prints
+
+From: Yixing Liu <liuyixing1@huawei.com>
+
+[ Upstream commit 61918e9b008492f48577692428aca3cebf56111a ]
+
+Some %d in print format string should be %u, and some prints miss the
+useful errno or are in nonstandard format. Just fix above issues.
+
+Link: https://lore.kernel.org/r/1607650657-35992-11-git-send-email-liweihang@huawei.com
+Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
+Signed-off-by: Weihang Li <liweihang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_alloc.c | 4 +-
+ drivers/infiniband/hw/hns/hns_roce_cq.c | 35 +++++++------
+ drivers/infiniband/hw/hns/hns_roce_hem.c | 18 +++----
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 30 +++++------
+ drivers/infiniband/hw/hns/hns_roce_mr.c | 10 ++--
+ drivers/infiniband/hw/hns/hns_roce_pd.c | 2 +-
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 61 +++++++++++++---------
+ drivers/infiniband/hw/hns/hns_roce_srq.c | 37 +++++++------
+ 8 files changed, 107 insertions(+), 90 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
+index a6b23dec1adc..5b2baf89d110 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
++++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
+@@ -240,7 +240,7 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+ end = start + buf_cnt;
+ if (end > buf->npages) {
+ dev_err(hr_dev->dev,
+- "Failed to check kmem bufs, end %d + %d total %d!\n",
++ "failed to check kmem bufs, end %d + %d total %u!\n",
+ start, buf_cnt, buf->npages);
+ return -EINVAL;
+ }
+@@ -262,7 +262,7 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+ u64 addr;
+
+ if (page_shift < HNS_HW_PAGE_SHIFT) {
+- dev_err(hr_dev->dev, "Failed to check umem page shift %d!\n",
++ dev_err(hr_dev->dev, "failed to check umem page shift %u!\n",
+ page_shift);
+ return -EINVAL;
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
+index da346129f6e9..8a6bded9c11c 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
+@@ -50,29 +50,29 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+
+ ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
+ &dma_handle);
+- if (ret < 1) {
+- ibdev_err(ibdev, "Failed to find CQ mtr\n");
++ if (!ret) {
++ ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
+ return -EINVAL;
+ }
+
+ cq_table = &hr_dev->cq_table;
+ ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc CQ bitmap, err %d\n", ret);
++ ibdev_err(ibdev, "failed to alloc CQ bitmap, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* Get CQC memory HEM(Hardware Entry Memory) table */
+ ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to get CQ(0x%lx) context, err %d\n",
++ ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
+ hr_cq->cqn, ret);
+ goto err_out;
+ }
+
+ ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
+ if (ret) {
+- ibdev_err(ibdev, "Failed to xa_store CQ\n");
++ ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
+ goto err_put;
+ }
+
+@@ -91,7 +91,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret) {
+ ibdev_err(ibdev,
+- "Failed to send create cmd for CQ(0x%lx), err %d\n",
++ "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
+ hr_cq->cqn, ret);
+ goto err_xa;
+ }
+@@ -147,7 +147,7 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ {
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ struct hns_roce_buf_attr buf_attr = {};
+- int err;
++ int ret;
+
+ buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
+@@ -155,13 +155,13 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ buf_attr.region_count = 1;
+ buf_attr.fixed_page = true;
+
+- err = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
++ ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
+ hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ udata, addr);
+- if (err)
+- ibdev_err(ibdev, "Failed to alloc CQ mtr, err %d\n", err);
++ if (ret)
++ ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);
+
+- return err;
++ return ret;
+ }
+
+ static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+@@ -252,13 +252,13 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ int ret;
+
+ if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
+- ibdev_err(ibdev, "Failed to check CQ count %d max=%d\n",
++ ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
+ cq_entries, hr_dev->caps.max_cqes);
+ return -EINVAL;
+ }
+
+ if (vector >= hr_dev->caps.num_comp_vectors) {
+- ibdev_err(ibdev, "Failed to check CQ vector=%d max=%d\n",
++ ibdev_err(ibdev, "failed to check CQ vector = %d, max = %d.\n",
+ vector, hr_dev->caps.num_comp_vectors);
+ return -EINVAL;
+ }
+@@ -276,7 +276,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+ ret = ib_copy_from_udata(&ucmd, udata,
+ min(udata->inlen, sizeof(ucmd)));
+ if (ret) {
+- ibdev_err(ibdev, "Failed to copy CQ udata, err %d\n",
++ ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n",
+ ret);
+ return ret;
+ }
+@@ -286,19 +286,20 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
+
+ ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
++ ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc CQ db, err %d\n", ret);
++ ibdev_err(ibdev, "failed to alloc CQ db, ret = %d.\n", ret);
+ goto err_cq_buf;
+ }
+
+ ret = alloc_cqc(hr_dev, hr_cq);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc CQ context, err %d\n", ret);
++ ibdev_err(ibdev,
++ "failed to alloc CQ context, ret = %d.\n", ret);
+ goto err_cq_db;
+ }
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index 66f9f036ef94..c880a8be7e3c 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -184,7 +184,7 @@ static int get_hem_table_config(struct hns_roce_dev *hr_dev,
+ mhop->hop_num = hr_dev->caps.srqc_hop_num;
+ break;
+ default:
+- dev_err(dev, "Table %d not support multi-hop addressing!\n",
++ dev_err(dev, "table %u not support multi-hop addressing!\n",
+ type);
+ return -EINVAL;
+ }
+@@ -232,8 +232,8 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
+ mhop->l0_idx = table_idx;
+ break;
+ default:
+- dev_err(dev, "Table %d not support hop_num = %d!\n",
+- table->type, mhop->hop_num);
++ dev_err(dev, "table %u not support hop_num = %u!\n",
++ table->type, mhop->hop_num);
+ return -EINVAL;
+ }
+ if (mhop->l0_idx >= mhop->ba_l0_num)
+@@ -438,13 +438,13 @@ static int calc_hem_config(struct hns_roce_dev *hr_dev,
+ index->buf = l0_idx;
+ break;
+ default:
+- ibdev_err(ibdev, "Table %d not support mhop.hop_num = %d!\n",
++ ibdev_err(ibdev, "table %u not support mhop.hop_num = %u!\n",
+ table->type, mhop->hop_num);
+ return -EINVAL;
+ }
+
+ if (unlikely(index->buf >= table->num_hem)) {
+- ibdev_err(ibdev, "Table %d exceed hem limt idx %llu,max %lu!\n",
++ ibdev_err(ibdev, "table %u exceed hem limt idx %llu, max %lu!\n",
+ table->type, index->buf, table->num_hem);
+ return -EINVAL;
+ }
+@@ -714,15 +714,15 @@ static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
+ step_idx = hop_num;
+
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx))
+- ibdev_warn(ibdev, "Clear hop%d HEM failed.\n", hop_num);
++ ibdev_warn(ibdev, "failed to clear hop%u HEM.\n", hop_num);
+
+ if (index->inited & HEM_INDEX_L1)
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
+- ibdev_warn(ibdev, "Clear HEM step 1 failed.\n");
++ ibdev_warn(ibdev, "failed to clear HEM step 1.\n");
+
+ if (index->inited & HEM_INDEX_L0)
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
+- ibdev_warn(ibdev, "Clear HEM step 0 failed.\n");
++ ibdev_warn(ibdev, "failed to clear HEM step 0.\n");
+ }
+ }
+
+@@ -1234,7 +1234,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
+ }
+
+ if (offset < r->offset) {
+- dev_err(hr_dev->dev, "invalid offset %d,min %d!\n",
++ dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
+ offset, r->offset);
+ return -EINVAL;
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index ebcf26dec1e3..c29ba8ee51e2 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -361,7 +361,7 @@ static int check_send_valid(struct hns_roce_dev *hr_dev,
+ } else if (unlikely(hr_qp->state == IB_QPS_RESET ||
+ hr_qp->state == IB_QPS_INIT ||
+ hr_qp->state == IB_QPS_RTR)) {
+- ibdev_err(ibdev, "failed to post WQE, QP state %d!\n",
++ ibdev_err(ibdev, "failed to post WQE, QP state %hhu!\n",
+ hr_qp->state);
+ return -EINVAL;
+ } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
+@@ -665,7 +665,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
+ wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
+
+ if (unlikely(wr->num_sge > qp->sq.max_gs)) {
+- ibdev_err(ibdev, "num_sge=%d > qp->sq.max_gs=%d\n",
++ ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n",
+ wr->num_sge, qp->sq.max_gs);
+ ret = -EINVAL;
+ *bad_wr = wr;
+@@ -750,7 +750,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
+ wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
+
+ if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
+- ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
++ ibdev_err(ibdev, "num_sge = %d >= max_sge = %u.\n",
+ wr->num_sge, hr_qp->rq.max_gs);
+ ret = -EINVAL;
+ *bad_wr = wr;
+@@ -1920,8 +1920,8 @@ static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
+ obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
+ break;
+ default:
+- pr_err("Table %d not support hop_num = %d!\n", hem_type,
+- hop_num);
++ pr_err("table %u not support hop_num = %u!\n", hem_type,
++ hop_num);
+ return;
+ }
+
+@@ -3562,7 +3562,7 @@ static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
+ break;
+ default:
+ dev_warn(hr_dev->dev,
+- "Table %d not to be written by mailbox!\n", type);
++ "table %u not to be written by mailbox!\n", type);
+ return -EINVAL;
+ }
+
+@@ -3681,7 +3681,7 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
+ op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+ break;
+ default:
+- dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
++ dev_warn(dev, "table %u not to be destroyed by mailbox!\n",
+ table->type);
+ return 0;
+ }
+@@ -4318,7 +4318,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
+
+ ret = config_qp_sq_buf(hr_dev, hr_qp, context, qpc_mask);
+ if (ret) {
+- ibdev_err(ibdev, "failed to config sq buf, ret %d\n", ret);
++ ibdev_err(ibdev, "failed to config sq buf, ret = %d.\n", ret);
+ return ret;
+ }
+
+@@ -4804,7 +4804,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
+ /* SW pass context to HW */
+ ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp);
+ if (ret) {
+- ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret);
++ ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret);
+ goto out;
+ }
+
+@@ -4897,7 +4897,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+
+ ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
+ if (ret) {
+- ibdev_err(ibdev, "failed to query QPC, ret = %d\n", ret);
++ ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret);
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -5018,7 +5018,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ hr_qp->state, IB_QPS_RESET);
+ if (ret)
+ ibdev_err(ibdev,
+- "failed to modify QP to RST, ret = %d\n",
++ "failed to modify QP to RST, ret = %d.\n",
+ ret);
+ }
+
+@@ -5057,7 +5057,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
+ if (ret)
+ ibdev_err(&hr_dev->ib_dev,
+- "failed to destroy QP 0x%06lx, ret = %d\n",
++ "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n",
+ hr_qp->qpn, ret);
+
+ hns_roce_qp_destroy(hr_dev, hr_qp, udata);
+@@ -5080,7 +5080,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+- ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d\n", ret);
++ ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d.\n", ret);
+ goto out;
+ }
+
+@@ -5090,7 +5090,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
+ clr->qpn = cpu_to_le32(hr_qp->qpn);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+- ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d\n", ret);
++ ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d.\n", ret);
+ goto out;
+ }
+
+@@ -5339,7 +5339,7 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret)
+ ibdev_err(&hr_dev->ib_dev,
+- "failed to process cmd when modifying CQ, ret = %d\n",
++ "failed to process cmd when modifying CQ, ret = %d.\n",
+ ret);
+
+ return ret;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 7f81a695e9af..027ec8413ac2 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -185,14 +185,14 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
+ else
+ ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
+ if (ret) {
+- dev_err(dev, "Write mtpt fail!\n");
++ dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
+ goto err_page;
+ }
+
+ ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ if (ret) {
+- dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
++ dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
+ goto err_page;
+ }
+
+@@ -495,7 +495,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+
+ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+ if (ret < 1) {
+- ibdev_err(ibdev, "failed to store sg pages %d %d, cnt = %d.\n",
++ ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
+ mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+ goto err_page_list;
+ }
+@@ -862,7 +862,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ if (r->offset + r->count > page_cnt) {
+ err = -EINVAL;
+ ibdev_err(ibdev,
+- "Failed to check mtr%d end %d + %d, max %d\n",
++ "failed to check mtr%u end %u + %u, max %u.\n",
+ i, r->offset, r->count, page_cnt);
+ return err;
+ }
+@@ -870,7 +870,7 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ err = mtr_map_region(hr_dev, mtr, &pages[r->offset], r);
+ if (err) {
+ ibdev_err(ibdev,
+- "Failed to map mtr%d offset %d, err %d\n",
++ "failed to map mtr%u offset %u, ret = %d.\n",
+ i, r->offset, err);
+ return err;
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
+index f78fa1d3d807..012a769d6a6a 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
++++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
+@@ -65,7 +65,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+
+ ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
+ if (ret) {
+- ibdev_err(ib_dev, "failed to alloc pd, ret = %d\n", ret);
++ ibdev_err(ib_dev, "failed to alloc pd, ret = %d.\n", ret);
+ return ret;
+ }
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 7ce9ad8aee1e..291e06d63150 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -452,12 +452,12 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
+ /* Sanity check SQ size before proceeding */
+ if (ucmd->log_sq_stride > max_sq_stride ||
+ ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
+- ibdev_err(&hr_dev->ib_dev, "Failed to check SQ stride size\n");
++ ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
+ return -EINVAL;
+ }
+
+ if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
+- ibdev_err(&hr_dev->ib_dev, "Failed to check SQ SGE size %d\n",
++ ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
+ cap->max_send_sge);
+ return -EINVAL;
+ }
+@@ -563,7 +563,7 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
+
+ cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
+ if (cnt > hr_dev->caps.max_wqes) {
+- ibdev_err(ibdev, "failed to check WQE num, WQE num = %d.\n",
++ ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
+ cnt);
+ return -EINVAL;
+ }
+@@ -736,7 +736,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ &hr_qp->sdb);
+ if (ret) {
+ ibdev_err(ibdev,
+- "Failed to map user SQ doorbell\n");
++ "failed to map user SQ doorbell, ret = %d.\n",
++ ret);
+ goto err_out;
+ }
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
+@@ -747,7 +748,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ &hr_qp->rdb);
+ if (ret) {
+ ibdev_err(ibdev,
+- "Failed to map user RQ doorbell\n");
++ "failed to map user RQ doorbell, ret = %d.\n",
++ ret);
+ goto err_sdb;
+ }
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
+@@ -763,7 +765,8 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
+ if (ret) {
+ ibdev_err(ibdev,
+- "Failed to alloc kernel RQ doorbell\n");
++ "failed to alloc kernel RQ doorbell, ret = %d.\n",
++ ret);
+ goto err_out;
+ }
+ *hr_qp->rdb.db_record = 0;
+@@ -806,14 +809,14 @@ static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
+
+ sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(sq_wrid)) {
+- ibdev_err(ibdev, "Failed to alloc SQ wrid\n");
++ ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
+ return -ENOMEM;
+ }
+
+ if (hr_qp->rq.wqe_cnt) {
+ rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(rq_wrid)) {
+- ibdev_err(ibdev, "Failed to alloc RQ wrid\n");
++ ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
+ ret = -ENOMEM;
+ goto err_sq;
+ }
+@@ -873,7 +876,9 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+
+ ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
+ if (ret)
+- ibdev_err(ibdev, "Failed to set user SQ size\n");
++ ibdev_err(ibdev,
++ "failed to set user SQ size, ret = %d.\n",
++ ret);
+ } else {
+ if (init_attr->create_flags &
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
+@@ -888,7 +893,9 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+
+ ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
+ if (ret)
+- ibdev_err(ibdev, "Failed to set kernel SQ size\n");
++ ibdev_err(ibdev,
++ "failed to set kernel SQ size, ret = %d.\n",
++ ret);
+ }
+
+ return ret;
+@@ -914,45 +921,48 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+
+ ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to set QP param\n");
++ ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
+ return ret;
+ }
+
+ if (!udata) {
+ ret = alloc_kernel_wrid(hr_dev, hr_qp);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc wrid\n");
++ ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
++ ret);
+ return ret;
+ }
+ }
+
+ ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc QP doorbell\n");
++ ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
++ ret);
+ goto err_wrid;
+ }
+
+ ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc QP buffer\n");
++ ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
+ goto err_db;
+ }
+
+ ret = alloc_qpn(hr_dev, hr_qp);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc QPN\n");
++ ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
+ goto err_buf;
+ }
+
+ ret = alloc_qpc(hr_dev, hr_qp);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc QP context\n");
++ ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
++ ret);
+ goto err_qpn;
+ }
+
+ ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to store QP\n");
++ ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
+ goto err_qpc;
+ }
+
+@@ -1098,9 +1108,8 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+
+ if ((attr_mask & IB_QP_PORT) &&
+ (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
+- ibdev_err(&hr_dev->ib_dev,
+- "attr port_num invalid.attr->port_num=%d\n",
+- attr->port_num);
++ ibdev_err(&hr_dev->ib_dev, "invalid attr, port_num = %u.\n",
++ attr->port_num);
+ return -EINVAL;
+ }
+
+@@ -1108,8 +1117,8 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+ if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
+ ibdev_err(&hr_dev->ib_dev,
+- "attr pkey_index invalid.attr->pkey_index=%d\n",
+- attr->pkey_index);
++ "invalid attr, pkey_index = %u.\n",
++ attr->pkey_index);
+ return -EINVAL;
+ }
+ }
+@@ -1117,16 +1126,16 @@ static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
+ ibdev_err(&hr_dev->ib_dev,
+- "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
+- attr->max_rd_atomic);
++ "invalid attr, max_rd_atomic = %u.\n",
++ attr->max_rd_atomic);
+ return -EINVAL;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
+ ibdev_err(&hr_dev->ib_dev,
+- "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
+- attr->max_dest_rd_atomic);
++ "invalid attr, max_dest_rd_atomic = %u.\n",
++ attr->max_dest_rd_atomic);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
+index 75d74f4bb52c..f27523e1a12d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -93,7 +93,8 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
+ ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
+ if (ret < 1) {
+- ibdev_err(ibdev, "Failed to find mtr for SRQ WQE\n");
++ ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
++ ret);
+ return -ENOBUFS;
+ }
+
+@@ -101,32 +102,34 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
+ ARRAY_SIZE(mtts_idx), &dma_handle_idx);
+ if (ret < 1) {
+- ibdev_err(ibdev, "Failed to find mtr for SRQ idx\n");
++ ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
++ ret);
+ return -ENOBUFS;
+ }
+
+ ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc SRQ number, err %d\n", ret);
++ ibdev_err(ibdev,
++ "failed to alloc SRQ number, ret = %d.\n", ret);
+ return -ENOMEM;
+ }
+
+ ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to get SRQC table, err %d\n", ret);
++ ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
+ goto err_out;
+ }
+
+ ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
+ if (ret) {
+- ibdev_err(ibdev, "Failed to store SRQC, err %d\n", ret);
++ ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
+ goto err_put;
+ }
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR_OR_NULL(mailbox)) {
+ ret = -ENOMEM;
+- ibdev_err(ibdev, "Failed to alloc mailbox for SRQC\n");
++ ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
+ goto err_xa;
+ }
+
+@@ -137,7 +140,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to config SRQC, err %d\n", ret);
++ ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
+ goto err_xa;
+ }
+
+@@ -198,7 +201,8 @@ static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ hr_dev->caps.srqwqe_ba_pg_sz +
+ HNS_HW_PAGE_SHIFT, udata, addr);
+ if (err)
+- ibdev_err(ibdev, "Failed to alloc SRQ buf mtr, err %d\n", err);
++ ibdev_err(ibdev,
++ "failed to alloc SRQ buf mtr, ret = %d.\n", err);
+
+ return err;
+ }
+@@ -229,14 +233,15 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
+ udata, addr);
+ if (err) {
+- ibdev_err(ibdev, "Failed to alloc SRQ idx mtr, err %d\n", err);
++ ibdev_err(ibdev,
++ "failed to alloc SRQ idx mtr, ret = %d.\n", err);
+ return err;
+ }
+
+ if (!udata) {
+ idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
+ if (!idx_que->bitmap) {
+- ibdev_err(ibdev, "Failed to alloc SRQ idx bitmap\n");
++ ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
+ err = -ENOMEM;
+ goto err_idx_mtr;
+ }
+@@ -303,7 +308,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
+ ret = ib_copy_from_udata(&ucmd, udata,
+ min(udata->inlen, sizeof(ucmd)));
+ if (ret) {
+- ibdev_err(ibdev, "Failed to copy SRQ udata, err %d\n",
++ ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n",
+ ret);
+ return ret;
+ }
+@@ -311,20 +316,21 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
+
+ ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc SRQ buffer, err %d\n", ret);
++ ibdev_err(ibdev,
++ "failed to alloc SRQ buffer, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc SRQ idx, err %d\n", ret);
++ ibdev_err(ibdev, "failed to alloc SRQ idx, ret = %d.\n", ret);
+ goto err_buf_alloc;
+ }
+
+ if (!udata) {
+ ret = alloc_srq_wrid(hr_dev, srq);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc SRQ wrid, err %d\n",
++ ibdev_err(ibdev, "failed to alloc SRQ wrid, ret = %d.\n",
+ ret);
+ goto err_idx_alloc;
+ }
+@@ -336,7 +342,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
+
+ ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
+ if (ret) {
+- ibdev_err(ibdev, "Failed to alloc SRQ context, err %d\n", ret);
++ ibdev_err(ibdev,
++ "failed to alloc SRQ context, ret = %d.\n", ret);
+ goto err_wrid_alloc;
+ }
+
+--
+2.33.0
+
--- /dev/null
+From 1dffc4ca97fe53bc24592853657e492a40d063d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Sep 2021 06:33:15 -0600
+Subject: Revert "block, bfq: honor already-setup queue merges"
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit ebc69e897e17373fbe1daaff1debaa77583a5284 ]
+
+This reverts commit 2d52c58b9c9bdae0ca3df6a1eab5745ab3f7d80b.
+
+We have had several folks complain that this causes hangs for them, which
+is especially problematic as the commit has also hit stable already.
+
+As no resolution seems to be forthcoming right now, revert the patch.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=214503
+Fixes: 2d52c58b9c9b ("block, bfq: honor already-setup queue merges")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/bfq-iosched.c | 16 +++-------------
+ 1 file changed, 3 insertions(+), 13 deletions(-)
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 65c200e0ecb5..b8c2ddc01aec 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2526,15 +2526,6 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+ * are likely to increase the throughput.
+ */
+ bfqq->new_bfqq = new_bfqq;
+- /*
+- * The above assignment schedules the following redirections:
+- * each time some I/O for bfqq arrives, the process that
+- * generated that I/O is disassociated from bfqq and
+- * associated with new_bfqq. Here we increases new_bfqq->ref
+- * in advance, adding the number of processes that are
+- * expected to be associated with new_bfqq as they happen to
+- * issue I/O.
+- */
+ new_bfqq->ref += process_refs;
+ return new_bfqq;
+ }
+@@ -2594,10 +2585,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ {
+ struct bfq_queue *in_service_bfqq, *new_bfqq;
+
+- /* if a merge has already been setup, then proceed with that first */
+- if (bfqq->new_bfqq)
+- return bfqq->new_bfqq;
+-
+ /*
+ * Do not perform queue merging if the device is non
+ * rotational and performs internal queueing. In fact, such a
+@@ -2652,6 +2639,9 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ if (bfq_too_late_for_merging(bfqq))
+ return NULL;
+
++ if (bfqq->new_bfqq)
++ return bfqq->new_bfqq;
++
+ if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
+ return NULL;
+
+--
+2.33.0
+
--- /dev/null
+From 82bac7309849ee4d7c54b2aa273a5b335e75db62 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Sep 2021 21:44:08 +0530
+Subject: scsi: csiostor: Add module softdep on cxgb4
+
+From: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+
+[ Upstream commit 79a7482249a7353bc86aff8127954d5febf02472 ]
+
+Both cxgb4 and csiostor drivers run on their own independent Physical
+Function. But when cxgb4 and csiostor are both being loaded in parallel via
+modprobe, there is a race when firmware upgrade is attempted by both the
+drivers.
+
+When the cxgb4 driver initiates the firmware upgrade, it halts the firmware
+and the chip until upgrade is complete. When the csiostor driver is coming
+up in parallel, the firmware mailbox communication fails with timeouts and
+the csiostor driver probe fails.
+
+Add a module soft dependency on cxgb4 driver to ensure loading csiostor
+triggers cxgb4 to load first when available to avoid the firmware upgrade
+race.
+
+Link: https://lore.kernel.org/r/1632759248-15382-1-git-send-email-rahul.lakkireddy@chelsio.com
+Fixes: a3667aaed569 ("[SCSI] csiostor: Chelsio FCoE offload driver")
+Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/csiostor/csio_init.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
+index 390b07bf92b9..ccbded3353bd 100644
+--- a/drivers/scsi/csiostor/csio_init.c
++++ b/drivers/scsi/csiostor/csio_init.c
+@@ -1254,3 +1254,4 @@ MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
+ MODULE_VERSION(CSIO_DRV_VERSION);
+ MODULE_FIRMWARE(FW_FNAME_T5);
+ MODULE_FIRMWARE(FW_FNAME_T6);
++MODULE_SOFTDEP("pre: cxgb4");
+--
+2.33.0
+
--- /dev/null
+From 463003f4d1351c7739cd502a8dfa690efb8127c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Sep 2021 00:05:04 -0400
+Subject: sctp: break out if skb_header_pointer returns NULL in sctp_rcv_ootb
+
+From: Xin Long <lucien.xin@gmail.com>
+
+[ Upstream commit f7e745f8e94492a8ac0b0a26e25f2b19d342918f ]
+
+We should always check if skb_header_pointer's return is NULL before
+using it, otherwise it may cause null-ptr-deref, as syzbot reported:
+
+ KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007]
+ RIP: 0010:sctp_rcv_ootb net/sctp/input.c:705 [inline]
+ RIP: 0010:sctp_rcv+0x1d84/0x3220 net/sctp/input.c:196
+ Call Trace:
+ <IRQ>
+ sctp6_rcv+0x38/0x60 net/sctp/ipv6.c:1109
+ ip6_protocol_deliver_rcu+0x2e9/0x1ca0 net/ipv6/ip6_input.c:422
+ ip6_input_finish+0x62/0x170 net/ipv6/ip6_input.c:463
+ NF_HOOK include/linux/netfilter.h:307 [inline]
+ NF_HOOK include/linux/netfilter.h:301 [inline]
+ ip6_input+0x9c/0xd0 net/ipv6/ip6_input.c:472
+ dst_input include/net/dst.h:460 [inline]
+ ip6_rcv_finish net/ipv6/ip6_input.c:76 [inline]
+ NF_HOOK include/linux/netfilter.h:307 [inline]
+ NF_HOOK include/linux/netfilter.h:301 [inline]
+ ipv6_rcv+0x28c/0x3c0 net/ipv6/ip6_input.c:297
+
+Fixes: 3acb50c18d8d ("sctp: delay as much as possible skb_linearize")
+Reported-by: syzbot+581aff2ae6b860625116@syzkaller.appspotmail.com
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sctp/input.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index 49c49a4d203f..34494a0b28bd 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -677,7 +677,7 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
+ ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
+
+ /* Break out if chunk length is less then minimal. */
+- if (ntohs(ch->length) < sizeof(_ch))
++ if (!ch || ntohs(ch->length) < sizeof(_ch))
+ break;
+
+ ch_end = offset + SCTP_PAD4(ntohs(ch->length));
+--
+2.33.0
+
--- /dev/null
+From 98252a14c8bbc7f9c5bd0a1e4959a80e90a7569b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Sep 2021 18:01:36 +0200
+Subject: selftests, bpf: Fix makefile dependencies on libbpf
+
+From: Jiri Benc <jbenc@redhat.com>
+
+[ Upstream commit d888eaac4fb1df30320bb1305a8f78efe86524c6 ]
+
+When building bpf selftest with make -j, I'm randomly getting build failures
+such as this one:
+
+ In file included from progs/bpf_flow.c:19:
+ [...]/tools/testing/selftests/bpf/tools/include/bpf/bpf_helpers.h:11:10: fatal error: 'bpf_helper_defs.h' file not found
+ #include "bpf_helper_defs.h"
+ ^~~~~~~~~~~~~~~~~~~
+
+The file that fails the build varies between runs but it's always in the
+progs/ subdir.
+
+The reason is a missing make dependency on libbpf for the .o files in
+progs/. There was a dependency before commit 3ac2e20fba07e but that commit
+removed it to prevent unneeded rebuilds. However, that only works if libbpf
+has been built already; the 'wildcard' prerequisite does not trigger when
+there's no bpf_helper_defs.h generated yet.
+
+Keep the libbpf as an order-only prerequisite to satisfy both goals. It is
+always built before the progs/ objects but it does not trigger unnecessary
+rebuilds by itself.
+
+Fixes: 3ac2e20fba07e ("selftests/bpf: BPF object files should depend only on libbpf headers")
+Signed-off-by: Jiri Benc <jbenc@redhat.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/ee84ab66436fba05a197f952af23c98d90eb6243.1632758415.git.jbenc@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/bpf/Makefile | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index b5322d60068c..1d9155533360 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -326,7 +326,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
+ $(TRUNNER_BPF_PROGS_DIR)/%.c \
+ $(TRUNNER_BPF_PROGS_DIR)/*.h \
+ $$(INCLUDE_DIR)/vmlinux.h \
+- $(wildcard $(BPFDIR)/bpf_*.h) | $(TRUNNER_OUTPUT)
++ $(wildcard $(BPFDIR)/bpf_*.h) \
++ | $(TRUNNER_OUTPUT) $$(BPFOBJ)
+ $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
+ $(TRUNNER_BPF_CFLAGS), \
+ $(TRUNNER_BPF_LDFLAGS))
+--
+2.33.0
+
--- /dev/null
+From 2325f37340514f31f1a8d23912e07d59346fa525 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Sep 2021 10:40:22 +0200
+Subject: selftests, bpf: test_lwt_ip_encap: Really disable rp_filter
+
+From: Jiri Benc <jbenc@redhat.com>
+
+[ Upstream commit 79e2c306667542b8ee2d9a9d947eadc7039f0a3c ]
+
+It's not enough to set net.ipv4.conf.all.rp_filter=0, that does not override
+a greater rp_filter value on the individual interfaces. We also need to set
+net.ipv4.conf.default.rp_filter=0 before creating the interfaces. That way,
+they'll also get their own rp_filter value of zero.
+
+Fixes: 0fde56e4385b0 ("selftests: bpf: add test_lwt_ip_encap selftest")
+Signed-off-by: Jiri Benc <jbenc@redhat.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/b1cdd9d469f09ea6e01e9c89a6071c79b7380f89.1632386362.git.jbenc@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/bpf/test_lwt_ip_encap.sh | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+index 59ea56945e6c..b497bb85b667 100755
+--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
++++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+@@ -112,6 +112,14 @@ setup()
+ ip netns add "${NS2}"
+ ip netns add "${NS3}"
+
++ # rp_filter gets confused by what these tests are doing, so disable it
++ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
++ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
++ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
++ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0
++ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0
++ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0
++
+ ip link add veth1 type veth peer name veth2
+ ip link add veth3 type veth peer name veth4
+ ip link add veth5 type veth peer name veth6
+@@ -236,11 +244,6 @@ setup()
+ ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}
+ ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}
+
+- # rp_filter gets confused by what these tests are doing, so disable it
+- ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
+- ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
+- ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
+-
+ TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)
+
+ sleep 1 # reduce flakiness
+--
+2.33.0
+
rdma-cma-do-not-change-route.addr.src_addr.ss_family.patch
drm-amd-display-pass-pci-deviceid-into-dc.patch
drm-amdgpu-correct-initial-cp_hqd_quantum-for-gfx9.patch
+ipvs-check-that-ip_vs_conn_tab_bits-is-between-8-and.patch
+bpf-handle-return-value-of-bpf_prog_type_struct_ops-.patch
+ib-cma-do-not-send-igmp-leaves-for-sendonly-multicas.patch
+rdma-cma-fix-listener-leak-in-rdma_cma_listen_on_all.patch
+bpf-mips-validate-conditional-branch-offsets.patch
+hwmon-mlxreg-fan-return-non-zero-value-when-fan-curr.patch
+mac80211-fix-ieee80211_amsdu_aggregate-frag_tail-bug.patch
+mac80211-limit-injected-vht-mcs-nss-in-ieee80211_par.patch
+mac80211-mesh-fix-potentially-unaligned-access.patch
+mac80211-hwsim-fix-late-beacon-hrtimer-handling.patch
+sctp-break-out-if-skb_header_pointer-returns-null-in.patch
+mptcp-don-t-return-sockets-in-foreign-netns.patch
+hwmon-tmp421-report-pvld-condition-as-fault.patch
+hwmon-tmp421-fix-rounding-for-negative-values.patch
+net-enetc-fix-the-incorrect-clearing-of-if_mode-bits.patch
+net-ipv4-fix-rtnexthop-len-when-rta_flow-is-present.patch
+smsc95xx-fix-stalled-rx-after-link-change.patch
+drm-i915-request-fix-early-tracepoints.patch
+dsa-mv88e6xxx-6161-use-chip-wide-max-mtu.patch
+dsa-mv88e6xxx-fix-mtu-definition.patch
+dsa-mv88e6xxx-include-tagger-overhead-when-setting-m.patch
+e100-fix-length-calculation-in-e100_get_regs_len.patch
+e100-fix-buffer-overrun-in-e100_get_regs.patch
+rdma-hns-fix-inaccurate-prints.patch
+bpf-exempt-cap_bpf-from-checks-against-bpf_jit_limit.patch
+selftests-bpf-fix-makefile-dependencies-on-libbpf.patch
+selftests-bpf-test_lwt_ip_encap-really-disable-rp_fi.patch
+net-ks8851-fix-link-error.patch
+revert-block-bfq-honor-already-setup-queue-merges.patch
+scsi-csiostor-add-module-softdep-on-cxgb4.patch
+ixgbe-fix-null-pointer-dereference-in-ixgbe_xdp_setu.patch
+net-hns3-do-not-allow-call-hns3_nic_net_open-repeate.patch
+net-hns3-keep-mac-pause-mode-when-multiple-tcs-are-e.patch
+net-hns3-fix-mixed-flag-hclge_flag_mqprio_enable-and.patch
+net-hns3-fix-show-wrong-state-when-add-existing-uc-m.patch
+net-hns3-fix-prototype-warning.patch
+net-hns3-reconstruct-function-hns3_self_test.patch
+net-hns3-fix-always-enable-rx-vlan-filter-problem-af.patch
+net-phy-bcm7xxx-fixed-indirect-mmd-operations.patch
+net-sched-flower-protect-fl_walk-with-rcu.patch
+af_unix-fix-races-in-sk_peer_pid-and-sk_peer_cred-ac.patch
+perf-x86-intel-update-event-constraints-for-icx.patch
+hwmon-pmbus-mp2975-add-missed-pout-attribute-for-pag.patch
--- /dev/null
+From f3f83ae19532c07445f2e1d0eb369c6734c3226d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Sep 2021 01:00:16 +0300
+Subject: smsc95xx: fix stalled rx after link change
+
+From: Aaro Koskinen <aaro.koskinen@iki.fi>
+
+[ Upstream commit 5ab8a447bcfee1ded709e7ff5dc7608ca9f66ae2 ]
+
+After commit 05b35e7eb9a1 ("smsc95xx: add phylib support"), link changes
+are no longer propagated to usbnet. As a result, rx URB allocation won't
+happen until there is a packet sent out first (this might never happen,
+e.g. running just ssh server with a static IP). Fix by triggering usbnet
+EVENT_LINK_CHANGE.
+
+Fixes: 05b35e7eb9a1 ("smsc95xx: add phylib support")
+Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/smsc95xx.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index ea0d5f04dc3a..465e11dcdf12 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -1178,7 +1178,10 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
+
+ static void smsc95xx_handle_link_change(struct net_device *net)
+ {
++ struct usbnet *dev = netdev_priv(net);
++
+ phy_print_status(net->phydev);
++ usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
+ }
+
+ static int smsc95xx_start_phy(struct usbnet *dev)
+--
+2.33.0
+