From 6d37be3dd7057eb3858e5d345beba46cab5f759f Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Sun, 7 Apr 2024 08:53:25 -0400 Subject: [PATCH] Fixes for 6.8 Signed-off-by: Sasha Levin --- .../bpf-introduce-in_sleepable-helper.patch | 94 +++++++ ...le-flag-from-bpf_prog_aux-to-bpf_pro.patch | 264 ++++++++++++++++++ ...rring-bpf_link-dealloc-to-after-rcu-.patch | 171 ++++++++++++ ...ush-gfxoff-requests-in-prepare-stage.patch | 47 ++++ ...w-correction-in-e1000_shutdown-funct.patch | 51 ++++ ...e-smbus-from-enable-ulp-function-to-.patch | 99 +++++++ ...-support-for-allowing-zero-sev-asids.patch | 102 +++++++ ...gned-integers-when-dealing-with-asid.patch | 139 +++++++++ ...d_variance-drop-always-failing-tests.patch | 87 ++++++ ...unt-accept-of-non-mpc-client-as-fall.patch | 112 ++++++++ ...vb-always-process-tx-descriptor-ring.patch | 55 ++++ ...et-ravb-always-update-error-counters.patch | 66 +++++ ...specific-receive-function-to-interro.patch | 63 +++++ ...csi-sg-avoid-sg-device-teardown-race.patch | 56 ++++ ...ptcp-connect-fix-shellcheck-warnings.patch | 242 ++++++++++++++++ ...mptcp-use-operator-to-append-strings.patch | 259 +++++++++++++++++ queue-6.8/series | 17 ++ ...i-check-for-notifications-after-init.patch | 70 +++++ 18 files changed, 1994 insertions(+) create mode 100644 queue-6.8/bpf-introduce-in_sleepable-helper.patch create mode 100644 queue-6.8/bpf-move-sleepable-flag-from-bpf_prog_aux-to-bpf_pro.patch create mode 100644 queue-6.8/bpf-support-deferring-bpf_link-dealloc-to-after-rcu-.patch create mode 100644 queue-6.8/drm-amd-flush-gfxoff-requests-in-prepare-stage.patch create mode 100644 queue-6.8/e1000e-minor-flow-correction-in-e1000_shutdown-funct.patch create mode 100644 queue-6.8/e1000e-move-force-smbus-from-enable-ulp-function-to-.patch create mode 100644 queue-6.8/kvm-svm-add-support-for-allowing-zero-sev-asids.patch create mode 100644 queue-6.8/kvm-svm-use-unsigned-integers-when-dealing-with-asid.patch create mode 100644 queue-6.8/mean_and_variance-drop-always-failing-tests.patch create mode 100644 queue-6.8/mptcp-don-t-account-accept-of-non-mpc-client-as-fall.patch create mode 100644 queue-6.8/net-ravb-always-process-tx-descriptor-ring.patch create mode 100644 queue-6.8/net-ravb-always-update-error-counters.patch create mode 100644 queue-6.8/net-ravb-let-ip-specific-receive-function-to-interro.patch create mode 100644 queue-6.8/scsi-sg-avoid-sg-device-teardown-race.patch create mode 100644 queue-6.8/selftests-mptcp-connect-fix-shellcheck-warnings.patch create mode 100644 queue-6.8/selftests-mptcp-use-operator-to-append-strings.patch create mode 100644 queue-6.8/usb-typec-ucsi-check-for-notifications-after-init.patch diff --git a/queue-6.8/bpf-introduce-in_sleepable-helper.patch b/queue-6.8/bpf-introduce-in_sleepable-helper.patch new file mode 100644 index 00000000000..c7fa49bf2f5 --- /dev/null +++ b/queue-6.8/bpf-introduce-in_sleepable-helper.patch @@ -0,0 +1,94 @@ +From 52f440367cd83e89dfbaffacc620b302f4af9e52 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 21 Feb 2024 17:25:18 +0100 +Subject: bpf: introduce in_sleepable() helper + +From: Benjamin Tissoires + +[ Upstream commit dfe6625df48ec54c6dc9b86d361f26962d09de88 ] + +No code change, but it'll allow to have only one place to change +everything when we add in_sleepable in cur_state. 
+ +Signed-off-by: Benjamin Tissoires +Link: https://lore.kernel.org/r/20240221-hid-bpf-sleepable-v3-2-1fb378ca6301@kernel.org +Signed-off-by: Alexei Starovoitov +Stable-dep-of: 1a80dbcb2dba ("bpf: support deferring bpf_link dealloc to after RCU grace period") +Signed-off-by: Sasha Levin +--- + kernel/bpf/verifier.c | 17 +++++++++++------ + 1 file changed, 11 insertions(+), 6 deletions(-) + +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 19e575e6b7fe0..a3cfe6f458d3b 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -5211,6 +5211,11 @@ static int map_kptr_match_type(struct bpf_verifier_env *env, + return -EINVAL; + } + ++static bool in_sleepable(struct bpf_verifier_env *env) ++{ ++ return env->prog->aux->sleepable; ++} ++ + /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock() + * can dereference RCU protected pointers and result is PTR_TRUSTED. + */ +@@ -5218,7 +5223,7 @@ static bool in_rcu_cs(struct bpf_verifier_env *env) + { + return env->cur_state->active_rcu_lock || + env->cur_state->active_lock.ptr || +- !env->prog->aux->sleepable; ++ !in_sleepable(env); + } + + /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */ +@@ -10099,7 +10104,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn + return -EINVAL; + } + +- if (!env->prog->aux->sleepable && fn->might_sleep) { ++ if (!in_sleepable(env) && fn->might_sleep) { + verbose(env, "helper call might sleep in a non-sleepable prog\n"); + return -EINVAL; + } +@@ -10129,7 +10134,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn + return -EINVAL; + } + +- if (env->prog->aux->sleepable && is_storage_get_function(func_id)) ++ if (in_sleepable(env) && is_storage_get_function(func_id)) + env->insn_aux_data[insn_idx].storage_get_func_atomic = true; + } + +@@ -11488,7 +11493,7 @@ static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env) + return true; + fallthrough; + default: +- return env->prog->aux->sleepable; ++ return in_sleepable(env); + } + } + +@@ -12009,7 +12014,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, + } + + sleepable = is_kfunc_sleepable(&meta); +- if (sleepable && !env->prog->aux->sleepable) { ++ if (sleepable && !in_sleepable(env)) { + verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name); + return -EACCES; + } +@@ -19575,7 +19580,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) + } + + if (is_storage_get_function(insn->imm)) { +- if (!env->prog->aux->sleepable || ++ if (!in_sleepable(env) || + env->insn_aux_data[i + delta].storage_get_func_atomic) + insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC); + else +-- +2.43.0 + diff --git a/queue-6.8/bpf-move-sleepable-flag-from-bpf_prog_aux-to-bpf_pro.patch b/queue-6.8/bpf-move-sleepable-flag-from-bpf_prog_aux-to-bpf_pro.patch new file mode 100644 index 00000000000..9e88ebc0ebc --- /dev/null +++ b/queue-6.8/bpf-move-sleepable-flag-from-bpf_prog_aux-to-bpf_pro.patch @@ -0,0 +1,264 @@ +From 457a0367cb033532fe9d0947057bc49ac7ce393c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 16:47:39 -0800 +Subject: bpf: move sleepable flag from bpf_prog_aux to bpf_prog + +From: Andrii Nakryiko + +[ Upstream commit 66c8473135c62f478301a0e5b3012f203562dfa6 ] + +prog->aux->sleepable is checked very frequently as part of (some) BPF +program run hot paths. 
So this extra aux indirection seems wasteful and +on busy systems might cause unnecessary memory cache misses. + +Let's move sleepable flag into prog itself to eliminate unnecessary +pointer dereference. + +Signed-off-by: Andrii Nakryiko +Acked-by: Jiri Olsa +Message-ID: <20240309004739.2961431-1-andrii@kernel.org> +Signed-off-by: Alexei Starovoitov +Stable-dep-of: 1a80dbcb2dba ("bpf: support deferring bpf_link dealloc to after RCU grace period") +Signed-off-by: Sasha Levin +--- + include/linux/bpf.h | 8 ++++---- + kernel/bpf/bpf_iter.c | 4 ++-- + kernel/bpf/core.c | 2 +- + kernel/bpf/syscall.c | 6 +++--- + kernel/bpf/trampoline.c | 4 ++-- + kernel/bpf/verifier.c | 12 ++++++------ + kernel/events/core.c | 2 +- + kernel/trace/bpf_trace.c | 2 +- + net/bpf/bpf_dummy_struct_ops.c | 2 +- + 9 files changed, 21 insertions(+), 21 deletions(-) + +diff --git a/include/linux/bpf.h b/include/linux/bpf.h +index e30100597d0a9..1b6b590451284 100644 +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -1451,7 +1451,6 @@ struct bpf_prog_aux { + bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ + bool attach_tracing_prog; /* true if tracing another tracing program */ + bool func_proto_unreliable; +- bool sleepable; + bool tail_call_reachable; + bool xdp_has_frags; + bool exception_cb; +@@ -1535,7 +1534,8 @@ struct bpf_prog { + enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ + call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ + call_get_func_ip:1, /* Do we call get_func_ip() */ +- tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */ ++ tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */ ++ sleepable:1; /* BPF program is sleepable */ + enum bpf_prog_type type; /* Type of BPF program */ + enum bpf_attach_type expected_attach_type; /* For some prog types */ + u32 len; /* Number of filter blocks */ +@@ -2029,14 +2029,14 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu, + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + item = &array->items[0]; + while ((prog = READ_ONCE(item->prog))) { +- if (!prog->aux->sleepable) ++ if (!prog->sleepable) + rcu_read_lock(); + + run_ctx.bpf_cookie = item->bpf_cookie; + ret &= run_prog(prog, ctx); + item++; + +- if (!prog->aux->sleepable) ++ if (!prog->sleepable) + rcu_read_unlock(); + } + bpf_reset_run_ctx(old_run_ctx); +diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c +index 0fae791641870..112581cf97e7f 100644 +--- a/kernel/bpf/bpf_iter.c ++++ b/kernel/bpf/bpf_iter.c +@@ -548,7 +548,7 @@ int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, + return -ENOENT; + + /* Only allow sleepable program for resched-able iterator */ +- if (prog->aux->sleepable && !bpf_iter_target_support_resched(tinfo)) ++ if (prog->sleepable && !bpf_iter_target_support_resched(tinfo)) + return -EINVAL; + + link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN); +@@ -697,7 +697,7 @@ int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx) + struct bpf_run_ctx run_ctx, *old_run_ctx; + int ret; + +- if (prog->aux->sleepable) { ++ if (prog->sleepable) { + rcu_read_lock_trace(); + migrate_disable(); + might_fault(); +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c +index 026627226ec48..5fe749de7fde6 100644 +--- a/kernel/bpf/core.c ++++ b/kernel/bpf/core.c +@@ -2700,7 +2700,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux, + bool sleepable; + u32 i; + +- sleepable = aux->sleepable; ++ sleepable = aux->prog->sleepable; + for (i = 0; i < len; i++) { 
+ map = used_maps[i]; + if (map->ops->map_poke_untrack) +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c +index a1f18681721c7..11a8ea854c1d5 100644 +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -2160,7 +2160,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) + btf_put(prog->aux->attach_btf); + + if (deferred) { +- if (prog->aux->sleepable) ++ if (prog->sleepable) + call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu); + else + call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); +@@ -2691,11 +2691,11 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size) + } + + prog->expected_attach_type = attr->expected_attach_type; ++ prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE); + prog->aux->attach_btf = attach_btf; + prog->aux->attach_btf_id = attr->attach_btf_id; + prog->aux->dst_prog = dst_prog; + prog->aux->dev_bound = !!attr->prog_ifindex; +- prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE; + prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS; + + err = security_bpf_prog_alloc(prog->aux); +@@ -5394,7 +5394,7 @@ static int bpf_prog_bind_map(union bpf_attr *attr) + /* The bpf program will not access the bpf map, but for the sake of + * simplicity, increase sleepable_refcnt for sleepable program as well. + */ +- if (prog->aux->sleepable) ++ if (prog->sleepable) + atomic64_inc(&map->sleepable_refcnt); + memcpy(used_maps_new, used_maps_old, + sizeof(used_maps_old[0]) * prog->aux->used_map_cnt); +diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c +index d382f5ebe06c8..db7599c59c78a 100644 +--- a/kernel/bpf/trampoline.c ++++ b/kernel/bpf/trampoline.c +@@ -1014,7 +1014,7 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr) + + bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog) + { +- bool sleepable = prog->aux->sleepable; ++ bool sleepable = prog->sleepable; + + if (bpf_prog_check_recur(prog)) + return sleepable ? __bpf_prog_enter_sleepable_recur : +@@ -1029,7 +1029,7 @@ bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog) + + bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog) + { +- bool sleepable = prog->aux->sleepable; ++ bool sleepable = prog->sleepable; + + if (bpf_prog_check_recur(prog)) + return sleepable ? __bpf_prog_exit_sleepable_recur : +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index a3cfe6f458d3b..af3ef7a5a013c 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -5213,7 +5213,7 @@ static int map_kptr_match_type(struct bpf_verifier_env *env, + + static bool in_sleepable(struct bpf_verifier_env *env) + { +- return env->prog->aux->sleepable; ++ return env->prog->sleepable; + } + + /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock() +@@ -17919,7 +17919,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, + return -EINVAL; + } + +- if (prog->aux->sleepable) ++ if (prog->sleepable) + switch (map->map_type) { + case BPF_MAP_TYPE_HASH: + case BPF_MAP_TYPE_LRU_HASH: +@@ -18104,7 +18104,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env) + return -E2BIG; + } + +- if (env->prog->aux->sleepable) ++ if (env->prog->sleepable) + atomic64_inc(&map->sleepable_refcnt); + /* hold the map. 
If the program is rejected by verifier, + * the map will be released by release_maps() or it +@@ -20552,7 +20552,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log, + } + } + +- if (prog->aux->sleepable) { ++ if (prog->sleepable) { + ret = -EINVAL; + switch (prog->type) { + case BPF_PROG_TYPE_TRACING: +@@ -20663,14 +20663,14 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) + u64 key; + + if (prog->type == BPF_PROG_TYPE_SYSCALL) { +- if (prog->aux->sleepable) ++ if (prog->sleepable) + /* attach_btf_id checked to be zero already */ + return 0; + verbose(env, "Syscall programs can only be sleepable\n"); + return -EINVAL; + } + +- if (prog->aux->sleepable && !can_be_sleepable(prog)) { ++ if (prog->sleepable && !can_be_sleepable(prog)) { + verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n"); + return -EINVAL; + } +diff --git a/kernel/events/core.c b/kernel/events/core.c +index f0f0f71213a1d..b4305b3ae84fb 100644 +--- a/kernel/events/core.c ++++ b/kernel/events/core.c +@@ -10557,7 +10557,7 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, + (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) + return -EINVAL; + +- if (prog->type == BPF_PROG_TYPE_KPROBE && prog->aux->sleepable && !is_uprobe) ++ if (prog->type == BPF_PROG_TYPE_KPROBE && prog->sleepable && !is_uprobe) + /* only uprobe programs are allowed to be sleepable */ + return -EINVAL; + +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index 7ac6c52b25ebc..22d555a42404f 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -3241,7 +3241,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe, + .uprobe = uprobe, + }; + struct bpf_prog *prog = link->link.prog; +- bool sleepable = prog->aux->sleepable; ++ bool sleepable = prog->sleepable; + struct bpf_run_ctx *old_run_ctx; + int err = 0; + +diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c +index 8906f7bdf4a92..0b1083d7ca03a 100644 +--- a/net/bpf/bpf_dummy_struct_ops.c ++++ b/net/bpf/bpf_dummy_struct_ops.c +@@ -169,7 +169,7 @@ static int bpf_dummy_ops_check_member(const struct btf_type *t, + case offsetof(struct bpf_dummy_ops, test_sleepable): + break; + default: +- if (prog->aux->sleepable) ++ if (prog->sleepable) + return -EINVAL; + } + +-- +2.43.0 + diff --git a/queue-6.8/bpf-support-deferring-bpf_link-dealloc-to-after-rcu-.patch b/queue-6.8/bpf-support-deferring-bpf_link-dealloc-to-after-rcu-.patch new file mode 100644 index 00000000000..f7b4a76e6b5 --- /dev/null +++ b/queue-6.8/bpf-support-deferring-bpf_link-dealloc-to-after-rcu-.patch @@ -0,0 +1,171 @@ +From a37ac5c2b029abcd8bbd8fa268ad0d3004da8b23 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 27 Mar 2024 22:24:26 -0700 +Subject: bpf: support deferring bpf_link dealloc to after RCU grace period + +From: Andrii Nakryiko + +[ Upstream commit 1a80dbcb2dbaf6e4c216e62e30fa7d3daa8001ce ] + +BPF link for some program types is passed as a "context" which can be +used by those BPF programs to look up additional information. E.g., for +multi-kprobes and multi-uprobes, link is used to fetch BPF cookie values. + +Because of this runtime dependency, when bpf_link refcnt drops to zero +there could still be active BPF programs running accessing link data. + +This patch adds generic support to defer bpf_link dealloc callback to +after RCU GP, if requested. This is done by exposing two different +deallocation callbacks, one synchronous and one deferred. 
If deferred +one is provided, bpf_link_free() will schedule dealloc_deferred() +callback to happen after RCU GP. + +BPF is using two flavors of RCU: "classic" non-sleepable one and RCU +tasks trace one. The latter is used when sleepable BPF programs are +used. bpf_link_free() accommodates that by checking underlying BPF +program's sleepable flag, and goes either through normal RCU GP only for +non-sleepable, or through RCU tasks trace GP *and* then normal RCU GP +(taking into account rcu_trace_implies_rcu_gp() optimization), if BPF +program is sleepable. + +We use this for multi-kprobe and multi-uprobe links, which dereference +link during program run. We also preventively switch raw_tp link to use +deferred dealloc callback, as upcoming changes in bpf-next tree expose +raw_tp link data (specifically, cookie value) to BPF program at runtime +as well. + +Fixes: 0dcac2725406 ("bpf: Add multi kprobe link") +Fixes: 89ae89f53d20 ("bpf: Add multi uprobe link") +Reported-by: syzbot+981935d9485a560bfbcb@syzkaller.appspotmail.com +Reported-by: syzbot+2cb5a6c573e98db598cc@syzkaller.appspotmail.com +Reported-by: syzbot+62d8b26793e8a2bd0516@syzkaller.appspotmail.com +Signed-off-by: Andrii Nakryiko +Acked-by: Jiri Olsa +Link: https://lore.kernel.org/r/20240328052426.3042617-2-andrii@kernel.org +Signed-off-by: Alexei Starovoitov +Signed-off-by: Sasha Levin +--- + include/linux/bpf.h | 16 +++++++++++++++- + kernel/bpf/syscall.c | 35 ++++++++++++++++++++++++++++++++--- + kernel/trace/bpf_trace.c | 4 ++-- + 3 files changed, 49 insertions(+), 6 deletions(-) + +diff --git a/include/linux/bpf.h b/include/linux/bpf.h +index 1b6b590451284..893a7ec57bf25 100644 +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -1568,12 +1568,26 @@ struct bpf_link { + enum bpf_link_type type; + const struct bpf_link_ops *ops; + struct bpf_prog *prog; +- struct work_struct work; ++ /* rcu is used before freeing, work can be used to schedule that ++ * RCU-based freeing before that, so they never overlap ++ */ ++ union { ++ struct rcu_head rcu; ++ struct work_struct work; ++ }; + }; + + struct bpf_link_ops { + void (*release)(struct bpf_link *link); ++ /* deallocate link resources callback, called without RCU grace period ++ * waiting ++ */ + void (*dealloc)(struct bpf_link *link); ++ /* deallocate link resources callback, called after RCU grace period; ++ * if underlying BPF program is sleepable we go through tasks trace ++ * RCU GP and then "classic" RCU GP ++ */ ++ void (*dealloc_deferred)(struct bpf_link *link); + int (*detach)(struct bpf_link *link); + int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, + struct bpf_prog *old_prog); +diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c +index 11a8ea854c1d5..83ec7f788a638 100644 +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -2895,17 +2895,46 @@ void bpf_link_inc(struct bpf_link *link) + atomic64_inc(&link->refcnt); + } + ++static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu) ++{ ++ struct bpf_link *link = container_of(rcu, struct bpf_link, rcu); ++ ++ /* free bpf_link and its containing memory */ ++ link->ops->dealloc_deferred(link); ++} ++ ++static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu) ++{ ++ if (rcu_trace_implies_rcu_gp()) ++ bpf_link_defer_dealloc_rcu_gp(rcu); ++ else ++ call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp); ++} ++ + /* bpf_link_free is guaranteed to be called from process context */ + static void bpf_link_free(struct bpf_link *link) + { ++ bool sleepable = false; ++ + 
bpf_link_free_id(link->id); + if (link->prog) { ++ sleepable = link->prog->sleepable; + /* detach BPF program, clean up used resources */ + link->ops->release(link); + bpf_prog_put(link->prog); + } +- /* free bpf_link and its containing memory */ +- link->ops->dealloc(link); ++ if (link->ops->dealloc_deferred) { ++ /* schedule BPF link deallocation; if underlying BPF program ++ * is sleepable, we need to first wait for RCU tasks trace ++ * sync, then go through "classic" RCU grace period ++ */ ++ if (sleepable) ++ call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp); ++ else ++ call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp); ++ } ++ if (link->ops->dealloc) ++ link->ops->dealloc(link); + } + + static void bpf_link_put_deferred(struct work_struct *work) +@@ -3415,7 +3444,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link, + + static const struct bpf_link_ops bpf_raw_tp_link_lops = { + .release = bpf_raw_tp_link_release, +- .dealloc = bpf_raw_tp_link_dealloc, ++ .dealloc_deferred = bpf_raw_tp_link_dealloc, + .show_fdinfo = bpf_raw_tp_link_show_fdinfo, + .fill_link_info = bpf_raw_tp_link_fill_link_info, + }; +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index 22d555a42404f..c7f9236eed628 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -2713,7 +2713,7 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link, + + static const struct bpf_link_ops bpf_kprobe_multi_link_lops = { + .release = bpf_kprobe_multi_link_release, +- .dealloc = bpf_kprobe_multi_link_dealloc, ++ .dealloc_deferred = bpf_kprobe_multi_link_dealloc, + .fill_link_info = bpf_kprobe_multi_link_fill_link_info, + }; + +@@ -3227,7 +3227,7 @@ static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link, + + static const struct bpf_link_ops bpf_uprobe_multi_link_lops = { + .release = bpf_uprobe_multi_link_release, +- .dealloc = bpf_uprobe_multi_link_dealloc, ++ .dealloc_deferred = bpf_uprobe_multi_link_dealloc, + .fill_link_info = bpf_uprobe_multi_link_fill_link_info, + }; + +-- +2.43.0 + diff --git a/queue-6.8/drm-amd-flush-gfxoff-requests-in-prepare-stage.patch b/queue-6.8/drm-amd-flush-gfxoff-requests-in-prepare-stage.patch new file mode 100644 index 00000000000..4856a1cd4f9 --- /dev/null +++ b/queue-6.8/drm-amd-flush-gfxoff-requests-in-prepare-stage.patch @@ -0,0 +1,47 @@ +From b1222a796ee965c013034f572b8eb8947147a7fd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 20 Mar 2024 13:32:21 -0500 +Subject: drm/amd: Flush GFXOFF requests in prepare stage + +From: Mario Limonciello + +[ Upstream commit ca299b4512d4b4f516732a48ce9aa19d91f4473e ] + +If the system hasn't entered GFXOFF when suspend starts it can cause +hangs accessing GC and RLC during the suspend stage. 
+ +Cc: # 6.1.y: 5095d5418193 ("drm/amd: Evict resources during PM ops prepare() callback") +Cc: # 6.1.y: cb11ca3233aa ("drm/amd: Add concept of running prepare_suspend() sequence for IP blocks") +Cc: # 6.1.y: 2ceec37b0e3d ("drm/amd: Add missing kernel doc for prepare_suspend()") +Cc: # 6.1.y: 3a9626c816db ("drm/amd: Stop evicting resources on APUs in suspend") +Cc: # 6.6.y: 5095d5418193 ("drm/amd: Evict resources during PM ops prepare() callback") +Cc: # 6.6.y: cb11ca3233aa ("drm/amd: Add concept of running prepare_suspend() sequence for IP blocks") +Cc: # 6.6.y: 2ceec37b0e3d ("drm/amd: Add missing kernel doc for prepare_suspend()") +Cc: # 6.6.y: 3a9626c816db ("drm/amd: Stop evicting resources on APUs in suspend") +Cc: # 6.1+ +Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3132 +Fixes: ab4750332dbe ("drm/amdgpu/sdma5.2: add begin/end_use ring callbacks") +Reviewed-by: Alex Deucher +Signed-off-by: Mario Limonciello +Signed-off-by: Alex Deucher +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ + 1 file changed, 2 insertions(+) + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index 94bdb5fa6ebc6..1fbaf7b81d69a 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -4524,6 +4524,8 @@ int amdgpu_device_prepare(struct drm_device *dev) + if (r) + goto unprepare; + ++ flush_delayed_work(&adev->gfx.gfx_off_delay_work); ++ + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; +-- +2.43.0 + diff --git a/queue-6.8/e1000e-minor-flow-correction-in-e1000_shutdown-funct.patch b/queue-6.8/e1000e-minor-flow-correction-in-e1000_shutdown-funct.patch new file mode 100644 index 00000000000..793d006ff80 --- /dev/null +++ b/queue-6.8/e1000e-minor-flow-correction-in-e1000_shutdown-funct.patch @@ -0,0 +1,51 @@ +From c492bb781087bb3d7746e2cfe0c07d11b1edc1f3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 1 Mar 2024 10:48:05 -0800 +Subject: e1000e: Minor flow correction in e1000_shutdown function + +From: Vitaly Lifshits + +[ Upstream commit 662200e324daebe6859c1f0f3ea1538b0561425a ] + +Add curly braces to avoid entering to an if statement where it is not +always required in e1000_shutdown function. +This improves code readability and might prevent non-deterministic +behaviour in the future. 
+ +Signed-off-by: Vitaly Lifshits +Tested-by: Naama Meir +Signed-off-by: Tony Nguyen +Link: https://lore.kernel.org/r/20240301184806.2634508-5-anthony.l.nguyen@intel.com +Signed-off-by: Jakub Kicinski +Stable-dep-of: 861e8086029e ("e1000e: move force SMBUS from enable ulp function to avoid PHY loss issue") +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/e1000e/netdev.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c +index af5d9d97a0d6c..cc8c531ec3dff 100644 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c +@@ -6688,14 +6688,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) + if (adapter->hw.phy.type == e1000_phy_igp_3) { + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + } else if (hw->mac.type >= e1000_pch_lpt) { +- if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) ++ if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) { + /* ULP does not support wake from unicast, multicast + * or broadcast. + */ + retval = e1000_enable_ulp_lpt_lp(hw, !runtime); +- +- if (retval) +- return retval; ++ if (retval) ++ return retval; ++ } + } + + /* Ensure that the appropriate bits are set in LPI_CTRL +-- +2.43.0 + diff --git a/queue-6.8/e1000e-move-force-smbus-from-enable-ulp-function-to-.patch b/queue-6.8/e1000e-move-force-smbus-from-enable-ulp-function-to-.patch new file mode 100644 index 00000000000..7d8ad97f29c --- /dev/null +++ b/queue-6.8/e1000e-move-force-smbus-from-enable-ulp-function-to-.patch @@ -0,0 +1,99 @@ +From a7f6910558e173ccf83b7126f23a2702e4085015 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 3 Mar 2024 12:51:32 +0200 +Subject: e1000e: move force SMBUS from enable ulp function to avoid PHY loss + issue + +From: Vitaly Lifshits + +[ Upstream commit 861e8086029e003305750b4126ecd6617465f5c7 ] + +Forcing SMBUS inside the ULP enabling flow leads to sporadic PHY loss on +some systems. It is suspected to be caused by initiating PHY transactions +before the interface settles. + +Separating this configuration from the ULP enabling flow and moving it to +the shutdown function allows enough time for the interface to settle and +avoids adding a delay. 
+ +Fixes: 6607c99e7034 ("e1000e: i219 - fix to enable both ULP and EEE in Sx state") +Co-developed-by: Dima Ruinskiy +Signed-off-by: Dima Ruinskiy +Signed-off-by: Vitaly Lifshits +Tested-by: Naama Meir +Signed-off-by: Tony Nguyen +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/intel/e1000e/ich8lan.c | 19 ------------------- + drivers/net/ethernet/intel/e1000e/netdev.c | 18 ++++++++++++++++++ + 2 files changed, 18 insertions(+), 19 deletions(-) + +diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c +index d8e97669f31b0..f9e94be36e97f 100644 +--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c ++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c +@@ -1165,25 +1165,6 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) + if (ret_val) + goto out; + +- /* Switching PHY interface always returns MDI error +- * so disable retry mechanism to avoid wasting time +- */ +- e1000e_disable_phy_retry(hw); +- +- /* Force SMBus mode in PHY */ +- ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); +- if (ret_val) +- goto release; +- phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; +- e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); +- +- e1000e_enable_phy_retry(hw); +- +- /* Force SMBus mode in MAC */ +- mac_reg = er32(CTRL_EXT); +- mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; +- ew32(CTRL_EXT, mac_reg); +- + /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable + * LPLU and disable Gig speed when entering ULP + */ +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c +index cc8c531ec3dff..3692fce201959 100644 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c +@@ -6623,6 +6623,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status, wufc; + int retval = 0; ++ u16 smb_ctrl; + + /* Runtime suspend should only enable wakeup for link changes */ + if (runtime) +@@ -6696,6 +6697,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) + if (retval) + return retval; + } ++ ++ /* Force SMBUS to allow WOL */ ++ /* Switching PHY interface always returns MDI error ++ * so disable retry mechanism to avoid wasting time ++ */ ++ e1000e_disable_phy_retry(hw); ++ ++ e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl); ++ smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS; ++ e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl); ++ ++ e1000e_enable_phy_retry(hw); ++ ++ /* Force SMBus mode in MAC */ ++ ctrl_ext = er32(CTRL_EXT); ++ ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS; ++ ew32(CTRL_EXT, ctrl_ext); + } + + /* Ensure that the appropriate bits are set in LPI_CTRL +-- +2.43.0 + diff --git a/queue-6.8/kvm-svm-add-support-for-allowing-zero-sev-asids.patch b/queue-6.8/kvm-svm-add-support-for-allowing-zero-sev-asids.patch new file mode 100644 index 00000000000..18478272539 --- /dev/null +++ b/queue-6.8/kvm-svm-add-support-for-allowing-zero-sev-asids.patch @@ -0,0 +1,102 @@ +From 401bf2f4c62c041204c60328e9c72536f4d5a4a1 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 31 Jan 2024 15:56:08 -0800 +Subject: KVM: SVM: Add support for allowing zero SEV ASIDs + +From: Ashish Kalra + +[ Upstream commit 0aa6b90ef9d75b4bd7b6d106d85f2a3437697f91 ] + +Some BIOSes allow the end user to set the minimum SEV ASID value +(CPUID 0x8000001F_EDX) to be greater than the maximum number of +encrypted guests, or maximum SEV ASID value (CPUID 0x8000001F_ECX) +in order to dedicate all the SEV ASIDs to SEV-ES or SEV-SNP. 
+ +The SEV support, as coded, does not handle the case where the minimum +SEV ASID value can be greater than the maximum SEV ASID value. +As a result, the following confusing message is issued: + +[ 30.715724] kvm_amd: SEV enabled (ASIDs 1007 - 1006) + +Fix the support to properly handle this case. + +Fixes: 916391a2d1dc ("KVM: SVM: Add support for SEV-ES capability in KVM") +Suggested-by: Sean Christopherson +Signed-off-by: Ashish Kalra +Cc: stable@vger.kernel.org +Acked-by: Tom Lendacky +Link: https://lore.kernel.org/r/20240104190520.62510-1-Ashish.Kalra@amd.com +Link: https://lore.kernel.org/r/20240131235609.4161407-4-seanjc@google.com +Signed-off-by: Sean Christopherson +Signed-off-by: Sasha Levin +--- + arch/x86/kvm/svm/sev.c | 29 +++++++++++++++++++---------- + 1 file changed, 19 insertions(+), 10 deletions(-) + +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c +index 4c841610277dc..86088d1250b3d 100644 +--- a/arch/x86/kvm/svm/sev.c ++++ b/arch/x86/kvm/svm/sev.c +@@ -144,10 +144,21 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev) + + static int sev_asid_new(struct kvm_sev_info *sev) + { +- unsigned int asid, min_asid, max_asid; ++ /* ++ * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. ++ * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1. ++ * Note: min ASID can end up larger than the max if basic SEV support is ++ * effectively disabled by disallowing use of ASIDs for SEV guests. ++ */ ++ unsigned int min_asid = sev->es_active ? 1 : min_sev_asid; ++ unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid; ++ unsigned int asid; + bool retry = true; + int ret; + ++ if (min_asid > max_asid) ++ return -ENOTTY; ++ + WARN_ON(sev->misc_cg); + sev->misc_cg = get_current_misc_cg(); + ret = sev_misc_cg_try_charge(sev); +@@ -159,12 +170,6 @@ static int sev_asid_new(struct kvm_sev_info *sev) + + mutex_lock(&sev_bitmap_lock); + +- /* +- * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. +- * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1. +- */ +- min_asid = sev->es_active ? 1 : min_sev_asid; +- max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid; + again: + asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid); + if (asid > max_asid) { +@@ -2239,8 +2244,10 @@ void __init sev_hardware_setup(void) + goto out; + } + +- sev_asid_count = max_sev_asid - min_sev_asid + 1; +- WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)); ++ if (min_sev_asid <= max_sev_asid) { ++ sev_asid_count = max_sev_asid - min_sev_asid + 1; ++ WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)); ++ } + sev_supported = true; + + /* SEV-ES support requested? */ +@@ -2271,7 +2278,9 @@ void __init sev_hardware_setup(void) + out: + if (boot_cpu_has(X86_FEATURE_SEV)) + pr_info("SEV %s (ASIDs %u - %u)\n", +- sev_supported ? "enabled" : "disabled", ++ sev_supported ? min_sev_asid <= max_sev_asid ? 
"enabled" : ++ "unusable" : ++ "disabled", + min_sev_asid, max_sev_asid); + if (boot_cpu_has(X86_FEATURE_SEV_ES)) + pr_info("SEV-ES %s (ASIDs %u - %u)\n", +-- +2.43.0 + diff --git a/queue-6.8/kvm-svm-use-unsigned-integers-when-dealing-with-asid.patch b/queue-6.8/kvm-svm-use-unsigned-integers-when-dealing-with-asid.patch new file mode 100644 index 00000000000..aacf0574089 --- /dev/null +++ b/queue-6.8/kvm-svm-use-unsigned-integers-when-dealing-with-asid.patch @@ -0,0 +1,139 @@ +From d9f99bb9361bb29fcc17fba29d890be78b5a9545 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 31 Jan 2024 15:56:07 -0800 +Subject: KVM: SVM: Use unsigned integers when dealing with ASIDs + +From: Sean Christopherson + +[ Upstream commit 466eec4a22a76c462781bf6d45cb02cbedf21a61 ] + +Convert all local ASID variables and parameters throughout the SEV code +from signed integers to unsigned integers. As ASIDs are fundamentally +unsigned values, and the global min/max variables are appropriately +unsigned integers, too. + +Functionally, this is a glorified nop as KVM guarantees min_sev_asid is +non-zero, and no CPU supports -1u as the _only_ asid, i.e. the signed vs. +unsigned goof won't cause problems in practice. + +Opportunistically use sev_get_asid() in sev_flush_encrypted_page() instead +of open coding an equivalent. + +Reviewed-by: Tom Lendacky +Link: https://lore.kernel.org/r/20240131235609.4161407-3-seanjc@google.com +Signed-off-by: Sean Christopherson +Stable-dep-of: 0aa6b90ef9d7 ("KVM: SVM: Add support for allowing zero SEV ASIDs") +Signed-off-by: Sasha Levin +--- + arch/x86/kvm/svm/sev.c | 18 ++++++++++-------- + arch/x86/kvm/trace.h | 10 +++++----- + 2 files changed, 15 insertions(+), 13 deletions(-) + +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c +index a8ce5226b3b57..4c841610277dc 100644 +--- a/arch/x86/kvm/svm/sev.c ++++ b/arch/x86/kvm/svm/sev.c +@@ -84,9 +84,10 @@ struct enc_region { + }; + + /* Called with the sev_bitmap_lock held, or on shutdown */ +-static int sev_flush_asids(int min_asid, int max_asid) ++static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid) + { +- int ret, asid, error = 0; ++ int ret, error = 0; ++ unsigned int asid; + + /* Check if there are any ASIDs to reclaim before performing a flush */ + asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid); +@@ -116,7 +117,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm) + } + + /* Must be called with the sev_bitmap_lock held */ +-static bool __sev_recycle_asids(int min_asid, int max_asid) ++static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid) + { + if (sev_flush_asids(min_asid, max_asid)) + return false; +@@ -143,8 +144,9 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev) + + static int sev_asid_new(struct kvm_sev_info *sev) + { +- int asid, min_asid, max_asid, ret; ++ unsigned int asid, min_asid, max_asid; + bool retry = true; ++ int ret; + + WARN_ON(sev->misc_cg); + sev->misc_cg = get_current_misc_cg(); +@@ -187,7 +189,7 @@ static int sev_asid_new(struct kvm_sev_info *sev) + return ret; + } + +-static int sev_get_asid(struct kvm *kvm) ++static unsigned int sev_get_asid(struct kvm *kvm) + { + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + +@@ -284,8 +286,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) + + static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error) + { ++ unsigned int asid = sev_get_asid(kvm); + struct sev_data_activate activate; +- int asid = sev_get_asid(kvm); + int ret; 
+ + /* activate ASID on the given handle */ +@@ -2317,7 +2319,7 @@ int sev_cpu_init(struct svm_cpu_data *sd) + */ + static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va) + { +- int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid; ++ unsigned int asid = sev_get_asid(vcpu->kvm); + + /* + * Note! The address must be a kernel address, as regular page walk +@@ -2635,7 +2637,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm) + void pre_sev_run(struct vcpu_svm *svm, int cpu) + { + struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu); +- int asid = sev_get_asid(svm->vcpu.kvm); ++ unsigned int asid = sev_get_asid(svm->vcpu.kvm); + + /* Assign the asid allocated with this SEV guest */ + svm->asid = asid; +diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h +index 83843379813ee..b82e6ed4f0241 100644 +--- a/arch/x86/kvm/trace.h ++++ b/arch/x86/kvm/trace.h +@@ -732,13 +732,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit, + * Tracepoint for nested #vmexit because of interrupt pending + */ + TRACE_EVENT(kvm_invlpga, +- TP_PROTO(__u64 rip, int asid, u64 address), ++ TP_PROTO(__u64 rip, unsigned int asid, u64 address), + TP_ARGS(rip, asid, address), + + TP_STRUCT__entry( +- __field( __u64, rip ) +- __field( int, asid ) +- __field( __u64, address ) ++ __field( __u64, rip ) ++ __field( unsigned int, asid ) ++ __field( __u64, address ) + ), + + TP_fast_assign( +@@ -747,7 +747,7 @@ TRACE_EVENT(kvm_invlpga, + __entry->address = address; + ), + +- TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx", ++ TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx", + __entry->rip, __entry->asid, __entry->address) + ); + +-- +2.43.0 + diff --git a/queue-6.8/mean_and_variance-drop-always-failing-tests.patch b/queue-6.8/mean_and_variance-drop-always-failing-tests.patch new file mode 100644 index 00000000000..7bb1fa5e7c5 --- /dev/null +++ b/queue-6.8/mean_and_variance-drop-always-failing-tests.patch @@ -0,0 +1,87 @@ +From b05228a68dffc30844ead89894739cf5d04366dc Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Sun, 25 Feb 2024 08:29:25 -0800 +Subject: mean_and_variance: Drop always failing tests + +From: Guenter Roeck + +[ Upstream commit 97ca7c1f93bbac6982717a7055cd727813c45e61 ] + +mean_and_variance_test_2 and mean_and_variance_test_4 always fail. +The input parameters to those tests are identical to the input parameters +to tests 1 and 3, yet the expected result for tests 2 and 4 is different +for the mean and stddev tests. That will always fail. + + Expected mean_and_variance_get_mean(mv) == mean[i], but + mean_and_variance_get_mean(mv) == 22 (0x16) + mean[i] == 10 (0xa) + +Drop the bad tests. 
+ +Fixes: 65bc41090720 ("mean and variance: More tests") +Closes: https://lore.kernel.org/lkml/065b94eb-6a24-4248-b7d7-d3212efb4787@roeck-us.net/ +Cc: Kent Overstreet +Signed-off-by: Guenter Roeck +Signed-off-by: Kent Overstreet +Signed-off-by: Sasha Levin +--- + fs/bcachefs/mean_and_variance_test.c | 28 +--------------------------- + 1 file changed, 1 insertion(+), 27 deletions(-) + +diff --git a/fs/bcachefs/mean_and_variance_test.c b/fs/bcachefs/mean_and_variance_test.c +index 019583c3ca0ea..51093fa848202 100644 +--- a/fs/bcachefs/mean_and_variance_test.c ++++ b/fs/bcachefs/mean_and_variance_test.c +@@ -130,20 +130,8 @@ static void mean_and_variance_test_1(struct kunit *test) + d, mean, stddev, weighted_mean, weighted_stddev); + } + +-static void mean_and_variance_test_2(struct kunit *test) +-{ +- s64 d[] = { 100, 10, 10, 10, 10, 10, 10 }; +- s64 mean[] = { 10, 10, 10, 10, 10, 10, 10 }; +- s64 stddev[] = { 9, 9, 9, 9, 9, 9, 9 }; +- s64 weighted_mean[] = { 32, 27, 22, 19, 17, 15, 14 }; +- s64 weighted_stddev[] = { 38, 35, 31, 27, 24, 21, 18 }; +- +- do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2, +- d, mean, stddev, weighted_mean, weighted_stddev); +-} +- + /* Test behaviour where we switch from one steady state to another: */ +-static void mean_and_variance_test_3(struct kunit *test) ++static void mean_and_variance_test_2(struct kunit *test) + { + s64 d[] = { 100, 100, 100, 100, 100 }; + s64 mean[] = { 22, 32, 40, 46, 50 }; +@@ -155,18 +143,6 @@ static void mean_and_variance_test_3(struct kunit *test) + d, mean, stddev, weighted_mean, weighted_stddev); + } + +-static void mean_and_variance_test_4(struct kunit *test) +-{ +- s64 d[] = { 100, 100, 100, 100, 100 }; +- s64 mean[] = { 10, 11, 12, 13, 14 }; +- s64 stddev[] = { 9, 13, 15, 17, 19 }; +- s64 weighted_mean[] = { 32, 49, 61, 71, 78 }; +- s64 weighted_stddev[] = { 38, 44, 44, 41, 38 }; +- +- do_mean_and_variance_test(test, 10, 6, ARRAY_SIZE(d), 2, +- d, mean, stddev, weighted_mean, weighted_stddev); +-} +- + static void mean_and_variance_fast_divpow2(struct kunit *test) + { + s64 i; +@@ -224,8 +200,6 @@ static struct kunit_case mean_and_variance_test_cases[] = { + KUNIT_CASE(mean_and_variance_weighted_advanced_test), + KUNIT_CASE(mean_and_variance_test_1), + KUNIT_CASE(mean_and_variance_test_2), +- KUNIT_CASE(mean_and_variance_test_3), +- KUNIT_CASE(mean_and_variance_test_4), + {} + }; + +-- +2.43.0 + diff --git a/queue-6.8/mptcp-don-t-account-accept-of-non-mpc-client-as-fall.patch b/queue-6.8/mptcp-don-t-account-accept-of-non-mpc-client-as-fall.patch new file mode 100644 index 00000000000..0ccee8e9cbb --- /dev/null +++ b/queue-6.8/mptcp-don-t-account-accept-of-non-mpc-client-as-fall.patch @@ -0,0 +1,112 @@ +From 1b79139bcc5accd372070559d28497a8db97fc1a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 29 Mar 2024 13:08:52 +0100 +Subject: mptcp: don't account accept() of non-MPC client as fallback to TCP + +From: Davide Caratti + +[ Upstream commit 7a1b3490f47e88ec4cbde65f1a77a0f4bc972282 ] + +Current MPTCP servers increment MPTcpExtMPCapableFallbackACK when they +accept non-MPC connections. As reported by Christoph, this is "surprising" +because the counter might become greater than MPTcpExtMPCapableSYNRX. + +MPTcpExtMPCapableFallbackACK counter's name suggests it should only be +incremented when a connection was seen using MPTCP options, then a +fallback to TCP has been done. Let's do that by incrementing it when +the subflow context of an inbound MPC connection attempt is dropped. 
+Also, update mptcp_connect.sh kselftest, to ensure that the +above MIB does not increment in case a pure TCP client connects to a +MPTCP server. + +Fixes: fc518953bc9c ("mptcp: add and use MIB counter infrastructure") +Cc: stable@vger.kernel.org +Reported-by: Christoph Paasch +Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/449 +Signed-off-by: Davide Caratti +Reviewed-by: Mat Martineau +Reviewed-by: Matthieu Baerts (NGI0) +Signed-off-by: Matthieu Baerts (NGI0) +Link: https://lore.kernel.org/r/20240329-upstream-net-20240329-fallback-mib-v1-1-324a8981da48@kernel.org +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/mptcp/protocol.c | 2 -- + net/mptcp/subflow.c | 2 ++ + tools/testing/selftests/net/mptcp/mptcp_connect.sh | 9 +++++++++ + 3 files changed, 11 insertions(+), 2 deletions(-) + +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index 7833a49f6214a..2b921af2718d9 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -3916,8 +3916,6 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, + mptcp_set_state(newsk, TCP_CLOSE); + } + } else { +- MPTCP_INC_STATS(sock_net(ssk), +- MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK); + tcpfallback: + newsk->sk_kern_sock = kern; + lock_sock(newsk); +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index 71ba86246ff89..13f66d11b7a0b 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -905,6 +905,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, + return child; + + fallback: ++ if (fallback) ++ SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK); + mptcp_subflow_drop_ctx(child); + return child; + } +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh +index f8e1b3daa7489..713de81822227 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh +@@ -391,12 +391,14 @@ do_transfer() + local stat_cookierx_last + local stat_csum_err_s + local stat_csum_err_c ++ local stat_tcpfb_last_l + stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX") + stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX") + stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent") + stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv") + stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr") + stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr") ++ stat_tcpfb_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK") + + timeout ${timeout_test} \ + ip netns exec ${listener_ns} \ +@@ -465,11 +467,13 @@ do_transfer() + local stat_cookietx_now + local stat_cookierx_now + local stat_ooo_now ++ local stat_tcpfb_now_l + stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX") + stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX") + stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent") + stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv") + stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue") ++ stat_tcpfb_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableFallbackACK") + + expect_synrx=$((stat_synrx_last_l)) + expect_ackrx=$((stat_ackrx_last_l)) +@@ -516,6 +520,11 @@ do_transfer() + fi + fi + ++ if [ 
${stat_ooo_now} -eq 0 ] && [ ${stat_tcpfb_last_l} -ne ${stat_tcpfb_now_l} ]; then
++		mptcp_lib_pr_fail "unexpected fallback to TCP"
++		rets=1
++	fi
++
+ 	if [ $cookies -eq 2 ];then
+ 		if [ $stat_cookietx_last -ge $stat_cookietx_now ] ;then
+ 			extra+=" WARN: CookieSent: did not advance"
+-- 
+2.43.0
+
diff --git a/queue-6.8/net-ravb-always-process-tx-descriptor-ring.patch b/queue-6.8/net-ravb-always-process-tx-descriptor-ring.patch
new file mode 100644
index 00000000000..1b4ca54b38a
--- /dev/null
+++ b/queue-6.8/net-ravb-always-process-tx-descriptor-ring.patch
@@ -0,0 +1,55 @@
+From 2a643463e7f70c95bc9aae1a888ba5eb3a61f273 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Tue, 2 Apr 2024 15:53:04 +0100
+Subject: net: ravb: Always process TX descriptor ring
+
+From: Paul Barker
+
+[ Upstream commit 596a4254915f94c927217fe09c33a6828f33fb25 ]
+
+The TX queue should be serviced each time the poll function is called,
+even if the full RX work budget has been consumed. This prevents
+starvation of the TX queue when RX bandwidth usage is high.
+
+Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
+Signed-off-by: Paul Barker
+Reviewed-by: Sergey Shtylyov
+Link: https://lore.kernel.org/r/20240402145305.82148-1-paul.barker.ct@bp.renesas.com
+Signed-off-by: Paolo Abeni
+Signed-off-by: Sasha Levin
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index 266ed984a98fa..03c49bec6ee0b 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1288,12 +1288,12 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+ 	int q = napi - priv->napi;
+ 	int mask = BIT(q);
+ 	int quota = budget;
++	bool unmask;
+ 
+ 	/* Processing RX Descriptor Ring */
+ 	/* Clear RX interrupt */
+ 	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+-	if (ravb_rx(ndev, &quota, q))
+-		goto out;
++	unmask = !ravb_rx(ndev, &quota, q);
+ 
+ 	/* Processing TX Descriptor Ring */
+ 	spin_lock_irqsave(&priv->lock, flags);
+@@ -1303,6 +1303,9 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+ 		netif_wake_subqueue(ndev, q);
+ 	spin_unlock_irqrestore(&priv->lock, flags);
+ 
++	if (!unmask)
++		goto out;
++
+ 	napi_complete(napi);
+ 
+ 	/* Re-enable RX/TX interrupts */
+-- 
+2.43.0
+
diff --git a/queue-6.8/net-ravb-always-update-error-counters.patch b/queue-6.8/net-ravb-always-update-error-counters.patch
new file mode 100644
index 00000000000..5cb0bac32f1
--- /dev/null
+++ b/queue-6.8/net-ravb-always-update-error-counters.patch
@@ -0,0 +1,66 @@
+From 722a4bdf7c75f6930ab932d835d63731be6293ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Tue, 2 Apr 2024 15:53:05 +0100
+Subject: net: ravb: Always update error counters
+
+From: Paul Barker
+
+[ Upstream commit 101b76418d7163240bc74a7e06867dca0e51183e ]
+
+The error statistics should be updated each time the poll function is
+called, even if the full RX work budget has been consumed. This prevents
+the counts from becoming stuck when RX bandwidth usage is high.
+
+This also ensures that error counters are not updated after we've
+re-enabled interrupts as that could result in a race condition.
+
+Also drop an unnecessary space. 
+ +Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper") +Signed-off-by: Paul Barker +Reviewed-by: Sergey Shtylyov +Link: https://lore.kernel.org/r/20240402145305.82148-2-paul.barker.ct@bp.renesas.com +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/renesas/ravb_main.c | 17 +++++++++-------- + 1 file changed, 9 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c +index 03c49bec6ee0b..1bdf0abb256cf 100644 +--- a/drivers/net/ethernet/renesas/ravb_main.c ++++ b/drivers/net/ethernet/renesas/ravb_main.c +@@ -1303,6 +1303,15 @@ static int ravb_poll(struct napi_struct *napi, int budget) + netif_wake_subqueue(ndev, q); + spin_unlock_irqrestore(&priv->lock, flags); + ++ /* Receive error message handling */ ++ priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; ++ if (info->nc_queues) ++ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; ++ if (priv->rx_over_errors != ndev->stats.rx_over_errors) ++ ndev->stats.rx_over_errors = priv->rx_over_errors; ++ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) ++ ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; ++ + if (!unmask) + goto out; + +@@ -1319,14 +1328,6 @@ static int ravb_poll(struct napi_struct *napi, int budget) + } + spin_unlock_irqrestore(&priv->lock, flags); + +- /* Receive error message handling */ +- priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; +- if (info->nc_queues) +- priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; +- if (priv->rx_over_errors != ndev->stats.rx_over_errors) +- ndev->stats.rx_over_errors = priv->rx_over_errors; +- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) +- ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; + out: + return budget - quota; + } +-- +2.43.0 + diff --git a/queue-6.8/net-ravb-let-ip-specific-receive-function-to-interro.patch b/queue-6.8/net-ravb-let-ip-specific-receive-function-to-interro.patch new file mode 100644 index 00000000000..d9a8c3c710d --- /dev/null +++ b/queue-6.8/net-ravb-let-ip-specific-receive-function-to-interro.patch @@ -0,0 +1,63 @@ +From 54d26fc23887d2e6f5f01533e74e7e9f738dca12 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 2 Feb 2024 10:41:22 +0200 +Subject: net: ravb: Let IP-specific receive function to interrogate + descriptors + +From: Claudiu Beznea + +[ Upstream commit 2b993bfdb47b3aaafd8fe9cd5038b5e297b18ee1 ] + +ravb_poll() initial code used to interrogate the first descriptor of the +RX queue in case gPTP is false to determine if ravb_rx() should be called. +This is done for non-gPTP IPs. For gPTP IPs the driver PTP-specific +information was used to determine if receive function should be called. As +every IP has its own receive function that interrogates the RX descriptors +list in the same way the ravb_poll() was doing there is no need to double +check this in ravb_poll(). Removing the code from ravb_poll() leads to a +cleaner code. 
+
+Signed-off-by: Claudiu Beznea
+Reviewed-by: Sergey Shtylyov
+Signed-off-by: Paolo Abeni
+Stable-dep-of: 596a4254915f ("net: ravb: Always process TX descriptor ring")
+Signed-off-by: Sasha Levin
+---
+ drivers/net/ethernet/renesas/ravb_main.c | 13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index f7566cfa45ca3..266ed984a98fa 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1284,25 +1284,16 @@ static int ravb_poll(struct napi_struct *napi, int budget)
+ 	struct net_device *ndev = napi->dev;
+ 	struct ravb_private *priv = netdev_priv(ndev);
+ 	const struct ravb_hw_info *info = priv->info;
+-	bool gptp = info->gptp || info->ccc_gac;
+-	struct ravb_rx_desc *desc;
+ 	unsigned long flags;
+ 	int q = napi - priv->napi;
+ 	int mask = BIT(q);
+ 	int quota = budget;
+-	unsigned int entry;
+ 
+-	if (!gptp) {
+-		entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+-		desc = &priv->gbeth_rx_ring[entry];
+-	}
+ 	/* Processing RX Descriptor Ring */
+ 	/* Clear RX interrupt */
+ 	ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
+-	if (gptp || desc->die_dt != DT_FEMPTY) {
+-		if (ravb_rx(ndev, &quota, q))
+-			goto out;
+-	}
++	if (ravb_rx(ndev, &quota, q))
++		goto out;
+ 
+ 	/* Processing TX Descriptor Ring */
+ 	spin_lock_irqsave(&priv->lock, flags);
+-- 
+2.43.0
+
diff --git a/queue-6.8/scsi-sg-avoid-sg-device-teardown-race.patch b/queue-6.8/scsi-sg-avoid-sg-device-teardown-race.patch
new file mode 100644
index 00000000000..4884eda153f
--- /dev/null
+++ b/queue-6.8/scsi-sg-avoid-sg-device-teardown-race.patch
@@ -0,0 +1,56 @@
+From 959f530354e02b9de6f7881102dbae6053e4c6a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 20 Mar 2024 22:30:32 +0100
+Subject: scsi: sg: Avoid sg device teardown race
+
+From: Alexander Wetzel
+
+[ Upstream commit 27f58c04a8f438078583041468ec60597841284d ]
+
+sg_remove_sfp_usercontext() must not use sg_device_destroy() after calling
+scsi_device_put().
+
+sg_device_destroy() is accessing the parent scsi_device request_queue which
+will already be set to NULL when the preceding call to scsi_device_put()
+removed the last reference to the parent scsi_device.
+
+The resulting NULL pointer exception will then crash the kernel.
+
+Link: https://lore.kernel.org/r/20240305150509.23896-1-Alexander@wetzel-home.de
+Fixes: db59133e9279 ("scsi: sg: fix blktrace debugfs entries leakage")
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Wetzel
+Link: https://lore.kernel.org/r/20240320213032.18221-1-Alexander@wetzel-home.de
+Reviewed-by: Bart Van Assche
+Signed-off-by: Martin K. 
Petersen +Signed-off-by: Sasha Levin +--- + drivers/scsi/sg.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 86210e4dd0d35..ff6894ce5404e 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -2207,6 +2207,7 @@ sg_remove_sfp_usercontext(struct work_struct *work) + { + struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); + struct sg_device *sdp = sfp->parentdp; ++ struct scsi_device *device = sdp->device; + Sg_request *srp; + unsigned long iflags; + +@@ -2232,8 +2233,9 @@ sg_remove_sfp_usercontext(struct work_struct *work) + "sg_remove_sfp: sfp=0x%p\n", sfp)); + kfree(sfp); + +- scsi_device_put(sdp->device); ++ WARN_ON_ONCE(kref_read(&sdp->d_ref) != 1); + kref_put(&sdp->d_ref, sg_device_destroy); ++ scsi_device_put(device); + module_put(THIS_MODULE); + } + +-- +2.43.0 + diff --git a/queue-6.8/selftests-mptcp-connect-fix-shellcheck-warnings.patch b/queue-6.8/selftests-mptcp-connect-fix-shellcheck-warnings.patch new file mode 100644 index 00000000000..7ef78db5cfc --- /dev/null +++ b/queue-6.8/selftests-mptcp-connect-fix-shellcheck-warnings.patch @@ -0,0 +1,242 @@ +From 02ea0a714ab49fc1393ca81eb78b93a8bdc278b6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 6 Mar 2024 10:42:57 +0100 +Subject: selftests: mptcp: connect: fix shellcheck warnings + +From: Matthieu Baerts (NGI0) + +[ Upstream commit e3aae1098f109f0bd33c971deff1926f4e4441d0 ] + +shellcheck recently helped to prevent issues. It is then good to fix the +other harmless issues in order to spot "real" ones later. + +Here, two categories of warnings are now ignored: + +- SC2317: Command appears to be unreachable. The cleanup() function is + invoked indirectly via the EXIT trap. + +- SC2086: Double quote to prevent globbing and word splitting. This is + recommended, but the current usage is correct and there is no need to + do all these modifications to be compliant with this rule. + +For the modifications: + + - SC2034: ksft_skip appears unused. + - SC2181: Check exit code directly with e.g. 'if mycmd;', not + indirectly with $?. + - SC2004: $/${} is unnecessary on arithmetic variables. + - SC2155: Declare and assign separately to avoid masking return + values. + - SC2166: Prefer [ p ] && [ q ] as [ p -a q ] is not well defined. + - SC2059: Don't use variables in the printf format string. Use printf + '..%s..' "$foo". + +Now this script is shellcheck (0.9.0) compliant. We can easily spot new +issues. + +Reviewed-by: Mat Martineau +Signed-off-by: Matthieu Baerts (NGI0) +Link: https://lore.kernel.org/r/20240306-upstream-net-next-20240304-selftests-mptcp-shared-code-shellcheck-v2-8-bc79e6e5e6a0@kernel.org +Signed-off-by: Jakub Kicinski +Stable-dep-of: 7a1b3490f47e ("mptcp: don't account accept() of non-MPC client as fallback to TCP") +Signed-off-by: Sasha Levin +--- + .../selftests/net/mptcp/mptcp_connect.sh | 76 ++++++++++++------- + 1 file changed, 47 insertions(+), 29 deletions(-) + +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh +index 7898d62fce0b5..cce0e553976f2 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh +@@ -1,6 +1,11 @@ + #!/bin/bash + # SPDX-License-Identifier: GPL-2.0 + ++# Double quotes to prevent globbing and word splitting is recommended in new ++# code but we accept it, especially because there were too many before having ++# address all other issues detected by shellcheck. 
++#shellcheck disable=SC2086 ++ + . "$(dirname "${0}")/mptcp_lib.sh" + + time_start=$(date +%s) +@@ -13,7 +18,6 @@ sout="" + cin_disconnect="" + cin="" + cout="" +-ksft_skip=4 + capture=false + timeout_poll=30 + timeout_test=$((timeout_poll * 2 + 1)) +@@ -131,6 +135,8 @@ ns4="ns4-$rndh" + TEST_COUNT=0 + TEST_GROUP="" + ++# This function is used in the cleanup trap ++#shellcheck disable=SC2317 + cleanup() + { + rm -f "$cin_disconnect" "$cout_disconnect" +@@ -225,8 +231,9 @@ set_ethtool_flags() { + local dev="$2" + local flags="$3" + +- ip netns exec $ns ethtool -K $dev $flags 2>/dev/null +- [ $? -eq 0 ] && echo "INFO: set $ns dev $dev: ethtool -K $flags" ++ if ip netns exec $ns ethtool -K $dev $flags 2>/dev/null; then ++ echo "INFO: set $ns dev $dev: ethtool -K $flags" ++ fi + } + + set_random_ethtool_flags() { +@@ -321,7 +328,7 @@ do_transfer() + local extra_args="$7" + + local port +- port=$((10000+$TEST_COUNT)) ++ port=$((10000+TEST_COUNT)) + TEST_COUNT=$((TEST_COUNT+1)) + + if [ "$rcvbuf" -gt 0 ]; then +@@ -378,12 +385,18 @@ do_transfer() + nstat -n + fi + +- local stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX") +- local stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX") +- local stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent") +- local stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv") +- local stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr") +- local stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr") ++ local stat_synrx_last_l ++ local stat_ackrx_last_l ++ local stat_cookietx_last ++ local stat_cookierx_last ++ local stat_csum_err_s ++ local stat_csum_err_c ++ stat_synrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX") ++ stat_ackrx_last_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX") ++ stat_cookietx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent") ++ stat_cookierx_last=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv") ++ stat_csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr") ++ stat_csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr") + + timeout ${timeout_test} \ + ip netns exec ${listener_ns} \ +@@ -446,11 +459,16 @@ do_transfer() + mptcp_lib_check_transfer $cin $sout "file received by server" + rets=$? 
+ +- local stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX") +- local stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX") +- local stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent") +- local stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv") +- local stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue") ++ local stat_synrx_now_l ++ local stat_ackrx_now_l ++ local stat_cookietx_now ++ local stat_cookierx_now ++ local stat_ooo_now ++ stat_synrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableSYNRX") ++ stat_ackrx_now_l=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtMPCapableACKRX") ++ stat_cookietx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesSent") ++ stat_cookierx_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtSyncookiesRecv") ++ stat_ooo_now=$(mptcp_lib_get_counter "${listener_ns}" "TcpExtTCPOFOQueue") + + expect_synrx=$((stat_synrx_last_l)) + expect_ackrx=$((stat_ackrx_last_l)) +@@ -459,8 +477,8 @@ do_transfer() + cookies=${cookies##*=} + + if [ ${cl_proto} = "MPTCP" ] && [ ${srv_proto} = "MPTCP" ]; then +- expect_synrx=$((stat_synrx_last_l+$connect_per_transfer)) +- expect_ackrx=$((stat_ackrx_last_l+$connect_per_transfer)) ++ expect_synrx=$((stat_synrx_last_l+connect_per_transfer)) ++ expect_ackrx=$((stat_ackrx_last_l+connect_per_transfer)) + fi + + if [ ${stat_synrx_now_l} -lt ${expect_synrx} ]; then +@@ -468,7 +486,7 @@ do_transfer() + "${stat_synrx_now_l}" "${expect_synrx}" 1>&2 + retc=1 + fi +- if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} -a ${stat_ooo_now} -eq 0 ]; then ++ if [ ${stat_ackrx_now_l} -lt ${expect_ackrx} ] && [ ${stat_ooo_now} -eq 0 ]; then + if [ ${stat_ooo_now} -eq 0 ]; then + printf "[ FAIL ] lower MPC ACK rx (%d) than expected (%d)\n" \ + "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2 +@@ -479,18 +497,20 @@ do_transfer() + fi + + if $checksum; then +- local csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr") +- local csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr") ++ local csum_err_s ++ local csum_err_c ++ csum_err_s=$(mptcp_lib_get_counter "${listener_ns}" "MPTcpExtDataCsumErr") ++ csum_err_c=$(mptcp_lib_get_counter "${connector_ns}" "MPTcpExtDataCsumErr") + + local csum_err_s_nr=$((csum_err_s - stat_csum_err_s)) + if [ $csum_err_s_nr -gt 0 ]; then +- printf "[ FAIL ]\nserver got $csum_err_s_nr data checksum error[s]" ++ printf "[ FAIL ]\nserver got %d data checksum error[s]" ${csum_err_s_nr} + rets=1 + fi + + local csum_err_c_nr=$((csum_err_c - stat_csum_err_c)) + if [ $csum_err_c_nr -gt 0 ]; then +- printf "[ FAIL ]\nclient got $csum_err_c_nr data checksum error[s]" ++ printf "[ FAIL ]\nclient got %d data checksum error[s]" ${csum_err_c_nr} + retc=1 + fi + fi +@@ -658,7 +678,7 @@ run_test_transparent() + return + fi + +-ip netns exec "$listener_ns" nft -f /dev/stdin <<"EOF" ++ if ! ip netns exec "$listener_ns" nft -f /dev/stdin <<"EOF" + flush ruleset + table inet mangle { + chain divert { +@@ -669,7 +689,7 @@ table inet mangle { + } + } + EOF +- if [ $? -ne 0 ]; then ++ then + echo "SKIP: $msg, could not load nft ruleset" + mptcp_lib_fail_if_expected_feature "nft rules" + mptcp_lib_result_skip "${TEST_GROUP}" +@@ -684,8 +704,7 @@ EOF + local_addr="0.0.0.0" + fi + +- ip -net "$listener_ns" $r6flag rule add fwmark 1 lookup 100 +- if [ $? -ne 0 ]; then ++ if ! 
ip -net "$listener_ns" $r6flag rule add fwmark 1 lookup 100; then + ip netns exec "$listener_ns" nft flush ruleset + echo "SKIP: $msg, ip $r6flag rule failed" + mptcp_lib_fail_if_expected_feature "ip rule" +@@ -693,8 +712,7 @@ EOF + return + fi + +- ip -net "$listener_ns" route add local $local_addr/0 dev lo table 100 +- if [ $? -ne 0 ]; then ++ if ! ip -net "$listener_ns" route add local $local_addr/0 dev lo table 100; then + ip netns exec "$listener_ns" nft flush ruleset + ip -net "$listener_ns" $r6flag rule del fwmark 1 lookup 100 + echo "SKIP: $msg, ip route add local $local_addr failed" +@@ -857,7 +875,7 @@ stop_if_error "Could not even run ping tests" + echo -n "INFO: Using loss of $tc_loss " + test "$tc_delay" -gt 0 && echo -n "delay $tc_delay ms " + +-reorder_delay=$(($tc_delay / 4)) ++reorder_delay=$((tc_delay / 4)) + + if [ -z "${tc_reorder}" ]; then + reorder1=$((RANDOM%10)) +-- +2.43.0 + diff --git a/queue-6.8/selftests-mptcp-use-operator-to-append-strings.patch b/queue-6.8/selftests-mptcp-use-operator-to-append-strings.patch new file mode 100644 index 00000000000..0c41407abd1 --- /dev/null +++ b/queue-6.8/selftests-mptcp-use-operator-to-append-strings.patch @@ -0,0 +1,259 @@ +From c9140490dcd89fe01dab7a0c51728b0698b1937f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Mar 2024 23:10:15 +0100 +Subject: selftests: mptcp: use += operator to append strings + +From: Geliang Tang + +[ Upstream commit e7c42bf4d320affe37337aa83ae0347832b3f568 ] + +This patch uses addition assignment operator (+=) to append strings +instead of duplicating the variable name in mptcp_connect.sh and +mptcp_join.sh. + +This can make the statements shorter. + +Note: in mptcp_connect.sh, add a local variable extra in do_transfer to +save the various extra warning logs, using += to append it. And add a +new variable tc_info to save various tc info, also using += to append it. +This can make the code more readable and prepare for the next commit. + +Signed-off-by: Geliang Tang +Reviewed-by: Matthieu Baerts (NGI0) +Signed-off-by: Matthieu Baerts (NGI0) +Link: https://lore.kernel.org/r/20240308-upstream-net-next-20240308-selftests-mptcp-unification-v1-8-4f42c347b653@kernel.org +Signed-off-by: Jakub Kicinski +Stable-dep-of: 7a1b3490f47e ("mptcp: don't account accept() of non-MPC client as fallback to TCP") +Signed-off-by: Sasha Levin +--- + .../selftests/net/mptcp/mptcp_connect.sh | 53 ++++++++++--------- + .../testing/selftests/net/mptcp/mptcp_join.sh | 30 +++++------ + 2 files changed, 43 insertions(+), 40 deletions(-) + +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh +index cce0e553976f2..f8e1b3daa7489 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh +@@ -332,15 +332,15 @@ do_transfer() + TEST_COUNT=$((TEST_COUNT+1)) + + if [ "$rcvbuf" -gt 0 ]; then +- extra_args="$extra_args -R $rcvbuf" ++ extra_args+=" -R $rcvbuf" + fi + + if [ "$sndbuf" -gt 0 ]; then +- extra_args="$extra_args -S $sndbuf" ++ extra_args+=" -S $sndbuf" + fi + + if [ -n "$testmode" ]; then +- extra_args="$extra_args -m $testmode" ++ extra_args+=" -m $testmode" + fi + + if [ -n "$extra_args" ] && $options_log; then +@@ -459,6 +459,7 @@ do_transfer() + mptcp_lib_check_transfer $cin $sout "file received by server" + rets=$? 
+ ++ local extra="" + local stat_synrx_now_l + local stat_ackrx_now_l + local stat_cookietx_now +@@ -492,7 +493,7 @@ do_transfer() + "${stat_ackrx_now_l}" "${expect_ackrx}" 1>&2 + rets=1 + else +- printf "[ Note ] fallback due to TCP OoO" ++ extra+=" [ Note ] fallback due to TCP OoO" + fi + fi + +@@ -515,39 +516,41 @@ do_transfer() + fi + fi + +- if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then +- printf "[ OK ]" +- mptcp_lib_result_pass "${TEST_GROUP}: ${result_msg}" +- else +- mptcp_lib_result_fail "${TEST_GROUP}: ${result_msg}" +- fi +- + if [ $cookies -eq 2 ];then + if [ $stat_cookietx_last -ge $stat_cookietx_now ] ;then +- printf " WARN: CookieSent: did not advance" ++ extra+=" WARN: CookieSent: did not advance" + fi + if [ $stat_cookierx_last -ge $stat_cookierx_now ] ;then +- printf " WARN: CookieRecv: did not advance" ++ extra+=" WARN: CookieRecv: did not advance" + fi + else + if [ $stat_cookietx_last -ne $stat_cookietx_now ] ;then +- printf " WARN: CookieSent: changed" ++ extra+=" WARN: CookieSent: changed" + fi + if [ $stat_cookierx_last -ne $stat_cookierx_now ] ;then +- printf " WARN: CookieRecv: changed" ++ extra+=" WARN: CookieRecv: changed" + fi + fi + + if [ ${stat_synrx_now_l} -gt ${expect_synrx} ]; then +- printf " WARN: SYNRX: expect %d, got %d (probably retransmissions)" \ +- "${expect_synrx}" "${stat_synrx_now_l}" ++ extra+=" WARN: SYNRX: expect ${expect_synrx}," ++ extra+=" got ${stat_synrx_now_l} (probably retransmissions)" + fi + if [ ${stat_ackrx_now_l} -gt ${expect_ackrx} ]; then +- printf " WARN: ACKRX: expect %d, got %d (probably retransmissions)" \ +- "${expect_ackrx}" "${stat_ackrx_now_l}" ++ extra+=" WARN: ACKRX: expect ${expect_ackrx}," ++ extra+=" got ${stat_ackrx_now_l} (probably retransmissions)" ++ fi ++ ++ if [ $retc -eq 0 ] && [ $rets -eq 0 ]; then ++ printf "[ OK ]%s\n" "${extra:1}" ++ mptcp_lib_result_pass "${TEST_GROUP}: ${result_msg}" ++ else ++ if [ -n "${extra}" ]; then ++ printf "%s\n" "${extra:1}" ++ fi ++ mptcp_lib_result_fail "${TEST_GROUP}: ${result_msg}" + fi + +- echo + cat "$capout" + [ $retc -eq 0 ] && [ $rets -eq 0 ] + } +@@ -872,8 +875,8 @@ mptcp_lib_result_code "${ret}" "ping tests" + stop_if_error "Could not even run ping tests" + + [ -n "$tc_loss" ] && tc -net "$ns2" qdisc add dev ns2eth3 root netem loss random $tc_loss delay ${tc_delay}ms +-echo -n "INFO: Using loss of $tc_loss " +-test "$tc_delay" -gt 0 && echo -n "delay $tc_delay ms " ++tc_info="loss of $tc_loss " ++test "$tc_delay" -gt 0 && tc_info+="delay $tc_delay ms " + + reorder_delay=$((tc_delay / 4)) + +@@ -884,17 +887,17 @@ if [ -z "${tc_reorder}" ]; then + + if [ $reorder_delay -gt 0 ] && [ $reorder1 -lt 100 ] && [ $reorder2 -gt 0 ]; then + tc_reorder="reorder ${reorder1}% ${reorder2}%" +- echo -n "$tc_reorder with delay ${reorder_delay}ms " ++ tc_info+="$tc_reorder with delay ${reorder_delay}ms " + fi + elif [ "$tc_reorder" = "0" ];then + tc_reorder="" + elif [ "$reorder_delay" -gt 0 ];then + # reordering requires some delay + tc_reorder="reorder $tc_reorder" +- echo -n "$tc_reorder with delay ${reorder_delay}ms " ++ tc_info+="$tc_reorder with delay ${reorder_delay}ms " + fi + +-echo "on ns3eth4" ++echo "INFO: Using ${tc_info}on ns3eth4" + + tc -net "$ns3" qdisc add dev ns3eth4 root netem delay ${reorder_delay}ms $tc_reorder + +diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh +index 81f493ce58759..24be952b4d4a1 100755 +--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh ++++ 
b/tools/testing/selftests/net/mptcp/mptcp_join.sh +@@ -799,18 +799,18 @@ pm_nl_check_endpoint() + line="${line% }" + # the dump order is: address id flags port dev + [ -n "$addr" ] && expected_line="$addr" +- expected_line="$expected_line $id" +- [ -n "$_flags" ] && expected_line="$expected_line ${_flags//","/" "}" +- [ -n "$dev" ] && expected_line="$expected_line $dev" +- [ -n "$port" ] && expected_line="$expected_line $port" ++ expected_line+=" $id" ++ [ -n "$_flags" ] && expected_line+=" ${_flags//","/" "}" ++ [ -n "$dev" ] && expected_line+=" $dev" ++ [ -n "$port" ] && expected_line+=" $port" + else + line=$(ip netns exec $ns ./pm_nl_ctl get $_id) + # the dump order is: id flags dev address port + expected_line="$id" +- [ -n "$flags" ] && expected_line="$expected_line $flags" +- [ -n "$dev" ] && expected_line="$expected_line $dev" +- [ -n "$addr" ] && expected_line="$expected_line $addr" +- [ -n "$_port" ] && expected_line="$expected_line $_port" ++ [ -n "$flags" ] && expected_line+=" $flags" ++ [ -n "$dev" ] && expected_line+=" $dev" ++ [ -n "$addr" ] && expected_line+=" $addr" ++ [ -n "$_port" ] && expected_line+=" $_port" + fi + if [ "$line" = "$expected_line" ]; then + print_ok +@@ -1261,7 +1261,7 @@ chk_csum_nr() + print_check "sum" + count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtDataCsumErr") + if [ "$count" != "$csum_ns1" ]; then +- extra_msg="$extra_msg ns1=$count" ++ extra_msg+=" ns1=$count" + fi + if [ -z "$count" ]; then + print_skip +@@ -1274,7 +1274,7 @@ chk_csum_nr() + print_check "csum" + count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtDataCsumErr") + if [ "$count" != "$csum_ns2" ]; then +- extra_msg="$extra_msg ns2=$count" ++ extra_msg+=" ns2=$count" + fi + if [ -z "$count" ]; then + print_skip +@@ -1318,7 +1318,7 @@ chk_fail_nr() + print_check "ftx" + count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFailTx") + if [ "$count" != "$fail_tx" ]; then +- extra_msg="$extra_msg,tx=$count" ++ extra_msg+=",tx=$count" + fi + if [ -z "$count" ]; then + print_skip +@@ -1332,7 +1332,7 @@ chk_fail_nr() + print_check "failrx" + count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFailRx") + if [ "$count" != "$fail_rx" ]; then +- extra_msg="$extra_msg,rx=$count" ++ extra_msg+=",rx=$count" + fi + if [ -z "$count" ]; then + print_skip +@@ -1367,7 +1367,7 @@ chk_fclose_nr() + if [ -z "$count" ]; then + print_skip + elif [ "$count" != "$fclose_tx" ]; then +- extra_msg="$extra_msg,tx=$count" ++ extra_msg+=",tx=$count" + fail_test "got $count MP_FASTCLOSE[s] TX expected $fclose_tx" + else + print_ok +@@ -1378,7 +1378,7 @@ chk_fclose_nr() + if [ -z "$count" ]; then + print_skip + elif [ "$count" != "$fclose_rx" ]; then +- extra_msg="$extra_msg,rx=$count" ++ extra_msg+=",rx=$count" + fail_test "got $count MP_FASTCLOSE[s] RX expected $fclose_rx" + else + print_ok +@@ -1747,7 +1747,7 @@ chk_rm_nr() + count=$((count + cnt)) + if [ "$count" != "$rm_subflow_nr" ]; then + suffix="$count in [$rm_subflow_nr:$((rm_subflow_nr*2))]" +- extra_msg="$extra_msg simult" ++ extra_msg+=" simult" + fi + if [ $count -ge "$rm_subflow_nr" ] && \ + [ "$count" -le "$((rm_subflow_nr *2 ))" ]; then +-- +2.43.0 + diff --git a/queue-6.8/series b/queue-6.8/series index 76c872658c2..d7b3f502107 100644 --- a/queue-6.8/series +++ b/queue-6.8/series @@ -127,3 +127,20 @@ octeontx2-af-add-array-index-check.patch i40e-fix-i40e_count_filters-to-count-only-active-new-filters.patch i40e-fix-vf-may-be-used-uninitialized-in-this-function-warning.patch i40e-enforce-software-interrupt-during-busy-poll-exit.patch 
+scsi-sg-avoid-sg-device-teardown-race.patch +usb-typec-ucsi-check-for-notifications-after-init.patch +drm-amd-flush-gfxoff-requests-in-prepare-stage.patch +e1000e-minor-flow-correction-in-e1000_shutdown-funct.patch +e1000e-move-force-smbus-from-enable-ulp-function-to-.patch +bpf-introduce-in_sleepable-helper.patch +bpf-move-sleepable-flag-from-bpf_prog_aux-to-bpf_pro.patch +bpf-support-deferring-bpf_link-dealloc-to-after-rcu-.patch +mean_and_variance-drop-always-failing-tests.patch +net-ravb-let-ip-specific-receive-function-to-interro.patch +net-ravb-always-process-tx-descriptor-ring.patch +net-ravb-always-update-error-counters.patch +kvm-svm-use-unsigned-integers-when-dealing-with-asid.patch +kvm-svm-add-support-for-allowing-zero-sev-asids.patch +selftests-mptcp-connect-fix-shellcheck-warnings.patch +selftests-mptcp-use-operator-to-append-strings.patch +mptcp-don-t-account-accept-of-non-mpc-client-as-fall.patch diff --git a/queue-6.8/usb-typec-ucsi-check-for-notifications-after-init.patch b/queue-6.8/usb-typec-ucsi-check-for-notifications-after-init.patch new file mode 100644 index 00000000000..296a6e3777a --- /dev/null +++ b/queue-6.8/usb-typec-ucsi-check-for-notifications-after-init.patch @@ -0,0 +1,70 @@ +From 903bfed719f3e87b607956bbe4d855c71831a43a Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 20 Mar 2024 08:39:23 +0100 +Subject: usb: typec: ucsi: Check for notifications after init + +From: Christian A. Ehrhardt + +[ Upstream commit 808a8b9e0b87bbc72bcc1f7ddfe5d04746e7ce56 ] + +The completion notification for the final SET_NOTIFICATION_ENABLE +command during initialization can include a connector change +notification. However, at the time this completion notification is +processed, the ucsi struct is not ready to handle this notification. +As a result the notification is ignored and the controller +never sends an interrupt again. + +Re-check CCI for a pending connector state change after +initialization is complete. Adjust the corresponding debug +message accordingly. + +Fixes: 71a1fa0df2a3 ("usb: typec: ucsi: Store the notification mask") +Cc: stable@vger.kernel.org +Signed-off-by: Christian A. Ehrhardt +Reviewed-by: Heikki Krogerus +Tested-by: Neil Armstrong # on SM8550-QRD +Link: https://lore.kernel.org/r/20240320073927.1641788-3-lk@c--e.de +Signed-off-by: Greg Kroah-Hartman +Signed-off-by: Sasha Levin +--- + drivers/usb/typec/ucsi/ucsi.c | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c +index 0bfe5e906e543..96da828f556a9 100644 +--- a/drivers/usb/typec/ucsi/ucsi.c ++++ b/drivers/usb/typec/ucsi/ucsi.c +@@ -962,7 +962,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num) + struct ucsi_connector *con = &ucsi->connector[num - 1]; + + if (!(ucsi->ntfy & UCSI_ENABLE_NTFY_CONNECTOR_CHANGE)) { +- dev_dbg(ucsi->dev, "Bogus connector change event\n"); ++ dev_dbg(ucsi->dev, "Early connector change event\n"); + return; + } + +@@ -1393,6 +1393,7 @@ static int ucsi_init(struct ucsi *ucsi) + { + struct ucsi_connector *con, *connector; + u64 command, ntfy; ++ u32 cci; + int ret; + int i; + +@@ -1445,6 +1446,13 @@ static int ucsi_init(struct ucsi *ucsi) + + ucsi->connector = connector; + ucsi->ntfy = ntfy; ++ ++ ret = ucsi->ops->read(ucsi, UCSI_CCI, &cci, sizeof(cci)); ++ if (ret) ++ return ret; ++ if (UCSI_CCI_CONNECTOR(READ_ONCE(cci))) ++ ucsi_connector_change(ucsi, cci); ++ + return 0; + + err_unregister: +-- +2.43.0 + -- 2.47.3
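
For anyone who wants to give this queue a spin before it is released, a
minimal sketch of one way to apply it (the clone path, branch name, and
queue location below are assumptions; adjust them to your own checkout):

    # Assumed layout: the queue-6.8/ directory sits next to a 6.8 stable tree.
    git clone --branch linux-6.8.y \
        git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git linux-6.8.y
    cd linux-6.8.y

    # queue-6.8/series lists the patch file names in apply order;
    # stop at the first failure so the tree is left in an inspectable state.
    while read -r p; do
        git am "../queue-6.8/$p" || break
    done < ../queue-6.8/series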