git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.8-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 8 Apr 2024 11:44:02 +0000 (13:44 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 8 Apr 2024 11:44:02 +0000 (13:44 +0200)
added patches:
bpf-put-uprobe-link-s-path-and-task-in-release-callback.patch
bpf-support-deferring-bpf_link-dealloc-to-after-rcu-grace-period.patch

queue-6.8/bpf-introduce-in_sleepable-helper.patch [deleted file]
queue-6.8/bpf-move-sleepable-flag-from-bpf_prog_aux-to-bpf_pro.patch [deleted file]
queue-6.8/bpf-put-uprobe-link-s-path-and-task-in-release-callback.patch [new file with mode: 0644]
queue-6.8/bpf-support-deferring-bpf_link-dealloc-to-after-rcu-grace-period.patch [moved from queue-6.8/bpf-support-deferring-bpf_link-dealloc-to-after-rcu-.patch with 85% similarity]
queue-6.8/series

diff --git a/queue-6.8/bpf-introduce-in_sleepable-helper.patch b/queue-6.8/bpf-introduce-in_sleepable-helper.patch
deleted file mode 100644 (file)
index c7fa49b..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-From 52f440367cd83e89dfbaffacc620b302f4af9e52 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 21 Feb 2024 17:25:18 +0100
-Subject: bpf: introduce in_sleepable() helper
-
-From: Benjamin Tissoires <bentiss@kernel.org>
-
-[ Upstream commit dfe6625df48ec54c6dc9b86d361f26962d09de88 ]
-
-No code change, but it'll allow to have only one place to change
-everything when we add in_sleepable in cur_state.
-
-Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
-Link: https://lore.kernel.org/r/20240221-hid-bpf-sleepable-v3-2-1fb378ca6301@kernel.org
-Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Stable-dep-of: 1a80dbcb2dba ("bpf: support deferring bpf_link dealloc to after RCU grace period")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- kernel/bpf/verifier.c | 17 +++++++++++------
- 1 file changed, 11 insertions(+), 6 deletions(-)
-
-diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
-index 19e575e6b7fe0..a3cfe6f458d3b 100644
---- a/kernel/bpf/verifier.c
-+++ b/kernel/bpf/verifier.c
-@@ -5211,6 +5211,11 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
-       return -EINVAL;
- }
-+static bool in_sleepable(struct bpf_verifier_env *env)
-+{
-+      return env->prog->aux->sleepable;
-+}
-+
- /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
-  * can dereference RCU protected pointers and result is PTR_TRUSTED.
-  */
-@@ -5218,7 +5223,7 @@ static bool in_rcu_cs(struct bpf_verifier_env *env)
- {
-       return env->cur_state->active_rcu_lock ||
-              env->cur_state->active_lock.ptr ||
--             !env->prog->aux->sleepable;
-+             !in_sleepable(env);
- }
- /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
-@@ -10099,7 +10104,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
-               return -EINVAL;
-       }
--      if (!env->prog->aux->sleepable && fn->might_sleep) {
-+      if (!in_sleepable(env) && fn->might_sleep) {
-               verbose(env, "helper call might sleep in a non-sleepable prog\n");
-               return -EINVAL;
-       }
-@@ -10129,7 +10134,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
-                       return -EINVAL;
-               }
--              if (env->prog->aux->sleepable && is_storage_get_function(func_id))
-+              if (in_sleepable(env) && is_storage_get_function(func_id))
-                       env->insn_aux_data[insn_idx].storage_get_func_atomic = true;
-       }
-@@ -11488,7 +11493,7 @@ static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
-                       return true;
-               fallthrough;
-       default:
--              return env->prog->aux->sleepable;
-+              return in_sleepable(env);
-       }
- }
-@@ -12009,7 +12014,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
-       }
-       sleepable = is_kfunc_sleepable(&meta);
--      if (sleepable && !env->prog->aux->sleepable) {
-+      if (sleepable && !in_sleepable(env)) {
-               verbose(env, "program must be sleepable to call sleepable kfunc %s\n", func_name);
-               return -EACCES;
-       }
-@@ -19575,7 +19580,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
-               }
-               if (is_storage_get_function(insn->imm)) {
--                      if (!env->prog->aux->sleepable ||
-+                      if (!in_sleepable(env) ||
-                           env->insn_aux_data[i + delta].storage_get_func_atomic)
-                               insn_buf[0] = BPF_MOV64_IMM(BPF_REG_5, (__force __s32)GFP_ATOMIC);
-                       else
--- 
-2.43.0
-
diff --git a/queue-6.8/bpf-move-sleepable-flag-from-bpf_prog_aux-to-bpf_pro.patch b/queue-6.8/bpf-move-sleepable-flag-from-bpf_prog_aux-to-bpf_pro.patch
deleted file mode 100644 (file)
index 9e88ebc..0000000
+++ /dev/null
@@ -1,264 +0,0 @@
-From 457a0367cb033532fe9d0947057bc49ac7ce393c Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 8 Mar 2024 16:47:39 -0800
-Subject: bpf: move sleepable flag from bpf_prog_aux to bpf_prog
-
-From: Andrii Nakryiko <andrii@kernel.org>
-
-[ Upstream commit 66c8473135c62f478301a0e5b3012f203562dfa6 ]
-
-prog->aux->sleepable is checked very frequently as part of (some) BPF
-program run hot paths. So this extra aux indirection seems wasteful and
-on busy systems might cause unnecessary memory cache misses.
-
-Let's move sleepable flag into prog itself to eliminate unnecessary
-pointer dereference.
-
-Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
-Acked-by: Jiri Olsa <jolsa@kernel.org>
-Message-ID: <20240309004739.2961431-1-andrii@kernel.org>
-Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Stable-dep-of: 1a80dbcb2dba ("bpf: support deferring bpf_link dealloc to after RCU grace period")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- include/linux/bpf.h            |  8 ++++----
- kernel/bpf/bpf_iter.c          |  4 ++--
- kernel/bpf/core.c              |  2 +-
- kernel/bpf/syscall.c           |  6 +++---
- kernel/bpf/trampoline.c        |  4 ++--
- kernel/bpf/verifier.c          | 12 ++++++------
- kernel/events/core.c           |  2 +-
- kernel/trace/bpf_trace.c       |  2 +-
- net/bpf/bpf_dummy_struct_ops.c |  2 +-
- 9 files changed, 21 insertions(+), 21 deletions(-)
-
-diff --git a/include/linux/bpf.h b/include/linux/bpf.h
-index e30100597d0a9..1b6b590451284 100644
---- a/include/linux/bpf.h
-+++ b/include/linux/bpf.h
-@@ -1451,7 +1451,6 @@ struct bpf_prog_aux {
-       bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
-       bool attach_tracing_prog; /* true if tracing another tracing program */
-       bool func_proto_unreliable;
--      bool sleepable;
-       bool tail_call_reachable;
-       bool xdp_has_frags;
-       bool exception_cb;
-@@ -1535,7 +1534,8 @@ struct bpf_prog {
-                               enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */
-                               call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */
-                               call_get_func_ip:1, /* Do we call get_func_ip() */
--                              tstamp_type_access:1; /* Accessed __sk_buff->tstamp_type */
-+                              tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */
-+                              sleepable:1;    /* BPF program is sleepable */
-       enum bpf_prog_type      type;           /* Type of BPF program */
-       enum bpf_attach_type    expected_attach_type; /* For some prog types */
-       u32                     len;            /* Number of filter blocks */
-@@ -2029,14 +2029,14 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
-       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
-       item = &array->items[0];
-       while ((prog = READ_ONCE(item->prog))) {
--              if (!prog->aux->sleepable)
-+              if (!prog->sleepable)
-                       rcu_read_lock();
-               run_ctx.bpf_cookie = item->bpf_cookie;
-               ret &= run_prog(prog, ctx);
-               item++;
--              if (!prog->aux->sleepable)
-+              if (!prog->sleepable)
-                       rcu_read_unlock();
-       }
-       bpf_reset_run_ctx(old_run_ctx);
-diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
-index 0fae791641870..112581cf97e7f 100644
---- a/kernel/bpf/bpf_iter.c
-+++ b/kernel/bpf/bpf_iter.c
-@@ -548,7 +548,7 @@ int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
-               return -ENOENT;
-       /* Only allow sleepable program for resched-able iterator */
--      if (prog->aux->sleepable && !bpf_iter_target_support_resched(tinfo))
-+      if (prog->sleepable && !bpf_iter_target_support_resched(tinfo))
-               return -EINVAL;
-       link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
-@@ -697,7 +697,7 @@ int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx)
-       struct bpf_run_ctx run_ctx, *old_run_ctx;
-       int ret;
--      if (prog->aux->sleepable) {
-+      if (prog->sleepable) {
-               rcu_read_lock_trace();
-               migrate_disable();
-               might_fault();
-diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
-index 026627226ec48..5fe749de7fde6 100644
---- a/kernel/bpf/core.c
-+++ b/kernel/bpf/core.c
-@@ -2700,7 +2700,7 @@ void __bpf_free_used_maps(struct bpf_prog_aux *aux,
-       bool sleepable;
-       u32 i;
--      sleepable = aux->sleepable;
-+      sleepable = aux->prog->sleepable;
-       for (i = 0; i < len; i++) {
-               map = used_maps[i];
-               if (map->ops->map_poke_untrack)
-diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
-index a1f18681721c7..11a8ea854c1d5 100644
---- a/kernel/bpf/syscall.c
-+++ b/kernel/bpf/syscall.c
-@@ -2160,7 +2160,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
-               btf_put(prog->aux->attach_btf);
-       if (deferred) {
--              if (prog->aux->sleepable)
-+              if (prog->sleepable)
-                       call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
-               else
-                       call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
-@@ -2691,11 +2691,11 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
-       }
-       prog->expected_attach_type = attr->expected_attach_type;
-+      prog->sleepable = !!(attr->prog_flags & BPF_F_SLEEPABLE);
-       prog->aux->attach_btf = attach_btf;
-       prog->aux->attach_btf_id = attr->attach_btf_id;
-       prog->aux->dst_prog = dst_prog;
-       prog->aux->dev_bound = !!attr->prog_ifindex;
--      prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
-       prog->aux->xdp_has_frags = attr->prog_flags & BPF_F_XDP_HAS_FRAGS;
-       err = security_bpf_prog_alloc(prog->aux);
-@@ -5394,7 +5394,7 @@ static int bpf_prog_bind_map(union bpf_attr *attr)
-       /* The bpf program will not access the bpf map, but for the sake of
-        * simplicity, increase sleepable_refcnt for sleepable program as well.
-        */
--      if (prog->aux->sleepable)
-+      if (prog->sleepable)
-               atomic64_inc(&map->sleepable_refcnt);
-       memcpy(used_maps_new, used_maps_old,
-              sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
-diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
-index d382f5ebe06c8..db7599c59c78a 100644
---- a/kernel/bpf/trampoline.c
-+++ b/kernel/bpf/trampoline.c
-@@ -1014,7 +1014,7 @@ void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
- bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
- {
--      bool sleepable = prog->aux->sleepable;
-+      bool sleepable = prog->sleepable;
-       if (bpf_prog_check_recur(prog))
-               return sleepable ? __bpf_prog_enter_sleepable_recur :
-@@ -1029,7 +1029,7 @@ bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
- bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog)
- {
--      bool sleepable = prog->aux->sleepable;
-+      bool sleepable = prog->sleepable;
-       if (bpf_prog_check_recur(prog))
-               return sleepable ? __bpf_prog_exit_sleepable_recur :
-diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
-index a3cfe6f458d3b..af3ef7a5a013c 100644
---- a/kernel/bpf/verifier.c
-+++ b/kernel/bpf/verifier.c
-@@ -5213,7 +5213,7 @@ static int map_kptr_match_type(struct bpf_verifier_env *env,
- static bool in_sleepable(struct bpf_verifier_env *env)
- {
--      return env->prog->aux->sleepable;
-+      return env->prog->sleepable;
- }
- /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock()
-@@ -17919,7 +17919,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
-               return -EINVAL;
-       }
--      if (prog->aux->sleepable)
-+      if (prog->sleepable)
-               switch (map->map_type) {
-               case BPF_MAP_TYPE_HASH:
-               case BPF_MAP_TYPE_LRU_HASH:
-@@ -18104,7 +18104,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
-                               return -E2BIG;
-                       }
--                      if (env->prog->aux->sleepable)
-+                      if (env->prog->sleepable)
-                               atomic64_inc(&map->sleepable_refcnt);
-                       /* hold the map. If the program is rejected by verifier,
-                        * the map will be released by release_maps() or it
-@@ -20552,7 +20552,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
-                       }
-               }
--              if (prog->aux->sleepable) {
-+              if (prog->sleepable) {
-                       ret = -EINVAL;
-                       switch (prog->type) {
-                       case BPF_PROG_TYPE_TRACING:
-@@ -20663,14 +20663,14 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
-       u64 key;
-       if (prog->type == BPF_PROG_TYPE_SYSCALL) {
--              if (prog->aux->sleepable)
-+              if (prog->sleepable)
-                       /* attach_btf_id checked to be zero already */
-                       return 0;
-               verbose(env, "Syscall programs can only be sleepable\n");
-               return -EINVAL;
-       }
--      if (prog->aux->sleepable && !can_be_sleepable(prog)) {
-+      if (prog->sleepable && !can_be_sleepable(prog)) {
-               verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
-               return -EINVAL;
-       }
-diff --git a/kernel/events/core.c b/kernel/events/core.c
-index f0f0f71213a1d..b4305b3ae84fb 100644
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -10557,7 +10557,7 @@ int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog,
-           (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT))
-               return -EINVAL;
--      if (prog->type == BPF_PROG_TYPE_KPROBE && prog->aux->sleepable && !is_uprobe)
-+      if (prog->type == BPF_PROG_TYPE_KPROBE && prog->sleepable && !is_uprobe)
-               /* only uprobe programs are allowed to be sleepable */
-               return -EINVAL;
-diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
-index 7ac6c52b25ebc..22d555a42404f 100644
---- a/kernel/trace/bpf_trace.c
-+++ b/kernel/trace/bpf_trace.c
-@@ -3241,7 +3241,7 @@ static int uprobe_prog_run(struct bpf_uprobe *uprobe,
-               .uprobe = uprobe,
-       };
-       struct bpf_prog *prog = link->link.prog;
--      bool sleepable = prog->aux->sleepable;
-+      bool sleepable = prog->sleepable;
-       struct bpf_run_ctx *old_run_ctx;
-       int err = 0;
-diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
-index 8906f7bdf4a92..0b1083d7ca03a 100644
---- a/net/bpf/bpf_dummy_struct_ops.c
-+++ b/net/bpf/bpf_dummy_struct_ops.c
-@@ -169,7 +169,7 @@ static int bpf_dummy_ops_check_member(const struct btf_type *t,
-       case offsetof(struct bpf_dummy_ops, test_sleepable):
-               break;
-       default:
--              if (prog->aux->sleepable)
-+              if (prog->sleepable)
-                       return -EINVAL;
-       }
--- 
-2.43.0
-
diff --git a/queue-6.8/bpf-put-uprobe-link-s-path-and-task-in-release-callback.patch b/queue-6.8/bpf-put-uprobe-link-s-path-and-task-in-release-callback.patch
new file mode 100644 (file)
index 0000000..a1d1eee
--- /dev/null
@@ -0,0 +1,51 @@
+From e9c856cabefb71d47b2eeb197f72c9c88e9b45b0 Mon Sep 17 00:00:00 2001
+From: Andrii Nakryiko <andrii@kernel.org>
+Date: Wed, 27 Mar 2024 22:24:25 -0700
+Subject: bpf: put uprobe link's path and task in release callback
+
+From: Andrii Nakryiko <andrii@kernel.org>
+
+commit e9c856cabefb71d47b2eeb197f72c9c88e9b45b0 upstream.
+
+There is no need to delay putting either path or task to deallocation
+step. It can be done right after bpf_uprobe_unregister. Between release
+and dealloc, there could be still some running BPF programs, but they
+don't access either task or path, only data in link->uprobes, so it is
+safe to do.
+
+On the other hand, doing path_put() in dealloc callback makes this
+dealloc sleepable because path_put() itself might sleep. Which is
+problematic due to the need to call uprobe's dealloc through call_rcu(),
+which is what is done in the next bug fix patch. So solve the problem by
+releasing these resources early.
+
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/r/20240328052426.3042617-1-andrii@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/bpf_trace.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -3142,6 +3142,9 @@ static void bpf_uprobe_multi_link_releas
+       umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+       bpf_uprobe_unregister(&umulti_link->path, umulti_link->uprobes, umulti_link->cnt);
++      if (umulti_link->task)
++              put_task_struct(umulti_link->task);
++      path_put(&umulti_link->path);
+ }
+ static void bpf_uprobe_multi_link_dealloc(struct bpf_link *link)
+@@ -3149,9 +3152,6 @@ static void bpf_uprobe_multi_link_deallo
+       struct bpf_uprobe_multi_link *umulti_link;
+       umulti_link = container_of(link, struct bpf_uprobe_multi_link, link);
+-      if (umulti_link->task)
+-              put_task_struct(umulti_link->task);
+-      path_put(&umulti_link->path);
+       kvfree(umulti_link->uprobes);
+       kfree(umulti_link);
+ }
similarity index 85%
rename from queue-6.8/bpf-support-deferring-bpf_link-dealloc-to-after-rcu-.patch
rename to queue-6.8/bpf-support-deferring-bpf_link-dealloc-to-after-rcu-grace-period.patch
index f7b4a76e6b5a066d8ccf0926129a3631e6a883c8..1b22aa5e52ac2fc6265ea3878c973199d037b7d6 100644 (file)
@@ -1,11 +1,11 @@
-From a37ac5c2b029abcd8bbd8fa268ad0d3004da8b23 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
+From 1a80dbcb2dbaf6e4c216e62e30fa7d3daa8001ce Mon Sep 17 00:00:00 2001
+From: Andrii Nakryiko <andrii@kernel.org>
 Date: Wed, 27 Mar 2024 22:24:26 -0700
 Subject: bpf: support deferring bpf_link dealloc to after RCU grace period
 
 From: Andrii Nakryiko <andrii@kernel.org>
 
-[ Upstream commit 1a80dbcb2dbaf6e4c216e62e30fa7d3daa8001ce ]
+commit 1a80dbcb2dbaf6e4c216e62e30fa7d3daa8001ce upstream.
 
 BPF link for some program types is passed as a "context" which can be
 used by those BPF programs to look up additional information. E.g., for
@@ -43,15 +43,13 @@ Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
 Acked-by: Jiri Olsa <jolsa@kernel.org>
 Link: https://lore.kernel.org/r/20240328052426.3042617-2-andrii@kernel.org
 Signed-off-by: Alexei Starovoitov <ast@kernel.org>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 ---
- include/linux/bpf.h      | 16 +++++++++++++++-
- kernel/bpf/syscall.c     | 35 ++++++++++++++++++++++++++++++++---
- kernel/trace/bpf_trace.c |  4 ++--
+ include/linux/bpf.h      |   16 +++++++++++++++-
+ kernel/bpf/syscall.c     |   35 ++++++++++++++++++++++++++++++++---
+ kernel/trace/bpf_trace.c |    4 ++--
  3 files changed, 49 insertions(+), 6 deletions(-)
 
-diff --git a/include/linux/bpf.h b/include/linux/bpf.h
-index 1b6b590451284..893a7ec57bf25 100644
 --- a/include/linux/bpf.h
 +++ b/include/linux/bpf.h
 @@ -1568,12 +1568,26 @@ struct bpf_link {
@@ -82,8 +80,6 @@ index 1b6b590451284..893a7ec57bf25 100644
        int (*detach)(struct bpf_link *link);
        int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
                           struct bpf_prog *old_prog);
-diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
-index 11a8ea854c1d5..83ec7f788a638 100644
 --- a/kernel/bpf/syscall.c
 +++ b/kernel/bpf/syscall.c
 @@ -2895,17 +2895,46 @@ void bpf_link_inc(struct bpf_link *link)
@@ -135,7 +131,7 @@ index 11a8ea854c1d5..83ec7f788a638 100644
  }
  
  static void bpf_link_put_deferred(struct work_struct *work)
-@@ -3415,7 +3444,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
+@@ -3415,7 +3444,7 @@ static int bpf_raw_tp_link_fill_link_inf
  
  static const struct bpf_link_ops bpf_raw_tp_link_lops = {
        .release = bpf_raw_tp_link_release,
@@ -144,11 +140,9 @@ index 11a8ea854c1d5..83ec7f788a638 100644
        .show_fdinfo = bpf_raw_tp_link_show_fdinfo,
        .fill_link_info = bpf_raw_tp_link_fill_link_info,
  };
-diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
-index 22d555a42404f..c7f9236eed628 100644
 --- a/kernel/trace/bpf_trace.c
 +++ b/kernel/trace/bpf_trace.c
-@@ -2713,7 +2713,7 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
+@@ -2713,7 +2713,7 @@ static int bpf_kprobe_multi_link_fill_li
  
  static const struct bpf_link_ops bpf_kprobe_multi_link_lops = {
        .release = bpf_kprobe_multi_link_release,
@@ -157,7 +151,7 @@ index 22d555a42404f..c7f9236eed628 100644
        .fill_link_info = bpf_kprobe_multi_link_fill_link_info,
  };
  
-@@ -3227,7 +3227,7 @@ static int bpf_uprobe_multi_link_fill_link_info(const struct bpf_link *link,
+@@ -3227,7 +3227,7 @@ static int bpf_uprobe_multi_link_fill_li
  
  static const struct bpf_link_ops bpf_uprobe_multi_link_lops = {
        .release = bpf_uprobe_multi_link_release,
@@ -166,6 +160,3 @@ index 22d555a42404f..c7f9236eed628 100644
        .fill_link_info = bpf_uprobe_multi_link_fill_link_info,
  };
  
--- 
-2.43.0
-
index ae58f373d9fc063e69e5bcbd529cc5fff05aca2c..cd45852a2bfb680c20015065438621788f10e2cc 100644 (file)
@@ -132,9 +132,6 @@ usb-typec-ucsi-check-for-notifications-after-init.patch
 drm-amd-flush-gfxoff-requests-in-prepare-stage.patch
 e1000e-minor-flow-correction-in-e1000_shutdown-funct.patch
 e1000e-move-force-smbus-from-enable-ulp-function-to-.patch
-bpf-introduce-in_sleepable-helper.patch
-bpf-move-sleepable-flag-from-bpf_prog_aux-to-bpf_pro.patch
-bpf-support-deferring-bpf_link-dealloc-to-after-rcu-.patch
 mean_and_variance-drop-always-failing-tests.patch
 net-ravb-let-ip-specific-receive-function-to-interro.patch
 net-ravb-always-process-tx-descriptor-ring.patch
@@ -267,3 +264,5 @@ drm-i915-gt-enable-only-one-ccs-for-compute-workload.patch
 drm-xe-use-ring-ops-tlb-invalidation-for-rebinds.patch
 drm-xe-rework-rebinding.patch
 revert-x86-mpparse-register-apic-address-only-once.patch
+bpf-put-uprobe-link-s-path-and-task-in-release-callback.patch
+bpf-support-deferring-bpf_link-dealloc-to-after-rcu-grace-period.patch