From: Sasha Levin
Date: Mon, 18 Mar 2024 00:20:45 +0000 (-0400)
Subject: Fixes for 5.10
X-Git-Tag: v6.8.2~102
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=824e13950c8b9063718e18f562c72ac67f7cc616;p=thirdparty%2Fkernel%2Fstable-queue.git

Fixes for 5.10

Signed-off-by: Sasha Levin
---

diff --git a/queue-5.10/bpf-defer-the-free-of-inner-map-when-necessary.patch b/queue-5.10/bpf-defer-the-free-of-inner-map-when-necessary.patch
new file mode 100644
index 00000000000..0b6b17f673e
--- /dev/null
+++ b/queue-5.10/bpf-defer-the-free-of-inner-map-when-necessary.patch
@@ -0,0 +1,133 @@
+From 3f2c2da3768b2cabf14a051cfc5377348ab776f8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Mon, 11 Mar 2024 17:44:35 -0700
+Subject: bpf: Defer the free of inner map when necessary
+
+From: Hou Tao
+
+[ Upstream commit 876673364161da50eed6b472d746ef88242b2368 ]
+
+When updating or deleting an inner map in a map array or a map htab, the
+map may still be accessed by a non-sleepable or a sleepable bpf program.
+However, bpf_map_fd_put_ptr() decreases the ref-counter of the inner map
+directly through bpf_map_put(); if that ref-counter is the last one
+(which is true in most cases), the inner map will be freed by
+ops->map_free() in a kworker. But for now, most .map_free() callbacks
+don't use synchronize_rcu() or its variants to wait for an RCU grace
+period to elapse, so after the invocation of ops->map_free completes, a
+bpf program which is still accessing the inner map may run into a
+use-after-free.
+
+Fix the free of the inner map by invoking bpf_map_free_deferred() after
+both one RCU grace period and one tasks trace RCU grace period, provided
+the inner map has previously been removed from the outer map. The
+deferment is accomplished by using call_rcu() or call_rcu_tasks_trace()
+when releasing the last ref-counter of the bpf map. The newly-added
+rcu_head field in bpf_map shares the same storage space with the work
+field to reduce the size of bpf_map.
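
To see the whole mechanism in one place, here is a condensed C sketch of
the release path this patch builds. It consolidates the hunks below and
simplifies the real 5.10 structures; names follow the patch except
__bpf_map_put_last(), which is an invented stand-in for the tail of
__bpf_map_put(). It is illustrative only, not part of the diff:

	/* Sketch: rcu_head and work_struct can share storage because they
	 * are never needed at the same time -- rcu while waiting out grace
	 * periods, work only once the actual free is scheduled.
	 */
	struct bpf_map {
		atomic64_t refcnt;
		bool free_after_mult_rcu_gp;	/* set when removed from an outer map */
		union {
			struct work_struct work;
			struct rcu_head rcu;
		};
	};

	static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
	{
		struct bpf_map *map = container_of(rcu, struct bpf_map, rcu);

		/* All required grace periods have elapsed; ops->map_free()
		 * may sleep, so do the actual free from a kworker.
		 */
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}

	/* Runs after one tasks trace RCU grace period has elapsed. */
	static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
	{
		if (rcu_trace_implies_rcu_gp())
			bpf_map_free_rcu_gp(rcu);
		else
			call_rcu(rcu, bpf_map_free_rcu_gp);	/* chain a plain RCU GP */
	}

	/* Invented helper: what happens when the last ref-counter drops. */
	static void __bpf_map_put_last(struct bpf_map *map)
	{
		if (READ_ONCE(map->free_after_mult_rcu_gp)) {
			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
		} else {
			INIT_WORK(&map->work, bpf_map_free_deferred);
			schedule_work(&map->work);
		}
	}

The union is what keeps sizeof(struct bpf_map) unchanged: the rcu_head is
needed only while the map waits out grace periods, and the work_struct
only after that wait is over, so the two never overlap in time.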
+
+Fixes: bba1dc0b55ac ("bpf: Remove redundant synchronize_rcu.")
+Fixes: 638e4b825d52 ("bpf: Allows per-cpu maps and map-in-map in sleepable programs")
+Signed-off-by: Hou Tao
+Link: https://lore.kernel.org/r/20231204140425.1480317-5-houtao@huaweicloud.com
+Signed-off-by: Alexei Starovoitov
+Signed-off-by: Sasha Levin
+(cherry picked from commit 62fca83303d608ad4fec3f7428c8685680bb01b0)
+Signed-off-by: Robert Kolchmeyer
+Signed-off-by: Sasha Levin
+---
+ include/linux/bpf.h     |  7 ++++++-
+ kernel/bpf/map_in_map.c | 11 ++++++++---
+ kernel/bpf/syscall.c    | 26 ++++++++++++++++++++++++--
+ 3 files changed, 38 insertions(+), 6 deletions(-)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index bfdf40be5360a..a75faf437e750 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -175,9 +175,14 @@ struct bpf_map {
+	 */
+	atomic64_t refcnt ____cacheline_aligned;
+	atomic64_t usercnt;
+-	struct work_struct work;
++	/* rcu is used before freeing and work is only used during freeing */
++	union {
++		struct work_struct work;
++		struct rcu_head rcu;
++	};
+	struct mutex freeze_mutex;
+	atomic64_t writecnt;
++	bool free_after_mult_rcu_gp;
+ };
+ 
+ static inline bool map_value_has_spin_lock(const struct bpf_map *map)
+diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
+index 0cf4cb6858105..caa1a17cbae15 100644
+--- a/kernel/bpf/map_in_map.c
++++ b/kernel/bpf/map_in_map.c
+@@ -102,10 +102,15 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
+ 
+ void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
+ {
+-	/* ptr->ops->map_free() has to go through one
+-	 * rcu grace period by itself.
++	struct bpf_map *inner_map = ptr;
++
++	/* The inner map may still be used by both non-sleepable and sleepable
++	 * bpf program, so free it after one RCU grace period and one tasks
++	 * trace RCU grace period.
+	 */
+-	bpf_map_put(ptr);
++	if (need_defer)
++		WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true);
++	bpf_map_put(inner_map);
+ }
+ 
+ u32 bpf_map_fd_sys_lookup_elem(void *ptr)
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 16affa09db5c9..e1bee8cd34044 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -493,6 +493,25 @@ static void bpf_map_put_uref(struct bpf_map *map)
+	}
+ }
+ 
++static void bpf_map_free_in_work(struct bpf_map *map)
++{
++	INIT_WORK(&map->work, bpf_map_free_deferred);
++	schedule_work(&map->work);
++}
++
++static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
++{
++	bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
++}
++
++static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
++{
++	if (rcu_trace_implies_rcu_gp())
++		bpf_map_free_rcu_gp(rcu);
++	else
++		call_rcu(rcu, bpf_map_free_rcu_gp);
++}
++
+ /* decrement map refcnt and schedule it for freeing via workqueue
+  * (unrelying map implementation ops->map_free() might sleep)
+  */
+@@ -502,8 +521,11 @@ static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
+		/* bpf_map_free_id() must be called first */
+		bpf_map_free_id(map, do_idr_lock);
+		btf_put(map->btf);
+-		INIT_WORK(&map->work, bpf_map_free_deferred);
+-		schedule_work(&map->work);
++
++		if (READ_ONCE(map->free_after_mult_rcu_gp))
++			call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
++		else
++			bpf_map_free_in_work(map);
+	}
+ }
+ 
+-- 
+2.43.0
+
diff --git a/queue-5.10/io_uring-drop-any-code-related-to-scm_rights.patch b/queue-5.10/io_uring-drop-any-code-related-to-scm_rights.patch
new file mode 100644
index 00000000000..0d888f98eb7
--- /dev/null
+++ b/queue-5.10/io_uring-drop-any-code-related-to-scm_rights.patch
@@ -0,0 +1,300 @@
+From a458ebb6f7f776a97a92a459297a763cddd611fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 13 Mar 2024 17:59:01 -0600
+Subject: io_uring: drop any code related to SCM_RIGHTS
+
+From: Jens Axboe
+
+Commit 6e5e6d274956305f1fc0340522b38f5f5be74bdb upstream.
+
+This is dead code after we dropped support for passing io_uring fds
+over SCM_RIGHTS; get rid of it.
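
For orientation, once the SCM_RIGHTS machinery is gone the fixed-file
teardown reduces to a plain loop of fput() calls. A simplified C sketch
of the resulting unregister path, following the hunks below (it is an
illustration of the post-patch shape, not the literal backported code):

	static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
	{
		int i;

		/* No UNIX-gc bookkeeping left: just drop each registered file. */
		for (i = 0; i < ctx->nr_user_files; i++) {
			struct file *file = io_file_from_index(ctx, i);

			if (file)
				fput(file);
		}
		io_free_file_tables(&ctx->file_table);
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
	}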
+
+Signed-off-by: Jens Axboe
+Signed-off-by: Sasha Levin
+---
+ io_uring/io_uring.c | 211 +------------------------------------------
+ 1 file changed, 3 insertions(+), 208 deletions(-)
+
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index a80c808d3a0ef..67a355f27ad80 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -62,7 +62,6 @@
+ #include <linux/net.h>
+ #include <net/sock.h>
+ #include <net/af_unix.h>
+-#include <net/scm.h>
+ #include <linux/anon_inodes.h>
+ #include <linux/sched/mm.h>
+ #include <linux/uaccess.h>
+@@ -440,9 +439,6 @@ struct io_ring_ctx {
+ 
+	/* Keep this last, we don't need it for the fast path */
+	struct {
+-#if defined(CONFIG_UNIX)
+-		struct socket *ring_sock;
+-#endif
+		/* hashed buffered write serialization */
+		struct io_wq_hash *hash_map;
+ 
+@@ -7976,15 +7972,6 @@ static void io_free_file_tables(struct io_file_table *table)
+ 
+ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
+ {
+-#if defined(CONFIG_UNIX)
+-	if (ctx->ring_sock) {
+-		struct sock *sock = ctx->ring_sock->sk;
+-		struct sk_buff *skb;
+-
+-		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
+-			kfree_skb(skb);
+-	}
+-#else
+	int i;
+ 
+	for (i = 0; i < ctx->nr_user_files; i++) {
+@@ -7994,7 +7981,6 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
+		if (file)
+			fput(file);
+	}
+-#endif
+	io_free_file_tables(&ctx->file_table);
+	io_rsrc_data_free(ctx->file_data);
+	ctx->file_data = NULL;
+@@ -8146,170 +8132,11 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
+	return sqd;
+ }
+ 
+-#if defined(CONFIG_UNIX)
+-/*
+- * Ensure the UNIX gc is aware of our file set, so we are certain that
+- * the io_uring can be safely unregistered on process exit, even if we have
+- * loops in the file referencing.
+- */
+-static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
+-{
+-	struct sock *sk = ctx->ring_sock->sk;
+-	struct scm_fp_list *fpl;
+-	struct sk_buff *skb;
+-	int i, nr_files;
+-
+-	fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
+-	if (!fpl)
+-		return -ENOMEM;
+-
+-	skb = alloc_skb(0, GFP_KERNEL);
+-	if (!skb) {
+-		kfree(fpl);
+-		return -ENOMEM;
+-	}
+-
+-	skb->sk = sk;
+-	skb->scm_io_uring = 1;
+-
+-	nr_files = 0;
+-	fpl->user = get_uid(current_user());
+-	for (i = 0; i < nr; i++) {
+-		struct file *file = io_file_from_index(ctx, i + offset);
+-
+-		if (!file)
+-			continue;
+-		fpl->fp[nr_files] = get_file(file);
+-		unix_inflight(fpl->user, fpl->fp[nr_files]);
+-		nr_files++;
+-	}
+-
+-	if (nr_files) {
+-		fpl->max = SCM_MAX_FD;
+-		fpl->count = nr_files;
+-		UNIXCB(skb).fp = fpl;
+-		skb->destructor = unix_destruct_scm;
+-		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+-		skb_queue_head(&sk->sk_receive_queue, skb);
+-
+-		for (i = 0; i < nr; i++) {
+-			struct file *file = io_file_from_index(ctx, i + offset);
+-
+-			if (file)
+-				fput(file);
+-		}
+-	} else {
+-		kfree_skb(skb);
+-		free_uid(fpl->user);
+-		kfree(fpl);
+-	}
+-
+-	return 0;
+-}
+-
+-/*
+- * If UNIX sockets are enabled, fd passing can cause a reference cycle which
+- * causes regular reference counting to break down. We rely on the UNIX
+- * garbage collection to take care of this problem for us.
+- */
+-static int io_sqe_files_scm(struct io_ring_ctx *ctx)
+-{
+-	unsigned left, total;
+-	int ret = 0;
+-
+-	total = 0;
+-	left = ctx->nr_user_files;
+-	while (left) {
+-		unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
+-
+-		ret = __io_sqe_files_scm(ctx, this_files, total);
+-		if (ret)
+-			break;
+-		left -= this_files;
+-		total += this_files;
+-	}
+-
+-	if (!ret)
+-		return 0;
+-
+-	while (total < ctx->nr_user_files) {
+-		struct file *file = io_file_from_index(ctx, total);
+-
+-		if (file)
+-			fput(file);
+-		total++;
+-	}
+-
+-	return ret;
+-}
+-#else
+-static int io_sqe_files_scm(struct io_ring_ctx *ctx)
+-{
+-	return 0;
+-}
+-#endif
+-
+ static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
+ {
+	struct file *file = prsrc->file;
+-#if defined(CONFIG_UNIX)
+-	struct sock *sock = ctx->ring_sock->sk;
+-	struct sk_buff_head list, *head = &sock->sk_receive_queue;
+-	struct sk_buff *skb;
+-	int i;
+ 
+-	__skb_queue_head_init(&list);
+-
+-	/*
+-	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
+-	 * remove this entry and rearrange the file array.
+-	 */
+-	skb = skb_dequeue(head);
+-	while (skb) {
+-		struct scm_fp_list *fp;
+-
+-		fp = UNIXCB(skb).fp;
+-		for (i = 0; i < fp->count; i++) {
+-			int left;
+-
+-			if (fp->fp[i] != file)
+-				continue;
+-
+-			unix_notinflight(fp->user, fp->fp[i]);
+-			left = fp->count - 1 - i;
+-			if (left) {
+-				memmove(&fp->fp[i], &fp->fp[i + 1],
+-					left * sizeof(struct file *));
+-			}
+-			fp->count--;
+-			if (!fp->count) {
+-				kfree_skb(skb);
+-				skb = NULL;
+-			} else {
+-				__skb_queue_tail(&list, skb);
+-			}
+-			fput(file);
+-			file = NULL;
+-			break;
+-		}
+-
+-		if (!file)
+-			break;
+-
+-		__skb_queue_tail(&list, skb);
+-
+-		skb = skb_dequeue(head);
+-	}
+-
+-	if (skb_peek(&list)) {
+-		spin_lock_irq(&head->lock);
+-		while ((skb = __skb_dequeue(&list)) != NULL)
+-			__skb_queue_tail(head, skb);
+-		spin_unlock_irq(&head->lock);
+-	}
+-#else
+	fput(file);
+-#endif
+ }
+ 
+ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
+@@ -8420,12 +8247,6 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
+		io_fixed_file_set(io_fixed_file_slot(&ctx->file_table, i), file);
+	}
+ 
+-	ret = io_sqe_files_scm(ctx);
+-	if (ret) {
+-		__io_sqe_files_unregister(ctx);
+-		return ret;
+-	}
+-
+	io_rsrc_node_switch(ctx, NULL);
+	return ret;
+ out_fput:
+@@ -9382,12 +9203,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
+	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
+	WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
+ 
+-#if defined(CONFIG_UNIX)
+-	if (ctx->ring_sock) {
+-		ctx->ring_sock->file = NULL; /* so that iput() is called */
+-		sock_release(ctx->ring_sock);
+-	}
+-#endif
+	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
+ 
+	if (ctx->mm_account) {
+@@ -10329,32 +10144,12 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
+ /*
+  * Allocate an anonymous fd, this is what constitutes the application
+  * visible backing of an io_uring instance. The application mmaps this
+- * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
+- * we have to tie this fd to a socket for file garbage collection purposes.
++ * fd to gain access to the SQ/CQ ring details.
+ */
+ static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
+ {
+-	struct file *file;
+-#if defined(CONFIG_UNIX)
+-	int ret;
+-
+-	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
+-				&ctx->ring_sock);
+-	if (ret)
+-		return ERR_PTR(ret);
+-#endif
+-
+-	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
+-					O_RDWR | O_CLOEXEC);
+-#if defined(CONFIG_UNIX)
+-	if (IS_ERR(file)) {
+-		sock_release(ctx->ring_sock);
+-		ctx->ring_sock = NULL;
+-	} else {
+-		ctx->ring_sock->file = file;
+-	}
+-#endif
+-	return file;
++	return anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
++				  O_RDWR | O_CLOEXEC);
+ }
+ 
+ static int io_uring_create(unsigned entries, struct io_uring_params *p,
+-- 
+2.43.0
+
diff --git a/queue-5.10/io_uring-unix-drop-usage-of-io_uring-socket.patch b/queue-5.10/io_uring-unix-drop-usage-of-io_uring-socket.patch
new file mode 100644
index 00000000000..2637b999c93
--- /dev/null
+++ b/queue-5.10/io_uring-unix-drop-usage-of-io_uring-socket.patch
@@ -0,0 +1,131 @@
+From 931f025b4ea6cdf87bad8feb503ee592a7de87fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Wed, 13 Mar 2024 17:54:49 -0600
+Subject: io_uring/unix: drop usage of io_uring socket
+
+From: Jens Axboe
+
+Commit a4104821ad651d8a0b374f0b2474c345bbb42f82 upstream.
+
+Since we no longer allow sending io_uring fds over SCM_RIGHTS, move to
+using io_is_uring_fops() to detect whether this is an io_uring fd or not.
+With that done, kill off io_uring_get_socket() as nobody calls it
+anymore.
+
+This is in preparation for yanking out the rest of the core related to
+unix gc with io_uring.
+
+Signed-off-by: Jens Axboe
+Signed-off-by: Sasha Levin
+---
+ include/linux/io_uring.h | 10 +++++-----
+ io_uring/io_uring.c      | 18 +++++-------------
+ net/core/scm.c           |  2 +-
+ net/unix/scm.c           |  4 +---
+ 4 files changed, 12 insertions(+), 22 deletions(-)
+
+diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
+index 649a4d7c241bc..55d09f594cd14 100644
+--- a/include/linux/io_uring.h
++++ b/include/linux/io_uring.h
+@@ -6,9 +6,9 @@
+ #include <linux/xarray.h>
+ 
+ #if defined(CONFIG_IO_URING)
+-struct sock *io_uring_get_socket(struct file *file);
+ void __io_uring_cancel(bool cancel_all);
+ void __io_uring_free(struct task_struct *tsk);
++bool io_is_uring_fops(struct file *file);
+ 
+ static inline void io_uring_files_cancel(void)
+ {
+@@ -26,10 +26,6 @@ static inline void io_uring_free(struct task_struct *tsk)
+		__io_uring_free(tsk);
+ }
+ #else
+-static inline struct sock *io_uring_get_socket(struct file *file)
+-{
+-	return NULL;
+-}
+ static inline void io_uring_task_cancel(void)
+ {
+ }
+@@ -39,6 +35,10 @@ static inline void io_uring_files_cancel(void)
+ static inline void io_uring_free(struct task_struct *tsk)
+ {
+ }
++static inline bool io_is_uring_fops(struct file *file)
++{
++	return false;
++}
+ #endif
+ 
+ #endif
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 936abc6ee450c..a80c808d3a0ef 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -1113,19 +1113,6 @@ static struct kmem_cache *req_cachep;
+ 
+ static const struct file_operations io_uring_fops;
+ 
+-struct sock *io_uring_get_socket(struct file *file)
+-{
+-#if defined(CONFIG_UNIX)
+-	if (file->f_op == &io_uring_fops) {
+-		struct io_ring_ctx *ctx = file->private_data;
+-
+-		return ctx->ring_sock->sk;
+-	}
+-#endif
+-	return NULL;
+-}
+-EXPORT_SYMBOL(io_uring_get_socket);
+-
+ static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
+ {
+	if (!*locked) {
+@@ -10275,6 +10262,11 @@ static const struct file_operations io_uring_fops = {
+ #endif
+ };
+ 
++bool io_is_uring_fops(struct file *file)
++{
++	return file->f_op == &io_uring_fops;
++}
++
+ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
+				  struct io_uring_params *p)
+ {
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 3c7f160720d34..d09849cb60f08 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -105,7 +105,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+		if (fd < 0 || !(file = fget_raw(fd)))
+			return -EBADF;
+		/* don't allow io_uring files */
+-		if (io_uring_get_socket(file)) {
++		if (io_is_uring_fops(file)) {
+			fput(file);
+			return -EINVAL;
+		}
+diff --git a/net/unix/scm.c b/net/unix/scm.c
+index e8e2a00bb0f58..d1048b4c2baaf 100644
+--- a/net/unix/scm.c
++++ b/net/unix/scm.c
+@@ -34,10 +34,8 @@ struct sock *unix_get_socket(struct file *filp)
+		/* PF_UNIX ? */
+		if (s && sock->ops && sock->ops->family == PF_UNIX)
+			u_sock = s;
+-	} else {
+-		/* Could be an io_uring instance */
+-		u_sock = io_uring_get_socket(filp);
+	}
++
+	return u_sock;
+ }
+ EXPORT_SYMBOL(unix_get_socket);
+-- 
+2.43.0
+
diff --git a/queue-5.10/rcu-tasks-provide-rcu_trace_implies_rcu_gp.patch b/queue-5.10/rcu-tasks-provide-rcu_trace_implies_rcu_gp.patch
new file mode 100644
index 00000000000..8ecc4ec5328
--- /dev/null
+++ b/queue-5.10/rcu-tasks-provide-rcu_trace_implies_rcu_gp.patch
@@ -0,0 +1,70 @@
+From d5424b4e3563175e4f0ff119b143dc445039ac29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin
+Date: Mon, 11 Mar 2024 17:44:34 -0700
+Subject: rcu-tasks: Provide rcu_trace_implies_rcu_gp()
+
+From: Paul E. McKenney
+
+[ Upstream commit e6c86c513f440bec5f1046539c7e3c6c653842da ]
+
+As an accident of implementation, an RCU Tasks Trace grace period also
+acts as an RCU grace period. However, this could change at any time.
+This commit therefore creates an rcu_trace_implies_rcu_gp() that currently
+returns true to codify this accident. Code relying on this accident
+must call this function to verify that this accident is still happening.
+
+Reported-by: Hou Tao
+Signed-off-by: Paul E. McKenney
+Cc: Alexei Starovoitov
+Cc: Martin KaFai Lau
+Link: https://lore.kernel.org/r/20221014113946.965131-2-houtao@huaweicloud.com
+Signed-off-by: Alexei Starovoitov
+Stable-dep-of: 876673364161 ("bpf: Defer the free of inner map when necessary")
+Signed-off-by: Sasha Levin
+(cherry picked from commit 10108826191ab30388e8ae9d54505a628f78a7ec)
+Signed-off-by: Robert Kolchmeyer
+Signed-off-by: Sasha Levin
+---
+ include/linux/rcupdate.h | 12 ++++++++++++
+ kernel/rcu/tasks.h       |  2 ++
+ 2 files changed, 14 insertions(+)
+
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 8716a17063518..0122c03da24af 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -201,6 +201,18 @@ static inline void exit_tasks_rcu_stop(void) { }
+ static inline void exit_tasks_rcu_finish(void) { }
+ #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
+ 
++/**
++ * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period?
++ *
++ * As an accident of implementation, an RCU Tasks Trace grace period also
++ * acts as an RCU grace period. However, this could change at any time.
++ * Code relying on this accident must call this function to verify that
++ * this accident is still happening.
++ *
++ * You have been warned!
++ */
++static inline bool rcu_trace_implies_rcu_gp(void) { return true; }
++
+ /**
+  * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
+  *
+diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
+index c5624ab0580c5..105fdc2bb004c 100644
+--- a/kernel/rcu/tasks.h
++++ b/kernel/rcu/tasks.h
+@@ -1015,6 +1015,8 @@ static void rcu_tasks_trace_postscan(struct list_head *hop)
+ 
+	// Wait for late-stage exiting tasks to finish exiting.
+	// These might have passed the call to exit_tasks_rcu_finish().
++
++	// If you remove the following line, update rcu_trace_implies_rcu_gp()!!!
+	synchronize_rcu();
+	// Any tasks that exit after this point will set ->trc_reader_checked.
+ }
+-- 
+2.43.0
+
diff --git a/queue-5.10/series b/queue-5.10/series
new file mode 100644
index 00000000000..8fca5ae2cdd
--- /dev/null
+++ b/queue-5.10/series
@@ -0,0 +1,4 @@
+io_uring-unix-drop-usage-of-io_uring-socket.patch
+io_uring-drop-any-code-related-to-scm_rights.patch
+rcu-tasks-provide-rcu_trace_implies_rcu_gp.patch
+bpf-defer-the-free-of-inner-map-when-necessary.patch