From: Greg Kroah-Hartman
Date: Fri, 15 Aug 2025 17:07:27 +0000 (+0200)
Subject: 5.15-stable patches
X-Git-Tag: v6.12.43~76
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=fcda3857012fa5cd01fa1baa54e8d9217425e155;p=thirdparty%2Fkernel%2Fstable-queue.git

5.15-stable patches

added patches:
	eventpoll-fix-semi-unbounded-recursion.patch
---

diff --git a/queue-5.15/eventpoll-fix-semi-unbounded-recursion.patch b/queue-5.15/eventpoll-fix-semi-unbounded-recursion.patch
new file mode 100644
index 0000000000..fbbbccda2d
--- /dev/null
+++ b/queue-5.15/eventpoll-fix-semi-unbounded-recursion.patch
@@ -0,0 +1,166 @@
+From f2e467a48287c868818085aa35389a224d226732 Mon Sep 17 00:00:00 2001
+From: Jann Horn
+Date: Fri, 11 Jul 2025 18:33:36 +0200
+Subject: eventpoll: Fix semi-unbounded recursion
+
+From: Jann Horn
+
+commit f2e467a48287c868818085aa35389a224d226732 upstream.
+
+Ensure that epoll instances can never form a graph deeper than
+EP_MAX_NESTS+1 links.
+
+Currently, ep_loop_check_proc() ensures that the graph is loop-free and
+does some recursion depth checks, but those recursion depth checks don't
+limit the depth of the resulting tree for two reasons:
+
+ - They don't look upwards in the tree.
+ - If there are multiple downwards paths of different lengths, only one of
+   the paths is actually considered for the depth check since commit
+   28d82dc1c4ed ("epoll: limit paths").
+
+Essentially, the current recursion depth check in ep_loop_check_proc() just
+serves to prevent it from recursing too deeply while checking for loops.
+
+A more thorough check is done in reverse_path_check() after the new graph
+edge has already been created; this checks, among other things, that no
+paths going upwards from any non-epoll file with a length of more than 5
+edges exist. However, this check does not apply to epoll files.
+
+As a result, it is possible to recurse to a depth of at least roughly 500,
+tested on v6.15. (I am unsure if deeper recursion is possible; and this may
+have changed with commit 8c44dac8add7 ("eventpoll: Fix priority inversion
+problem").)
+
+To fix it:
+
+1. In ep_loop_check_proc(), note the subtree depth of each visited node,
+and use subtree depths for the total depth calculation even when a subtree
+has already been visited.
+2. Add ep_get_upwards_depth_proc() for similarly determining the maximum
+depth of an upwards walk.
+3. In ep_loop_check(), use these values to limit the total path length
+between epoll nodes to EP_MAX_NESTS edges.
+
+Fixes: 22bacca48a17 ("epoll: prevent creating circular epoll structures")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jann Horn
+Link: https://lore.kernel.org/20250711-epoll-recursion-fix-v1-1-fb2457c33292@google.com
+Signed-off-by: Christian Brauner
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/eventpoll.c | 60 +++++++++++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 46 insertions(+), 14 deletions(-)
+
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -216,6 +216,7 @@ struct eventpoll {
+ 	/* used to optimize loop detection check */
+ 	u64 gen;
+ 	struct hlist_head refs;
++	u8 loop_check_depth;
+ 
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ 	/* used to track busy poll napi_id */
+@@ -1944,23 +1945,24 @@ static int ep_poll(struct eventpoll *ep,
+ }
+ 
+ /**
+- * ep_loop_check_proc - verify that adding an epoll file inside another
+- *                      epoll structure does not violate the constraints, in
+- *                      terms of closed loops, or too deep chains (which can
+- *                      result in excessive stack usage).
++ * ep_loop_check_proc - verify that adding an epoll file @ep inside another
++ *                      epoll file does not create closed loops, and
++ *                      determine the depth of the subtree starting at @ep
+  *
+  * @ep: the &struct eventpoll to be currently checked.
+  * @depth: Current depth of the path being checked.
+  *
+- * Return: %zero if adding the epoll @file inside current epoll
+- * structure @ep does not violate the constraints, or %-1 otherwise.
++ * Return: depth of the subtree, or INT_MAX if we found a loop or went too deep.
+  */
+ static int ep_loop_check_proc(struct eventpoll *ep, int depth)
+ {
+-	int error = 0;
++	int result = 0;
+ 	struct rb_node *rbp;
+ 	struct epitem *epi;
+ 
++	if (ep->gen == loop_check_gen)
++		return ep->loop_check_depth;
++
+ 	mutex_lock_nested(&ep->mtx, depth + 1);
+ 	ep->gen = loop_check_gen;
+ 	for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {
+@@ -1968,13 +1970,11 @@ static int ep_loop_check_proc(struct eve
+ 		if (unlikely(is_file_epoll(epi->ffd.file))) {
+ 			struct eventpoll *ep_tovisit;
+ 			ep_tovisit = epi->ffd.file->private_data;
+-			if (ep_tovisit->gen == loop_check_gen)
+-				continue;
+ 			if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)
+-				error = -1;
++				result = INT_MAX;
+ 			else
+-				error = ep_loop_check_proc(ep_tovisit, depth + 1);
+-			if (error != 0)
++				result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1);
++			if (result > EP_MAX_NESTS)
+ 				break;
+ 		} else {
+ 			/*
+@@ -1988,9 +1988,27 @@ static int ep_loop_check_proc(struct eve
+ 			list_file(epi->ffd.file);
+ 		}
+ 	}
++	ep->loop_check_depth = result;
+ 	mutex_unlock(&ep->mtx);
+ 
+-	return error;
++	return result;
++}
++
++/**
++ * ep_get_upwards_depth_proc - determine depth of @ep when traversed upwards
++ */
++static int ep_get_upwards_depth_proc(struct eventpoll *ep, int depth)
++{
++	int result = 0;
++	struct epitem *epi;
++
++	if (ep->gen == loop_check_gen)
++		return ep->loop_check_depth;
++	hlist_for_each_entry_rcu(epi, &ep->refs, fllink)
++		result = max(result, ep_get_upwards_depth_proc(epi->ep, depth + 1) + 1);
++	ep->gen = loop_check_gen;
++	ep->loop_check_depth = result;
++	return result;
+ }
+ 
+ /**
+@@ -2006,8 +2024,22 @@ static int ep_loop_check_proc(struct eve
+  */
+ static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)
+ {
++	int depth, upwards_depth;
++
+ 	inserting_into = ep;
+-	return ep_loop_check_proc(to, 0);
++	/*
++	 * Check how deep down we can get from @to, and whether it is possible
++	 * to loop up to @ep.
++	 */
++	depth = ep_loop_check_proc(to, 0);
++	if (depth > EP_MAX_NESTS)
++		return -1;
++	/* Check how far up we can go from @ep. */
++	rcu_read_lock();
++	upwards_depth = ep_get_upwards_depth_proc(ep, 0);
++	rcu_read_unlock();
++
++	return (depth+1+upwards_depth > EP_MAX_NESTS) ? -1 : 0;
+ }
+ 
+ static void clear_tfile_check_list(void)
diff --git a/queue-5.15/series b/queue-5.15/series
index 9c37271aa9..afaa300bbb 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -267,3 +267,4 @@ nfsd-handle-get_client_locked-failure-in-nfsd4_setclientid_confirm.patch
 nfsd-detect-mismatch-of-file-handle-and-delegation-stateid-in-open-op.patch
 sunvdc-balance-device-refcount-in-vdc_port_mpgroup_check.patch
 fs-prevent-file-descriptor-table-allocations-exceeding-int_max.patch
+eventpoll-fix-semi-unbounded-recursion.patch
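
For context, a minimal userspace sketch (illustrative only, not part of the patch) of the kind of epoll nesting chain this change bounds: each new epoll instance is registered in the previously created one, adding one edge per iteration. With the fix applied, epoll_ctl() is expected to fail with ELOOP once the path between epoll files would exceed EP_MAX_NESTS edges (4 in current kernels), instead of accepting chains several hundred levels deep.

/*
 * Illustrative sketch (not from the patch): chain epoll instances by
 * registering each new epoll fd in the previously created one. With the
 * fix, the EP_MAX_NESTS limit makes epoll_ctl() fail with ELOOP once the
 * chain gets too deep; before it, far deeper chains were accepted.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>

int main(void)
{
	int prev = epoll_create1(0);

	if (prev < 0) {
		perror("epoll_create1");
		return 1;
	}

	for (int depth = 1; depth <= 100; depth++) {
		int next = epoll_create1(0);

		if (next < 0) {
			perror("epoll_create1");
			return 1;
		}

		/* Make the previous epoll instance watch the new one: one more edge. */
		struct epoll_event ev = { .events = EPOLLIN, .data.fd = next };
		if (epoll_ctl(prev, EPOLL_CTL_ADD, next, &ev) < 0) {
			printf("nesting rejected at depth %d (%s)\n",
			       depth, strerror(errno));
			return 0;
		}
		prev = next;
	}

	printf("built a chain of 100 nested epoll instances\n");
	return 0;
}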