From: Greg Kroah-Hartman Date: Thu, 15 Aug 2024 10:17:55 +0000 (+0200) Subject: 6.6-stable patches X-Git-Tag: v4.19.320~32 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=0efae94376e1a03e1bfbb411b5e86316de79f979;p=thirdparty%2Fkernel%2Fstable-queue.git 6.6-stable patches added patches: cgroup-make-operations-on-the-cgroup-root_list-rcu-safe.patch --- diff --git a/queue-6.6/cgroup-make-operations-on-the-cgroup-root_list-rcu-safe.patch b/queue-6.6/cgroup-make-operations-on-the-cgroup-root_list-rcu-safe.patch new file mode 100644 index 00000000000..0abe272d381 --- /dev/null +++ b/queue-6.6/cgroup-make-operations-on-the-cgroup-root_list-rcu-safe.patch @@ -0,0 +1,123 @@ +From d23b5c577715892c87533b13923306acc6243f93 Mon Sep 17 00:00:00 2001 +From: Yafang Shao +Date: Sun, 29 Oct 2023 06:14:29 +0000 +Subject: cgroup: Make operations on the cgroup root_list RCU safe + +From: Yafang Shao + +commit d23b5c577715892c87533b13923306acc6243f93 upstream. + +At present, when we perform operations on the cgroup root_list, we must +hold the cgroup_mutex, which is a relatively heavyweight lock. In reality, +we can make operations on this list RCU-safe, eliminating the need to hold +the cgroup_mutex during traversal. Modifications to the list only occur in +the cgroup root setup and destroy paths, which should be infrequent in a +production environment. In contrast, traversal may occur frequently. +Therefore, making it RCU-safe would be beneficial. 
+ +Signed-off-by: Yafang Shao +Signed-off-by: Tejun Heo +Acked-by: Michal Koutný +Signed-off-by: Greg Kroah-Hartman +--- + include/linux/cgroup-defs.h | 1 + + kernel/cgroup/cgroup-internal.h | 3 ++- + kernel/cgroup/cgroup.c | 23 ++++++++++++++++------- + 3 files changed, 19 insertions(+), 8 deletions(-) + +--- a/include/linux/cgroup-defs.h ++++ b/include/linux/cgroup-defs.h +@@ -558,6 +558,7 @@ struct cgroup_root { + + /* A list running through the active hierarchies */ + struct list_head root_list; ++ struct rcu_head rcu; + + /* Hierarchy-specific flags */ + unsigned int flags; +--- a/kernel/cgroup/cgroup-internal.h ++++ b/kernel/cgroup/cgroup-internal.h +@@ -170,7 +170,8 @@ extern struct list_head cgroup_roots; + + /* iterate across the hierarchies */ + #define for_each_root(root) \ +- list_for_each_entry((root), &cgroup_roots, root_list) ++ list_for_each_entry_rcu((root), &cgroup_roots, root_list, \ ++ lockdep_is_held(&cgroup_mutex)) + + /** + * for_each_subsys - iterate all enabled cgroup subsystems +--- a/kernel/cgroup/cgroup.c ++++ b/kernel/cgroup/cgroup.c +@@ -1313,7 +1313,7 @@ static void cgroup_exit_root_id(struct c + + void cgroup_free_root(struct cgroup_root *root) + { +- kfree(root); ++ kfree_rcu(root, rcu); + } + + static void cgroup_destroy_root(struct cgroup_root *root) +@@ -1346,7 +1346,7 @@ static void cgroup_destroy_root(struct c + spin_unlock_irq(&css_set_lock); + + if (!list_empty(&root->root_list)) { +- list_del(&root->root_list); ++ list_del_rcu(&root->root_list); + cgroup_root_count--; + } + +@@ -1386,7 +1386,15 @@ static inline struct cgroup *__cset_cgro + } + } + +- BUG_ON(!res_cgroup); ++ /* ++ * If cgroup_mutex is not held, the cgrp_cset_link will be freed ++ * before we remove the cgroup root from the root_list. Consequently, ++ * when accessing a cgroup root, the cset_link may have already been ++ * freed, resulting in a NULL res_cgroup. However, by holding the ++ * cgroup_mutex, we ensure that res_cgroup can't be NULL.
++ * If we don't hold cgroup_mutex in the caller, we must do the NULL ++ * check. ++ */ + return res_cgroup; + } + +@@ -1445,7 +1453,6 @@ static struct cgroup *current_cgns_cgrou + static struct cgroup *cset_cgroup_from_root(struct css_set *cset, + struct cgroup_root *root) + { +- lockdep_assert_held(&cgroup_mutex); + lockdep_assert_held(&css_set_lock); + + return __cset_cgroup_from_root(cset, root); +@@ -1453,7 +1460,9 @@ static struct cgroup *cset_cgroup_from_r + + /* + * Return the cgroup for "task" from the given hierarchy. Must be +- * called with cgroup_mutex and css_set_lock held. ++ * called with css_set_lock held to prevent task's groups from being modified. ++ * Must be called with either cgroup_mutex or rcu read lock to prevent the ++ * cgroup root from being destroyed. + */ + struct cgroup *task_cgroup_from_root(struct task_struct *task, + struct cgroup_root *root) +@@ -2014,7 +2023,7 @@ void init_cgroup_root(struct cgroup_fs_c + struct cgroup_root *root = ctx->root; + struct cgroup *cgrp = &root->cgrp; + +- INIT_LIST_HEAD(&root->root_list); ++ INIT_LIST_HEAD_RCU(&root->root_list); + atomic_set(&root->nr_cgrps, 1); + cgrp->root = root; + init_cgroup_housekeeping(cgrp); +@@ -2097,7 +2106,7 @@ int cgroup_setup_root(struct cgroup_root + * care of subsystems' refcounts, which are explicitly dropped in + * the failure exit path. + */ +- list_add(&root->root_list, &cgroup_roots); ++ list_add_rcu(&root->root_list, &cgroup_roots); + cgroup_root_count++; + + /* diff --git a/queue-6.6/series b/queue-6.6/series index ed9afca4c0c..713555c2201 100644 --- a/queue-6.6/series +++ b/queue-6.6/series @@ -18,3 +18,4 @@ mm-gup-stop-abusing-try_grab_folio.patch nvme-pci-add-apst-quirk-for-lenovo-n60z-laptop.patch genirq-cpuhotplug-skip-suspended-interrupts-when-restoring-affinity.patch genirq-cpuhotplug-retry-with-cpu_online_mask-when-migration-fails.patch +cgroup-make-operations-on-the-cgroup-root_list-rcu-safe.patch