git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 15 Aug 2024 10:09:07 +0000 (12:09 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 15 Aug 2024 10:09:07 +0000 (12:09 +0200)
added patches:
cgroup-make-operations-on-the-cgroup-root_list-rcu-safe.patch

queue-6.1/cgroup-make-operations-on-the-cgroup-root_list-rcu-safe.patch [new file with mode: 0644]
queue-6.1/series

diff --git a/queue-6.1/cgroup-make-operations-on-the-cgroup-root_list-rcu-safe.patch b/queue-6.1/cgroup-make-operations-on-the-cgroup-root_list-rcu-safe.patch
new file mode 100644
index 0000000..d9004a8
--- /dev/null
+++ b/queue-6.1/cgroup-make-operations-on-the-cgroup-root_list-rcu-safe.patch
@@ -0,0 +1,123 @@
+From d23b5c577715892c87533b13923306acc6243f93 Mon Sep 17 00:00:00 2001
+From: Yafang Shao <laoar.shao@gmail.com>
+Date: Sun, 29 Oct 2023 06:14:29 +0000
+Subject: cgroup: Make operations on the cgroup root_list RCU safe
+
+From: Yafang Shao <laoar.shao@gmail.com>
+
+commit d23b5c577715892c87533b13923306acc6243f93 upstream.
+
+At present, when we perform operations on the cgroup root_list, we must
+hold the cgroup_mutex, which is a relatively heavyweight lock. In reality,
+we can make operations on this list RCU-safe, eliminating the need to hold
+the cgroup_mutex during traversal. Modifications to the list only occur in
+the cgroup root setup and destroy paths, which should be infrequent in a
+production environment. In contrast, traversal may occur frequently.
+Therefore, making it RCU-safe would be beneficial.
+
+Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Michal Koutný <mkoutny@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/cgroup-defs.h     |    1 +
+ kernel/cgroup/cgroup-internal.h |    3 ++-
+ kernel/cgroup/cgroup.c          |   23 ++++++++++++++++-------
+ 3 files changed, 19 insertions(+), 8 deletions(-)
+
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -540,6 +540,7 @@ struct cgroup_root {
+       /* A list running through the active hierarchies */
+       struct list_head root_list;
++      struct rcu_head rcu;
+       /* Hierarchy-specific flags */
+       unsigned int flags;
+--- a/kernel/cgroup/cgroup-internal.h
++++ b/kernel/cgroup/cgroup-internal.h
+@@ -170,7 +170,8 @@ extern struct list_head cgroup_roots;
+ /* iterate across the hierarchies */
+ #define for_each_root(root)                                           \
+-      list_for_each_entry((root), &cgroup_roots, root_list)
++      list_for_each_entry_rcu((root), &cgroup_roots, root_list,       \
++                              lockdep_is_held(&cgroup_mutex))
+ /**
+  * for_each_subsys - iterate all enabled cgroup subsystems
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -1346,7 +1346,7 @@ static void cgroup_exit_root_id(struct c
+ void cgroup_free_root(struct cgroup_root *root)
+ {
+-      kfree(root);
++      kfree_rcu(root, rcu);
+ }
+ static void cgroup_destroy_root(struct cgroup_root *root)
+@@ -1379,7 +1379,7 @@ static void cgroup_destroy_root(struct c
+       spin_unlock_irq(&css_set_lock);
+       if (!list_empty(&root->root_list)) {
+-              list_del(&root->root_list);
++              list_del_rcu(&root->root_list);
+               cgroup_root_count--;
+       }
+@@ -1419,7 +1419,15 @@ static inline struct cgroup *__cset_cgro
+               }
+       }
+-      BUG_ON(!res_cgroup);
++      /*
++       * If cgroup_mutex is not held, the cgrp_cset_link will be freed
++       * before we remove the cgroup root from the root_list. Consequently,
++       * when accessing a cgroup root, the cset_link may have already been
++       * freed, resulting in a NULL res_cgroup. However, by holding the
++       * cgroup_mutex, we ensure that res_cgroup can't be NULL.
++       * If we don't hold cgroup_mutex in the caller, we must do the NULL
++       * check.
++       */
+       return res_cgroup;
+ }
+@@ -1468,7 +1476,6 @@ static struct cgroup *current_cgns_cgrou
+ static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
+                                           struct cgroup_root *root)
+ {
+-      lockdep_assert_held(&cgroup_mutex);
+       lockdep_assert_held(&css_set_lock);
+       return __cset_cgroup_from_root(cset, root);
+@@ -1476,7 +1483,9 @@ static struct cgroup *cset_cgroup_from_r
+ /*
+  * Return the cgroup for "task" from the given hierarchy. Must be
+- * called with cgroup_mutex and css_set_lock held.
++ * called with css_set_lock held to prevent task's groups from being modified.
++ * Must be called with either cgroup_mutex or rcu read lock to prevent the
++ * cgroup root from being destroyed.
+  */
+ struct cgroup *task_cgroup_from_root(struct task_struct *task,
+                                    struct cgroup_root *root)
+@@ -2037,7 +2046,7 @@ void init_cgroup_root(struct cgroup_fs_c
+       struct cgroup_root *root = ctx->root;
+       struct cgroup *cgrp = &root->cgrp;
+-      INIT_LIST_HEAD(&root->root_list);
++      INIT_LIST_HEAD_RCU(&root->root_list);
+       atomic_set(&root->nr_cgrps, 1);
+       cgrp->root = root;
+       init_cgroup_housekeeping(cgrp);
+@@ -2120,7 +2129,7 @@ int cgroup_setup_root(struct cgroup_root
+        * care of subsystems' refcounts, which are explicitly dropped in
+        * the failure exit path.
+        */
+-      list_add(&root->root_list, &cgroup_roots);
++      list_add_rcu(&root->root_list, &cgroup_roots);
+       cgroup_root_count++;
+       /*
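
The hunks above are a textbook RCU-protected list conversion: the writer paths (root setup and destroy) still run under cgroup_mutex but now publish and unpublish entries with the _rcu list helpers and defer the free with kfree_rcu(), while traversals may run under rcu_read_lock() alone. Below is a minimal out-of-tree sketch of the same pattern; the demo_* names are hypothetical, and only the list/RCU/mutex helpers are real kernel APIs.

/*
 * Sketch only: mirrors the cgroup_roots conversion with made-up
 * demo_* names.
 */
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_root {
	int id;
	struct list_head node;
	struct rcu_head rcu;		/* needed by kfree_rcu() below */
};

static LIST_HEAD(demo_roots);
static DEFINE_MUTEX(demo_mutex);	/* serializes writers only */

/* Writer (rare path): publish a new entry while holding the mutex. */
static int demo_add_root(int id)
{
	struct demo_root *root = kzalloc(sizeof(*root), GFP_KERNEL);

	if (!root)
		return -ENOMEM;
	root->id = id;
	INIT_LIST_HEAD_RCU(&root->node);

	mutex_lock(&demo_mutex);
	list_add_rcu(&root->node, &demo_roots);
	mutex_unlock(&demo_mutex);
	return 0;
}

/*
 * Writer (rare path): unlink the entry, then defer the free with
 * kfree_rcu() so concurrent readers can finish their traversal.
 */
static void demo_del_root(struct demo_root *root)
{
	mutex_lock(&demo_mutex);
	list_del_rcu(&root->node);
	mutex_unlock(&demo_mutex);
	kfree_rcu(root, rcu);
}

/*
 * Reader (hot path): callers hold either rcu_read_lock() or demo_mutex;
 * the lockdep expression mirrors the patched for_each_root() macro.
 */
static struct demo_root *demo_find_root(int id)
{
	struct demo_root *root;

	list_for_each_entry_rcu(root, &demo_roots, node,
				lockdep_is_held(&demo_mutex)) {
		if (root->id == id)
			return root;
	}
	return NULL;
}

The split pays off for exactly the reason the commit message gives: additions and removals stay on the rare, mutex-protected setup and destroy paths, while the frequent traversals no longer have to take cgroup_mutex at all.
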
diff --git a/queue-6.1/series b/queue-6.1/series
index 0ecdb3fe6984bb3f41b2b8c1f4ebd2b341cf68c1..1d54c30dce2b11bb3ea5121d134d6242f979dbab 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -28,3 +28,4 @@ nfsd-make-svc_stat-per-network-namespace-instead-of-global.patch
 nvme-pci-add-apst-quirk-for-lenovo-n60z-laptop.patch
 mptcp-fully-established-after-add_addr-echo-on-mpj.patch
 drm-i915-gem-fix-virtual-memory-mapping-boundaries-calculation.patch
+cgroup-make-operations-on-the-cgroup-root_list-rcu-safe.patch
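
As the new comment in __cset_cgroup_from_root() spells out, a traversal done under rcu_read_lock() instead of cgroup_mutex can observe a root whose css_set links are already being torn down, so the NULL check moves to the caller. A hedged caller-side sketch, reusing the hypothetical demo_* helpers from the previous example:

/*
 * Lockless reader: take only rcu_read_lock() and handle a NULL result,
 * since a writer may already have unlinked the entry we race with.
 */
static int demo_read_id(int wanted)
{
	struct demo_root *root;
	int ret = -ENOENT;

	rcu_read_lock();
	root = demo_find_root(wanted);
	if (root)			/* NULL check is the caller's job */
		ret = root->id;
	rcu_read_unlock();		/* root must not be used past this */
	return ret;
}

task_cgroup_from_root() now documents the same rule: css_set_lock keeps the task's css_sets from changing, while either cgroup_mutex or the RCU read lock keeps the cgroup root from being destroyed underneath the lookup.
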