4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 28 Apr 2016 18:52:45 +0000 (11:52 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 28 Apr 2016 18:52:45 +0000 (11:52 -0700)
added patches:
sched-cgroup-fix-cleanup-cgroup-teardown-init.patch

queue-4.4/sched-cgroup-fix-cleanup-cgroup-teardown-init.patch [new file with mode: 0644]
queue-4.4/series

diff --git a/queue-4.4/sched-cgroup-fix-cleanup-cgroup-teardown-init.patch b/queue-4.4/sched-cgroup-fix-cleanup-cgroup-teardown-init.patch
new file mode 100644
index 0000000..ba364a2
--- /dev/null
+++ b/queue-4.4/sched-cgroup-fix-cleanup-cgroup-teardown-init.patch
@@ -0,0 +1,142 @@
+From 2f5177f0fd7e531b26d54633be62d1d4cb94621c Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 16 Mar 2016 16:22:45 +0100
+Subject: sched/cgroup: Fix/cleanup cgroup teardown/init
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 2f5177f0fd7e531b26d54633be62d1d4cb94621c upstream.
+
+The CPU controller hasn't kept up with the various changes in the whole
+cgroup initialization / destruction sequence, and commit:
+
+  2e91fa7f6d45 ("cgroup: keep zombies associated with their original cgroups")
+
+caused it to explode.
+
+The reason for this is that zombies do not inhibit css_offline() from
+being called, but do stall css_released(). Now we tear down the cfs_rq
+structures on css_offline() but zombies can run after that, leading to
+use-after-free issues.
+
+The solution is to move the tear-down to css_released(), which
+guarantees nobody (including no zombies) is still using our cgroup.
+
+Furthermore, a few simple cleanups are possible too. There doesn't
+appear to be any point to us using css_online() (anymore?) so fold that
+in css_alloc().
+
+And since cgroup code guarantees an RCU grace period between
+css_released() and css_free() we can forgo using call_rcu() and free the
+stuff immediately.
+
+Suggested-by: Tejun Heo <tj@kernel.org>
+Reported-by: Kazuki Yamaguchi <k@rhe.jp>
+Reported-by: Niklas Cassel <niklas.cassel@axis.com>
+Tested-by: Niklas Cassel <niklas.cassel@axis.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Tejun Heo <tj@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 2e91fa7f6d45 ("cgroup: keep zombies associated with their original cgroups")
+Link: http://lkml.kernel.org/r/20160316152245.GY6344@twins.programming.kicks-ass.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/core.c |   35 ++++++++++++++---------------------
+ 1 file changed, 14 insertions(+), 21 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7693,7 +7693,7 @@ void set_curr_task(int cpu, struct task_
+ /* task_group_lock serializes the addition/removal of task groups */
+ static DEFINE_SPINLOCK(task_group_lock);
+ 
+-static void free_sched_group(struct task_group *tg)
++static void sched_free_group(struct task_group *tg)
+ {
+       free_fair_sched_group(tg);
+       free_rt_sched_group(tg);
+@@ -7719,7 +7719,7 @@ struct task_group *sched_create_group(st
+       return tg;
+ 
+ err:
+-      free_sched_group(tg);
++      sched_free_group(tg);
+       return ERR_PTR(-ENOMEM);
+ }
+ 
+@@ -7739,17 +7739,16 @@ void sched_online_group(struct task_grou
+ }
+ 
+ /* rcu callback to free various structures associated with a task group */
+-static void free_sched_group_rcu(struct rcu_head *rhp)
++static void sched_free_group_rcu(struct rcu_head *rhp)
+ {
+       /* now it should be safe to free those cfs_rqs */
+-      free_sched_group(container_of(rhp, struct task_group, rcu));
++      sched_free_group(container_of(rhp, struct task_group, rcu));
+ }
+ 
+-/* Destroy runqueue etc associated with a task group */
+ void sched_destroy_group(struct task_group *tg)
+ {
+       /* wait for possible concurrent references to cfs_rqs complete */
+-      call_rcu(&tg->rcu, free_sched_group_rcu);
++      call_rcu(&tg->rcu, sched_free_group_rcu);
+ }
+ 
+ void sched_offline_group(struct task_group *tg)
+@@ -8210,31 +8209,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsy
+       if (IS_ERR(tg))
+               return ERR_PTR(-ENOMEM);
+ 
++      sched_online_group(tg, parent);
++
+       return &tg->css;
+ }
+ 
+-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
+ {
+       struct task_group *tg = css_tg(css);
+-      struct task_group *parent = css_tg(css->parent);
+ 
+-      if (parent)
+-              sched_online_group(tg, parent);
+-      return 0;
++      sched_offline_group(tg);
+ }
+ 
+ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
+ {
+       struct task_group *tg = css_tg(css);
+ 
+-      sched_destroy_group(tg);
+-}
+-
+-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
+-{
+-      struct task_group *tg = css_tg(css);
+-
+-      sched_offline_group(tg);
++      /*
++       * Relies on the RCU grace period between css_released() and this.
++       */
++      sched_free_group(tg);
+ }
+ 
+ static void cpu_cgroup_fork(struct task_struct *task, void *private)
+@@ -8594,9 +8588,8 @@ static struct cftype cpu_files[] = {
+ 
+ struct cgroup_subsys cpu_cgrp_subsys = {
+       .css_alloc      = cpu_cgroup_css_alloc,
++      .css_released   = cpu_cgroup_css_released,
+       .css_free       = cpu_cgroup_css_free,
+-      .css_online     = cpu_cgroup_css_online,
+-      .css_offline    = cpu_cgroup_css_offline,
+       .fork           = cpu_cgroup_fork,
+       .can_attach     = cpu_cgroup_can_attach,
+       .attach         = cpu_cgroup_attach,
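
For readers following the commit message rather than the diff, here is a hypothetical userspace model of the lifecycle bug. It is not kernel code; every name in it (task_group, tg_alloc, zombie_runs) is invented for illustration. It shows why tearing the group down at css_offline() time becomes a use-after-free when a zombie task can still run against the group, and why moving the teardown to css_released(), which fires only after every user (zombies included) is gone, is safe.

/*
 * Hypothetical userspace model of the ordering bug. Not kernel code.
 * Build: gcc -o lifecycle lifecycle.c
 */
#include <stdio.h>
#include <stdlib.h>

struct task_group {		/* stands in for the CPU controller state */
	int *cfs_rq;		/* stands in for the per-CPU runqueue data */
};

static struct task_group *tg_alloc(void)
{
	struct task_group *tg = calloc(1, sizeof(*tg));

	tg->cfs_rq = calloc(1, sizeof(*tg->cfs_rq));
	return tg;
}

static void tg_free(struct task_group *tg)	/* the actual teardown */
{
	free(tg->cfs_rq);
	free(tg);
}

/* A zombie task scheduling one last time against its original group. */
static void zombie_runs(struct task_group *tg)
{
	printf("zombie reads cfs_rq: %d\n", *tg->cfs_rq);
}

int main(void)
{
	struct task_group *tg = tg_alloc();

	/*
	 * Pre-patch order: css_offline() ran tg_free() at this point,
	 * but zombies only stall css_released(), not css_offline(), so
	 * zombie_runs() below would dereference freed memory.
	 */
	zombie_runs(tg);

	/*
	 * Post-patch order: css_released() fires only after the last
	 * user (zombies included) is gone, so teardown here is safe.
	 */
	tg_free(tg);
	return 0;
}
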
diff --git a/queue-4.4/series b/queue-4.4/series
index 636841b74adf4c7a33554dcb0c1d726da7d992f8..d4e200911e3454f7aafaf151fb207904734e6753 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -22,3 +22,4 @@ dmaengine-dw-fix-master-selection.patch
 dmaengine-hsu-correct-use-of-channel-status-register.patch
 dmaengine-pxa_dma-fix-the-maximum-requestor-line.patch
 mtd-nand-pxa3xx_nand-fix-dmaengine-initialization.patch
+sched-cgroup-fix-cleanup-cgroup-teardown-init.patch
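
The other simplification in the message is dropping call_rcu(), since the cgroup core already guarantees an RCU grace period between css_released() and css_free(). A userspace sketch of that reasoning with liburcu follows, assuming the classic urcu flavor (link with -lurcu); its synchronize_rcu() stands in for the grace period the cgroup core provides, and the names and single-threaded layout are illustrative only.

/*
 * Hypothetical liburcu sketch. Build: gcc -o grace grace.c -lurcu
 */
#include <stdlib.h>
#include <urcu.h>

struct task_group { long weight; };

static struct task_group *live_tg;	/* the published pointer */

int main(void)
{
	rcu_register_thread();
	live_tg = calloc(1, sizeof(*live_tg));

	/* A reader (normally another thread) finds the group under RCU. */
	rcu_read_lock();
	struct task_group *seen = rcu_dereference(live_tg);
	(void)seen;
	rcu_read_unlock();

	/* "css_released()": unpublish, so no new reader can find it. */
	struct task_group *old = live_tg;
	rcu_assign_pointer(live_tg, NULL);

	/*
	 * The cgroup core guarantees the equivalent of this wait between
	 * css_released() and css_free() on the controller's behalf...
	 */
	synchronize_rcu();

	/*
	 * ...which is why css_free() may free directly, without the
	 * call_rcu() callback detour that sched_destroy_group() needed.
	 */
	free(old);

	rcu_unregister_thread();
	return 0;
}
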