3.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 11 Feb 2014 17:24:58 +0000 (09:24 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 11 Feb 2014 17:24:58 +0000 (09:24 -0800)
added patches:
sched-rt-fix-isolated-cpus-leaving-root_task_group-indefinitely-throttled.patch
sched-rt-fix-sched_rr-across-cgroups.patch

queue-3.4/sched-rt-fix-isolated-cpus-leaving-root_task_group-indefinitely-throttled.patch [new file with mode: 0644]
queue-3.4/sched-rt-fix-sched_rr-across-cgroups.patch [new file with mode: 0644]
queue-3.4/series

diff --git a/queue-3.4/sched-rt-fix-isolated-cpus-leaving-root_task_group-indefinitely-throttled.patch b/queue-3.4/sched-rt-fix-isolated-cpus-leaving-root_task_group-indefinitely-throttled.patch
new file mode 100644
index 0000000..d65dbe1
--- /dev/null
+++ b/queue-3.4/sched-rt-fix-isolated-cpus-leaving-root_task_group-indefinitely-throttled.patch
@@ -0,0 +1,46 @@
+From e221d028bb08b47e624c5f0a31732c642db9d19a Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <efault@gmx.de>
+Date: Tue, 7 Aug 2012 10:02:38 +0200
+Subject: sched,rt: fix isolated CPUs leaving root_task_group indefinitely throttled
+
+From: Mike Galbraith <efault@gmx.de>
+
+commit e221d028bb08b47e624c5f0a31732c642db9d19a upstream.
+
+Root task group bandwidth replenishment must service all CPUs, regardless of
+where the timer was last started, and regardless of the isolation mechanism,
+lest 'Quoth the Raven, "Nevermore"' become rt scheduling policy.
+
+Signed-off-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/1344326558.6968.25.camel@marge.simpson.net
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Li Zefan <lizefan@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/rt.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -782,6 +782,19 @@ static int do_sched_rt_period_timer(stru
+       const struct cpumask *span;
+
+       span = sched_rt_period_mask();
++#ifdef CONFIG_RT_GROUP_SCHED
++      /*
++       * FIXME: isolated CPUs should really leave the root task group,
++       * whether they are isolcpus or were isolated via cpusets, lest
++       * the timer run on a CPU which does not service all runqueues,
++       * potentially leaving other CPUs indefinitely throttled.  If
++       * isolation is really required, the user will turn the throttle
++       * off to kill the perturbations it causes anyway.  Meanwhile,
++       * this maintains functionality for boot and/or troubleshooting.
++       */
++      if (rt_b == &root_task_group.rt_bandwidth)
++              span = cpu_online_mask;
++#endif
+       for_each_cpu(i, span) {
+               int enqueue = 0;
+               struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
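
To make the effect of that hunk concrete, here is a minimal userspace
sketch of the span selection in do_sched_rt_period_timer() after the
patch. It is illustrative only: the bool-array masks, the owner string,
and the printf() loop are stand-ins invented for this sketch, and only
the root-bandwidth fallback mirrors the actual kernel change.

/* sketch_root_span.c - illustrative stand-in, NOT kernel code */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct rt_bandwidth { const char *owner; };

/* Stand-in for the kernel's root_task_group.rt_bandwidth. */
static struct rt_bandwidth root_rt_bandwidth = { "root_task_group" };

/* Stand-in for cpu_online_mask: every CPU is online. */
static const bool cpu_online[NR_CPUS] = { true, true, true, true };

/*
 * Stand-in for sched_rt_period_mask(): with isolcpus or cpuset isolation
 * the timer's root domain may cover only part of the machine (CPUs 0-1
 * here), which is how isolated CPUs 2-3 could stay throttled forever.
 */
static const bool period_mask[NR_CPUS] = { true, true, false, false };

static void do_sched_rt_period_timer(struct rt_bandwidth *rt_b)
{
        const bool *span = period_mask;

        /* The fix: the root group's timer must service all online CPUs. */
        if (rt_b == &root_rt_bandwidth)
                span = cpu_online;

        for (int i = 0; i < NR_CPUS; i++)
                if (span[i])
                        printf("%s: replenish rt_rq on CPU %d\n",
                               rt_b->owner, i);
}

int main(void)
{
        do_sched_rt_period_timer(&root_rt_bandwidth); /* CPUs 0-3 all serviced */
        return 0;
}
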
diff --git a/queue-3.4/sched-rt-fix-sched_rr-across-cgroups.patch b/queue-3.4/sched-rt-fix-sched_rr-across-cgroups.patch
new file mode 100644
index 0000000..2857da2
--- /dev/null
+++ b/queue-3.4/sched-rt-fix-sched_rr-across-cgroups.patch
@@ -0,0 +1,65 @@
+From 454c79999f7eaedcdf4c15c449e43902980cbdf5 Mon Sep 17 00:00:00 2001
+From: Colin Cross <ccross@android.com>
+Date: Wed, 16 May 2012 21:34:23 -0700
+Subject: sched/rt: Fix SCHED_RR across cgroups
+
+From: Colin Cross <ccross@android.com>
+
+commit 454c79999f7eaedcdf4c15c449e43902980cbdf5 upstream.
+
+task_tick_rt() has an optimization to only reschedule SCHED_RR tasks
+if they were the only element on their rq.  However, with cgroups
+a SCHED_RR task could be the only element on its per-cgroup rq but
+still be competing with other SCHED_RR tasks in its parent's
+cgroup.  In this case, the SCHED_RR task in the child cgroup would
+never yield at the end of its timeslice.  If the child cgroup
+rt_runtime_us was the same as the parent cgroup rt_runtime_us,
+the task in the parent cgroup would starve completely.
+
+Modify task_tick_rt() to check that the task is the only task on its
+rq, and that each of the scheduling entities of its ancestors
+is also the only entity on its rq.
+
+Signed-off-by: Colin Cross <ccross@android.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/1337229266-15798-1-git-send-email-ccross@android.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Li Zefan <lizefan@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/rt.c |   15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1997,6 +1997,8 @@ static void watchdog(struct rq *rq, stru
+
+ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
+ {
++      struct sched_rt_entity *rt_se = &p->rt;
++
+       update_curr_rt(rq);
+
+       watchdog(rq, p);
+@@ -2014,12 +2016,15 @@ static void task_tick_rt(struct rq *rq,
+
+       p->rt.time_slice = RR_TIMESLICE;
+
+       /*
+-       * Requeue to the end of queue if we are not the only element
+-       * on the queue:
++       * Requeue to the end of queue if we (and all of our ancestors) are the
++       * only element on the queue
+        */
+-      if (p->rt.run_list.prev != p->rt.run_list.next) {
+-              requeue_task_rt(rq, p, 0);
+-              set_tsk_need_resched(p);
++      for_each_sched_rt_entity(rt_se) {
++              if (rt_se->run_list.prev != rt_se->run_list.next) {
++                      requeue_task_rt(rq, p, 0);
++                      set_tsk_need_resched(p);
++                      return;
++              }
+       }
+ }
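
The behavioral change of this second patch, modeled the same way: the
pre-patch code checked only the task's own run_list, while the patched
code walks every sched_rt_entity up the cgroup hierarchy via
for_each_sched_rt_entity(). The struct layout and should_requeue()
helper below are stand-ins invented for this sketch, not kernel code.

/* sketch_rr_requeue.c - illustrative stand-in, NOT kernel code */
#include <stdbool.h>
#include <stdio.h>

/*
 * "alone" models the kernel's run_list.prev == run_list.next check
 * (exactly one entity queued at that level); "parent" models the
 * per-cgroup entity chain that for_each_sched_rt_entity() walks.
 */
struct sched_rt_entity {
        bool alone;
        struct sched_rt_entity *parent;
};

/* Patched logic: requeue if ANY level of the hierarchy is contended. */
static bool should_requeue(struct sched_rt_entity *rt_se)
{
        for (; rt_se; rt_se = rt_se->parent)
                if (!rt_se->alone)
                        return true;
        return false;
}

int main(void)
{
        /* A SCHED_RR task alone on its per-cgroup rq, whose parent
         * entity shares the parent rq with a sibling cgroup. */
        struct sched_rt_entity parent = { .alone = false, .parent = NULL };
        struct sched_rt_entity child  = { .alone = true,  .parent = &parent };

        /* Prints "yes": the contended parent level forces a requeue. */
        printf("requeue at tick: %s\n", should_requeue(&child) ? "yes" : "no");
        return 0;
}

With the old single-level check the child in this example would never be
requeued at the end of its timeslice, starving the sibling; the
hierarchical walk restores round-robin behavior across cgroup levels.
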
diff --git a/queue-3.4/series b/queue-3.4/series
index c86d09fae9eae3aa55c3938268e6bd0e661ae136..e2c1f8fac88a5a61ef6138d14c3edb855fdd31be 100644
--- a/queue-3.4/series
+++ b/queue-3.4/series
@@ -21,3 +21,5 @@ drm-radeon-set-the-full-cache-bit-for-fences-on-r7xx.patch
 drm-radeon-dce4-clear-bios-scratch-dpms-bit-v2.patch
 pci-enable-ari-if-dev-and-upstream-bridge-support-it-disable-otherwise.patch
 hpfs-deadlock-and-race-in-directory-lseek.patch
+sched-rt-fix-sched_rr-across-cgroups.patch
+sched-rt-fix-isolated-cpus-leaving-root_task_group-indefinitely-throttled.patch