From: Greg Kroah-Hartman
Date: Tue, 11 Feb 2014 17:24:58 +0000 (-0800)
Subject: 3.4-stable patches
X-Git-Tag: v3.4.80~24
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=67b9208c028c456e6e0d2ebaa501b2dbe3e0ae21;p=thirdparty%2Fkernel%2Fstable-queue.git

3.4-stable patches

added patches:
	sched-rt-fix-isolated-cpus-leaving-root_task_group-indefinitely-throttled.patch
	sched-rt-fix-sched_rr-across-cgroups.patch
---

diff --git a/queue-3.4/sched-rt-fix-isolated-cpus-leaving-root_task_group-indefinitely-throttled.patch b/queue-3.4/sched-rt-fix-isolated-cpus-leaving-root_task_group-indefinitely-throttled.patch
new file mode 100644
index 00000000000..d65dbe104df
--- /dev/null
+++ b/queue-3.4/sched-rt-fix-isolated-cpus-leaving-root_task_group-indefinitely-throttled.patch
@@ -0,0 +1,46 @@
+From e221d028bb08b47e624c5f0a31732c642db9d19a Mon Sep 17 00:00:00 2001
+From: Mike Galbraith
+Date: Tue, 7 Aug 2012 10:02:38 +0200
+Subject: sched,rt: fix isolated CPUs leaving root_task_group indefinitely throttled
+
+From: Mike Galbraith
+
+commit e221d028bb08b47e624c5f0a31732c642db9d19a upstream.
+
+Root task group bandwidth replenishment must service all CPUs, regardless of
+where the timer was last started, and regardless of the isolation mechanism,
+lest 'Quoth the Raven, "Nevermore"' become rt scheduling policy.
+
+Signed-off-by: Mike Galbraith
+Signed-off-by: Peter Zijlstra
+Link: http://lkml.kernel.org/r/1344326558.6968.25.camel@marge.simpson.net
+Signed-off-by: Thomas Gleixner
+Cc: Li Zefan
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/sched/rt.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -782,6 +782,19 @@ static int do_sched_rt_period_timer(stru
+ 	const struct cpumask *span;
+ 
+ 	span = sched_rt_period_mask();
++#ifdef CONFIG_RT_GROUP_SCHED
++	/*
++	 * FIXME: isolated CPUs should really leave the root task group,
++	 * whether they are isolcpus or were isolated via cpusets, lest
++	 * the timer run on a CPU which does not service all runqueues,
++	 * potentially leaving other CPUs indefinitely throttled. If
++	 * isolation is really required, the user will turn the throttle
++	 * off to kill the perturbations it causes anyway. Meanwhile,
++	 * this maintains functionality for boot and/or troubleshooting.
++	 */
++	if (rt_b == &root_task_group.rt_bandwidth)
++		span = cpu_online_mask;
++#endif
+ 	for_each_cpu(i, span) {
+ 		int enqueue = 0;
+ 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
diff --git a/queue-3.4/sched-rt-fix-sched_rr-across-cgroups.patch b/queue-3.4/sched-rt-fix-sched_rr-across-cgroups.patch
new file mode 100644
index 00000000000..2857da2b357
--- /dev/null
+++ b/queue-3.4/sched-rt-fix-sched_rr-across-cgroups.patch
@@ -0,0 +1,65 @@
+From 454c79999f7eaedcdf4c15c449e43902980cbdf5 Mon Sep 17 00:00:00 2001
+From: Colin Cross
+Date: Wed, 16 May 2012 21:34:23 -0700
+Subject: sched/rt: Fix SCHED_RR across cgroups
+
+From: Colin Cross
+
+commit 454c79999f7eaedcdf4c15c449e43902980cbdf5 upstream.
+
+task_tick_rt() has an optimization to only reschedule SCHED_RR tasks
+if they were the only element on their rq. However, with cgroups
+a SCHED_RR task could be the only element on its per-cgroup rq but
+still be competing with other SCHED_RR tasks in its parent's
+cgroup. In this case, the SCHED_RR task in the child cgroup would
+never yield at the end of its timeslice. If the child cgroup
+rt_runtime_us was the same as the parent cgroup rt_runtime_us,
+the task in the parent cgroup would starve completely.
+
+Modify task_tick_rt() to check that the task is the only task on its
+rq, and that each of the scheduling entities of its ancestors
+is also the only entity on its rq.
+
+Signed-off-by: Colin Cross
+Signed-off-by: Peter Zijlstra
+Link: http://lkml.kernel.org/r/1337229266-15798-1-git-send-email-ccross@android.com
+Signed-off-by: Ingo Molnar
+Cc: Li Zefan
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/sched/rt.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1997,6 +1997,8 @@ static void watchdog(struct rq *rq, stru
+ 
+ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
+ {
++	struct sched_rt_entity *rt_se = &p->rt;
++
+ 	update_curr_rt(rq);
+ 
+ 	watchdog(rq, p);
+@@ -2014,12 +2016,15 @@ static void task_tick_rt(struct rq *rq,
+ 	p->rt.time_slice = RR_TIMESLICE;
+ 
+ 	/*
+-	 * Requeue to the end of queue if we are not the only element
+-	 * on the queue:
++	 * Requeue to the end of queue if we (and all of our ancestors) are the
++	 * only element on the queue
+ 	 */
+-	if (p->rt.run_list.prev != p->rt.run_list.next) {
+-		requeue_task_rt(rq, p, 0);
+-		set_tsk_need_resched(p);
++	for_each_sched_rt_entity(rt_se) {
++		if (rt_se->run_list.prev != rt_se->run_list.next) {
++			requeue_task_rt(rq, p, 0);
++			set_tsk_need_resched(p);
++			return;
++		}
+ 	}
+ }
+ 
diff --git a/queue-3.4/series b/queue-3.4/series
index c86d09fae9e..e2c1f8fac88 100644
--- a/queue-3.4/series
+++ b/queue-3.4/series
@@ -21,3 +21,5 @@ drm-radeon-set-the-full-cache-bit-for-fences-on-r7xx.patch
 drm-radeon-dce4-clear-bios-scratch-dpms-bit-v2.patch
 pci-enable-ari-if-dev-and-upstream-bridge-support-it-disable-otherwise.patch
 hpfs-deadlock-and-race-in-directory-lseek.patch
+sched-rt-fix-sched_rr-across-cgroups.patch
+sched-rt-fix-isolated-cpus-leaving-root_task_group-indefinitely-throttled.patch
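
For illustration only (not part of the queued patches): the SCHED_RR fix above is about preserving ordinary round-robin behaviour across per-cgroup RT runqueues, and a purely userspace sketch of that behaviour is below. Two equal-priority SCHED_RR spinners pinned to the same CPU are expected to alternate at the end of each timeslice; before the fix, a spinner sitting alone on a child cgroup's RT runqueue could defeat the requeue check in task_tick_rt() and never yield. The thread names, priority value 10, the 5-second run time and the file name rr_demo.c are arbitrary choices for the demo; running it needs root (or CAP_SYS_NICE) and, with CONFIG_RT_GROUP_SCHED, a nonzero cpu.rt_runtime_us.

/*
 * rr_demo.c -- illustrative sketch only, not taken from the patches above.
 * Two equal-priority SCHED_RR threads pinned to CPU 0; with round-robin
 * working as intended they alternate every timeslice.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void *spin(void *arg)
{
	const char *name = arg;
	struct sched_param sp = { .sched_priority = 10 };
	cpu_set_t set;
	int ret;

	/* Confine the thread to CPU 0 so both spinners must share it. */
	CPU_ZERO(&set);
	CPU_SET(0, &set);
	ret = pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
	if (ret)
		fprintf(stderr, "setaffinity: %s\n", strerror(ret));

	/* Same SCHED_RR priority: each thread is requeued behind its peer
	 * when its timeslice expires. */
	ret = pthread_setschedparam(pthread_self(), SCHED_RR, &sp);
	if (ret)
		fprintf(stderr, "setschedparam: %s\n", strerror(ret));

	/* Busy-loop, printing occasionally so the alternation is visible. */
	for (unsigned long i = 0; ; i++)
		if ((i & 0x3ffffff) == 0)
			printf("%s running\n", name);
}

int main(void)
{
	static char name_a[] = "thread A", name_b[] = "thread B";
	pthread_t a, b;

	pthread_create(&a, NULL, spin, name_a);
	pthread_create(&b, NULL, spin, name_b);
	sleep(5);	/* let them contend briefly, then exit the process */
	return 0;
}

Built with "gcc -O2 -pthread rr_demo.c" and run on an otherwise idle CPU 0, the output should show "thread A running" and "thread B running" in alternating bursts rather than one thread monopolizing the CPU.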