From: Greg Kroah-Hartman
Date: Tue, 23 May 2017 10:39:11 +0000 (+0200)
Subject: 4.4-stable patches
X-Git-Tag: v3.18.55~38
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=0f8b7d49a7cf50fbe7db6ccd0c56eca7a875328c;p=thirdparty%2Fkernel%2Fstable-queue.git

4.4-stable patches

added patches:
      sched-fair-do-not-announce-throttled-next-buddy-in-dequeue_task_fair.patch
      sched-fair-initialize-throttle_count-for-new-task-groups-lazily.patch
---

diff --git a/queue-4.4/sched-fair-do-not-announce-throttled-next-buddy-in-dequeue_task_fair.patch b/queue-4.4/sched-fair-do-not-announce-throttled-next-buddy-in-dequeue_task_fair.patch
new file mode 100644
index 00000000000..44241975f72
--- /dev/null
+++ b/queue-4.4/sched-fair-do-not-announce-throttled-next-buddy-in-dequeue_task_fair.patch
@@ -0,0 +1,49 @@
+From 754bd598be9bbc953bc709a9e8ed7f3188bfb9d7 Mon Sep 17 00:00:00 2001
+From: Konstantin Khlebnikov
+Date: Thu, 16 Jun 2016 15:57:15 +0300
+Subject: sched/fair: Do not announce throttled next buddy in dequeue_task_fair()
+
+From: Konstantin Khlebnikov
+
+commit 754bd598be9bbc953bc709a9e8ed7f3188bfb9d7 upstream.
+
+The hierarchy could already be throttled at this point. A throttled next
+buddy could trigger a NULL pointer dereference in pick_next_task_fair().
+
+Signed-off-by: Konstantin Khlebnikov
+Signed-off-by: Peter Zijlstra (Intel)
+Reviewed-by: Ben Segall
+Cc: Linus Torvalds
+Cc: Peter Zijlstra
+Cc: Thomas Gleixner
+Link: http://lkml.kernel.org/r/146608183552.21905.15924473394414832071.stgit@buzz
+Signed-off-by: Ingo Molnar
+Cc: Ben Pineau
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/sched/fair.c |    9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4233,15 +4233,14 @@ static void dequeue_task_fair(struct rq
+ 
+ 		/* Don't dequeue parent if it has other entities besides us */
+ 		if (cfs_rq->load.weight) {
++			/* Avoid re-evaluating load for this entity: */
++			se = parent_entity(se);
+ 			/*
+ 			 * Bias pick_next to pick a task from this cfs_rq, as
+ 			 * p is sleeping when it is within its sched_slice.
+ 			 */
+-			if (task_sleep && parent_entity(se))
+-				set_next_buddy(parent_entity(se));
+-
+-			/* avoid re-evaluating load for this entity */
+-			se = parent_entity(se);
++			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
++				set_next_buddy(se);
+ 			break;
+ 		}
+ 		flags |= DEQUEUE_SLEEP;
diff --git a/queue-4.4/sched-fair-initialize-throttle_count-for-new-task-groups-lazily.patch b/queue-4.4/sched-fair-initialize-throttle_count-for-new-task-groups-lazily.patch
new file mode 100644
index 00000000000..487a94954f7
--- /dev/null
+++ b/queue-4.4/sched-fair-initialize-throttle_count-for-new-task-groups-lazily.patch
@@ -0,0 +1,73 @@
+From 094f469172e00d6ab0a3130b0e01c83b3cf3a98d Mon Sep 17 00:00:00 2001
+From: Konstantin Khlebnikov
+Date: Thu, 16 Jun 2016 15:57:01 +0300
+Subject: sched/fair: Initialize throttle_count for new task-groups lazily
+
+From: Konstantin Khlebnikov
+
+commit 094f469172e00d6ab0a3130b0e01c83b3cf3a98d upstream.
+
+A cgroup created inside a throttled group must inherit the current
+throttle_count, otherwise throttled entries can be nominated as the next
+buddy, which later leads to a NULL pointer dereference in pick_next_task_fair().
+
+Initialize cfs_rq->throttle_count lazily at the first enqueue: this avoids
+locking every runqueue at group creation. The lazy approach could also avoid
+a full sub-tree scan when throttling a hierarchy (not done in this patch).
+
+Signed-off-by: Konstantin Khlebnikov
+Signed-off-by: Peter Zijlstra (Intel)
+Cc: Linus Torvalds
+Cc: Peter Zijlstra
+Cc: Thomas Gleixner
+Cc: bsegall@google.com
+Link: http://lkml.kernel.org/r/146608182119.21870.8439834428248129633.stgit@buzz
+Signed-off-by: Ingo Molnar
+Cc: Ben Pineau
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/sched/fair.c  |   20 ++++++++++++++++++++
+ kernel/sched/sched.h |    2 +-
+ 2 files changed, 21 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3918,6 +3918,26 @@ static void check_enqueue_throttle(struc
+ 	if (!cfs_bandwidth_used())
+ 		return;
+ 
++	/* Synchronize hierarchical throttle counter: */
++	if (unlikely(!cfs_rq->throttle_uptodate)) {
++		struct rq *rq = rq_of(cfs_rq);
++		struct cfs_rq *pcfs_rq;
++		struct task_group *tg;
++
++		cfs_rq->throttle_uptodate = 1;
++
++		/* Get closest up-to-date node, because leaves go first: */
++		for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
++			pcfs_rq = tg->cfs_rq[cpu_of(rq)];
++			if (pcfs_rq->throttle_uptodate)
++				break;
++		}
++		if (tg) {
++			cfs_rq->throttle_count = pcfs_rq->throttle_count;
++			cfs_rq->throttled_clock_task = rq_clock_task(rq);
++		}
++	}
++
+ 	/* an active group must be handled by the update_curr()->put() path */
+ 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
+ 		return;
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -417,7 +417,7 @@ struct cfs_rq {
+ 
+ 	u64 throttled_clock, throttled_clock_task;
+ 	u64 throttled_clock_task_time;
+-	int throttled, throttle_count;
++	int throttled, throttle_count, throttle_uptodate;
+ 	struct list_head throttled_list;
+ #endif /* CONFIG_CFS_BANDWIDTH */
+ #endif /* CONFIG_FAIR_GROUP_SCHED */
diff --git a/queue-4.4/series b/queue-4.4/series
index f1c7d259b8f..6db25a0668a 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -48,3 +48,5 @@ net-qmi_wwan-add-simcom-7230e.patch
 fscrypt-fix-context-consistency-check-when-key-s-unavailable.patch
 f2fs-check-entire-encrypted-bigname-when-finding-a-dentry.patch
 fscrypt-avoid-collisions-when-presenting-long-encrypted-filenames.patch
+sched-fair-do-not-announce-throttled-next-buddy-in-dequeue_task_fair.patch
+sched-fair-initialize-throttle_count-for-new-task-groups-lazily.patch
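
As context for both backports: the failure mode they address involves a new cpu
cgroup being created and used underneath a parent that CFS bandwidth control is
already throttling. The user-space sketch below is illustrative only and is not
part of the queued patches; it assumes a cgroup v1 "cpu" controller mounted at
/sys/fs/cgroup/cpu, and the "parent"/"child" group names are invented for the
example. A workload of this shape is the kind that could reach the
pick_next_task_fair() NULL dereference described in the commit messages above
on an unpatched 4.4 kernel.

/*
 * Illustrative sketch (not part of the queued patches): put a task into a
 * freshly created child group while its parent cpu cgroup is being throttled
 * by CFS bandwidth control.  Assumes a cgroup v1 "cpu" controller mounted at
 * /sys/fs/cgroup/cpu; the "parent"/"child" group names are made up here.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char pid[32];

	/* Parent group with a tiny quota (1ms per 100ms) so it throttles fast. */
	mkdir("/sys/fs/cgroup/cpu/parent", 0755);
	write_str("/sys/fs/cgroup/cpu/parent/cpu.cfs_period_us", "100000");
	write_str("/sys/fs/cgroup/cpu/parent/cpu.cfs_quota_us", "1000");

	/*
	 * Child created after the parent may already be throttled: without the
	 * lazy throttle_count initialization, its counter does not reflect the
	 * parent's state.
	 */
	mkdir("/sys/fs/cgroup/cpu/parent/child", 0755);

	/* Move ourselves into the child and burn CPU to force throttling. */
	snprintf(pid, sizeof(pid), "%d\n", (int)getpid());
	write_str("/sys/fs/cgroup/cpu/parent/child/tasks", pid);

	for (volatile unsigned long i = 0; i < 1000000000UL; i++)
		;

	return 0;
}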