4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 23 May 2017 10:39:11 +0000 (12:39 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 23 May 2017 10:39:11 +0000 (12:39 +0200)
added patches:
sched-fair-do-not-announce-throttled-next-buddy-in-dequeue_task_fair.patch
sched-fair-initialize-throttle_count-for-new-task-groups-lazily.patch

queue-4.4/sched-fair-do-not-announce-throttled-next-buddy-in-dequeue_task_fair.patch [new file with mode: 0644]
queue-4.4/sched-fair-initialize-throttle_count-for-new-task-groups-lazily.patch [new file with mode: 0644]
queue-4.4/series

diff --git a/queue-4.4/sched-fair-do-not-announce-throttled-next-buddy-in-dequeue_task_fair.patch b/queue-4.4/sched-fair-do-not-announce-throttled-next-buddy-in-dequeue_task_fair.patch
new file mode 100644
index 0000000..4424197
--- /dev/null
+++ b/queue-4.4/sched-fair-do-not-announce-throttled-next-buddy-in-dequeue_task_fair.patch
@@ -0,0 +1,49 @@
+From 754bd598be9bbc953bc709a9e8ed7f3188bfb9d7 Mon Sep 17 00:00:00 2001
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Date: Thu, 16 Jun 2016 15:57:15 +0300
+Subject: sched/fair: Do not announce throttled next buddy in dequeue_task_fair()
+
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+
+commit 754bd598be9bbc953bc709a9e8ed7f3188bfb9d7 upstream.
+
+The hierarchy could already be throttled at this point. A throttled
+next buddy could trigger a NULL pointer dereference in pick_next_task_fair().
+
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Ben Segall <bsegall@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/146608183552.21905.15924473394414832071.stgit@buzz
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Ben Pineau <benjamin.pineau@mirakl.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/fair.c |    9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4233,15 +4233,14 @@ static void dequeue_task_fair(struct rq
+ 
+               /* Don't dequeue parent if it has other entities besides us */
+               if (cfs_rq->load.weight) {
++                      /* Avoid re-evaluating load for this entity: */
++                      se = parent_entity(se);
+                       /*
+                        * Bias pick_next to pick a task from this cfs_rq, as
+                        * p is sleeping when it is within its sched_slice.
+                        */
+-                      if (task_sleep && parent_entity(se))
+-                              set_next_buddy(parent_entity(se));
+-
+-                      /* avoid re-evaluating load for this entity */
+-                      se = parent_entity(se);
++                      if (task_sleep && se && !throttled_hierarchy(cfs_rq))
++                              set_next_buddy(se);
+                       break;
+               }
+               flags |= DEQUEUE_SLEEP;
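The failure mode above, in miniature: the following self-contained userspace sketch is toy code, not the kernel's implementation. The toy_* names are invented stand-ins for sched_entity, cfs_rq and pick_next_task_fair(), modeling just enough of the pick path to show why a stale buddy hint pointing into a throttled, emptied group ends in a NULL pointer.

#include <stdio.h>
#include <stddef.h>

struct toy_rq;

struct toy_se {                       /* schedulable entity */
        struct toy_rq *my_q;          /* group: queue it owns; task: NULL */
};

struct toy_rq {                       /* per-group runqueue */
        struct toy_se *first;         /* next runnable entity, NULL if empty */
};

static struct toy_se *next_buddy;     /* hint published by the dequeue path */

static struct toy_se *toy_pick(struct toy_rq *top)
{
        /* honor the buddy hint first, as the real pick path would */
        struct toy_se *se = next_buddy ? next_buddy : top->first;

        /* descend through groups until a plain task (my_q == NULL) is
         * reached; a throttled group's queue is empty, so se goes NULL */
        while (se && se->my_q)
                se = se->my_q->first;
        return se;
}

int main(void)
{
        struct toy_se task = { .my_q = NULL };      /* a plain runnable task */
        struct toy_rq group_q = { .first = NULL };  /* throttled group: empty */
        struct toy_se group = { .my_q = &group_q };
        struct toy_rq top = { .first = &task };

        next_buddy = &group;  /* stale hint the patched kernel no longer sets */
        printf("picked: %p\n", (void *)toy_pick(&top));  /* NULL, not a task */

        next_buddy = NULL;    /* patched behavior: no hint, task is picked */
        printf("picked: %p\n", (void *)toy_pick(&top));  /* &task */
        return 0;
}

The toy returns NULL gracefully; in the kernel the pick loop dereferences the entity it just chose, so the NULL becomes an oops. The patch simply refuses to publish the hint whenever throttled_hierarchy(cfs_rq) holds.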
diff --git a/queue-4.4/sched-fair-initialize-throttle_count-for-new-task-groups-lazily.patch b/queue-4.4/sched-fair-initialize-throttle_count-for-new-task-groups-lazily.patch
new file mode 100644
index 0000000..487a949
--- /dev/null
+++ b/queue-4.4/sched-fair-initialize-throttle_count-for-new-task-groups-lazily.patch
@@ -0,0 +1,73 @@
+From 094f469172e00d6ab0a3130b0e01c83b3cf3a98d Mon Sep 17 00:00:00 2001
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Date: Thu, 16 Jun 2016 15:57:01 +0300
+Subject: sched/fair: Initialize throttle_count for new task-groups lazily
+
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+
+commit 094f469172e00d6ab0a3130b0e01c83b3cf3a98d upstream.
+
+A cgroup created inside a throttled group must inherit the current
+throttle_count. A broken throttle_count lets a throttled entry become the
+next buddy, later causing a NULL pointer dereference in pick_next_task_fair().
+
+This patch initializes cfs_rq->throttle_count at the first enqueue: the
+laziness lets us skip locking every runqueue at group creation, and also
+skip a full sub-tree scan when throttling a hierarchy (not in this patch).
+
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: bsegall@google.com
+Link: http://lkml.kernel.org/r/146608182119.21870.8439834428248129633.stgit@buzz
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Ben Pineau <benjamin.pineau@mirakl.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/fair.c  |   20 ++++++++++++++++++++
+ kernel/sched/sched.h |    2 +-
+ 2 files changed, 21 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3918,6 +3918,26 @@ static void check_enqueue_throttle(struc
+       if (!cfs_bandwidth_used())
+       return;
+ 
++      /* Synchronize hierarchical throttle counter: */
++      if (unlikely(!cfs_rq->throttle_uptodate)) {
++              struct rq *rq = rq_of(cfs_rq);
++              struct cfs_rq *pcfs_rq;
++              struct task_group *tg;
++
++              cfs_rq->throttle_uptodate = 1;
++
++              /* Get closest up-to-date node, because leaves go first: */
++              for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
++                      pcfs_rq = tg->cfs_rq[cpu_of(rq)];
++                      if (pcfs_rq->throttle_uptodate)
++                              break;
++              }
++              if (tg) {
++                      cfs_rq->throttle_count = pcfs_rq->throttle_count;
++                      cfs_rq->throttled_clock_task = rq_clock_task(rq);
++              }
++      }
++
+       /* an active group must be handled by the update_curr()->put() path */
+       if (!cfs_rq->runtime_enabled || cfs_rq->curr)
+               return;
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -417,7 +417,7 @@ struct cfs_rq {
+ 
+       u64 throttled_clock, throttled_clock_task;
+       u64 throttled_clock_task_time;
+-      int throttled, throttle_count;
++      int throttled, throttle_count, throttle_uptodate;
+       struct list_head throttled_list;
+ #endif /* CONFIG_CFS_BANDWIDTH */
+ #endif /* CONFIG_FAIR_GROUP_SCHED */
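In miniature, the lazy scheme this patch uses: rather than updating the derived counter of every new group at creation time (which would mean locking every runqueue), groups start out marked not-up-to-date and, on first use, inherit the value from the closest up-to-date ancestor. A self-contained sketch follows, with invented toy_* names standing in for task_group/cfs_rq.

#include <stdio.h>

struct toy_group {
        struct toy_group *parent;
        int throttle_count;     /* derived state: throttling depth above us */
        int uptodate;           /* has throttle_count been synchronized? */
};

/* Run on first use (first enqueue in the patch) instead of at group
 * creation: walk toward the root and inherit the counter from the
 * closest ancestor whose counter is already trustworthy. */
static void toy_sync_throttle(struct toy_group *g)
{
        struct toy_group *anc;

        if (g->uptodate)
                return;
        g->uptodate = 1;

        for (anc = g->parent; anc; anc = anc->parent)
                if (anc->uptodate)
                        break;
        if (anc)
                g->throttle_count = anc->throttle_count;
}

int main(void)
{
        /* root was throttled once; mid and leaf were created afterwards,
         * inside the throttled hierarchy, with stale zero counters */
        struct toy_group root = { .throttle_count = 1, .uptodate = 1 };
        struct toy_group mid  = { .parent = &root };
        struct toy_group leaf = { .parent = &mid };

        toy_sync_throttle(&leaf); /* inherits 1 from root, past stale mid */
        printf("leaf.throttle_count = %d\n", leaf.throttle_count); /* 1 */
        return 0;
}

The kernel version additionally stamps cfs_rq->throttled_clock_task from rq_clock_task() when it inherits the counter, so time accounting restarts from a sane point; the sketch omits that detail.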
diff --git a/queue-4.4/series b/queue-4.4/series
index f1c7d259b8f4e2e898c8420571d7db120b520d21..6db25a0668a82bfa0a69c5ce2519a4f469bc0a7b 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -48,3 +48,5 @@ net-qmi_wwan-add-simcom-7230e.patch
 fscrypt-fix-context-consistency-check-when-key-s-unavailable.patch
 f2fs-check-entire-encrypted-bigname-when-finding-a-dentry.patch
 fscrypt-avoid-collisions-when-presenting-long-encrypted-filenames.patch
+sched-fair-do-not-announce-throttled-next-buddy-in-dequeue_task_fair.patch
+sched-fair-initialize-throttle_count-for-new-task-groups-lazily.patch