git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 23 Nov 2020 11:33:10 +0000 (12:33 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 23 Nov 2020 11:33:10 +0000 (12:33 +0100)
added patches:
sched-fair-fix-overutilized-update-in-enqueue_task_fair.patch

queue-5.4/sched-fair-fix-overutilized-update-in-enqueue_task_fair.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/sched-fair-fix-overutilized-update-in-enqueue_task_fair.patch b/queue-5.4/sched-fair-fix-overutilized-update-in-enqueue_task_fair.patch
new file mode 100644 (file)
index 0000000..06339f2
--- /dev/null
@@ -0,0 +1,48 @@
+From 8e1ac4299a6e8726de42310d9c1379f188140c71 Mon Sep 17 00:00:00 2001
+From: Quentin Perret <qperret@google.com>
+Date: Thu, 12 Nov 2020 11:12:01 +0000
+Subject: sched/fair: Fix overutilized update in enqueue_task_fair()
+
+From: Quentin Perret <qperret@google.com>
+
+commit 8e1ac4299a6e8726de42310d9c1379f188140c71 upstream.
+
+enqueue_task_fair() attempts to skip the overutilized update for new
+tasks as their util_avg is not accurate yet. However, the flag we check
+to do so is overwritten earlier on in the function, which makes the
+condition pretty much a nop.
+
+Fix this by saving the flag early on.
+
+Fixes: 2802bf3cd936 ("sched/fair: Add over-utilization/tipping point indicator")
+Reported-by: Rick Yiu <rickyiu@google.com>
+Signed-off-by: Quentin Perret <qperret@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
+Link: https://lkml.kernel.org/r/20201112111201.2081902-1-qperret@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/fair.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5228,6 +5228,7 @@ enqueue_task_fair(struct rq *rq, struct
+       struct cfs_rq *cfs_rq;
+       struct sched_entity *se = &p->se;
+       int idle_h_nr_running = task_has_idle_policy(p);
++      int task_new = !(flags & ENQUEUE_WAKEUP);
+ 
+       /*
+        * The code below (indirectly) updates schedutil which looks at
+@@ -5299,7 +5300,7 @@ enqueue_throttle:
+                * into account, but that is not straightforward to implement,
+                * and the following generally works well enough in practice.
+                */
+-              if (flags & ENQUEUE_WAKEUP)
++              if (!task_new)
+                       update_overutilized_status(rq);
+       }
+ 
diff --git a/queue-5.4/series b/queue-5.4/series
index 17f4db08e4960278249b3aa9c00b78b2d52f1c42..27c4951a67f8c123be96a730bca09e7224db3a1d 100644 (file)
@@ -155,3 +155,4 @@ x86-microcode-intel-check-patch-signature-before-saving-microcode-for-early-load
 mm-memcg-slab-fix-root-memcg-vmstats.patch
 mm-userfaultfd-do-not-access-vma-vm_mm-after-calling-handle_userfault.patch
 mm-page_alloc-skip-waternark_boost-for-atomic-order-0-allocations.patch
+sched-fair-fix-overutilized-update-in-enqueue_task_fair.patch
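For context beyond the patch itself, here is a minimal standalone C sketch of the bug pattern the commit message describes: a caller-supplied flag is rewritten partway through the function (in enqueue_task_fair() this happens while walking the sched_entity hierarchy), so a later check of that flag is a nop, and the fix is to latch the needed bit into a local up front. Names and flag values here are simplified stand-ins, not kernel code.

/*
 * Sketch of the bug pattern fixed above; hypothetical names and
 * flag values, not kernel code.
 */
#include <stdio.h>

#define ENQUEUE_WAKEUP 0x01

static void update_overutilized_status(void)
{
	puts("overutilized status updated");
}

static void enqueue(int flags)
{
	/*
	 * Fix: latch "is this a brand-new task?" before anything can
	 * rewrite 'flags'. New tasks are enqueued without ENQUEUE_WAKEUP.
	 */
	int task_new = !(flags & ENQUEUE_WAKEUP);

	/*
	 * Stand-in for the earlier code path that overwrites 'flags'.
	 * After this point, testing 'flags & ENQUEUE_WAKEUP' no longer
	 * says anything about how the task was enqueued.
	 */
	flags = ENQUEUE_WAKEUP;

	/* The buggy form was 'if (flags & ENQUEUE_WAKEUP)', now always true. */
	if (!task_new)
		update_overutilized_status();
}

int main(void)
{
	enqueue(0);              /* new task: update correctly skipped */
	enqueue(ENQUEUE_WAKEUP); /* wakeup: update runs */
	return 0;
}

Running the sketch prints the update only for the wakeup case; with the buggy check, both calls would print, which is how new tasks with not-yet-converged util_avg could trip the overutilized flag.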