git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
patches for 5.0
author    Sasha Levin <sashal@kernel.org>
          Wed, 24 Apr 2019 01:27:05 +0000 (21:27 -0400)
committer Sasha Levin <sashal@kernel.org>
          Wed, 24 Apr 2019 01:27:05 +0000 (21:27 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.0/revert-kbuild-use-oz-instead-of-os-when-using-clang.patch [new file with mode: 0644]
queue-5.0/sched-fair-limit-sched_cfs_period_timer-loop-to-avoi.patch [new file with mode: 0644]
queue-5.0/series
queue-5.0/tpm-fix-an-invalid-condition-in-tpm_common_poll.patch [new file with mode: 0644]

diff --git a/queue-5.0/revert-kbuild-use-oz-instead-of-os-when-using-clang.patch b/queue-5.0/revert-kbuild-use-oz-instead-of-os-when-using-clang.patch
new file mode 100644
index 0000000..28c7e66
--- /dev/null
@@ -0,0 +1,41 @@
+From 3b0a66d92292e3f7927ddc3a0e379d892b443510 Mon Sep 17 00:00:00 2001
+From: Matthias Kaehlcke <mka@chromium.org>
+Date: Tue, 23 Apr 2019 12:04:26 -0700
+Subject: Revert "kbuild: use -Oz instead of -Os when using clang"
+
+commit a75bb4eb9e565b9f5115e2e8c07377ce32cbe69a upstream.
+
+The clang option -Oz enables *aggressive* optimization for size,
+which doesn't necessarily result in smaller images, but can have
+a negative impact on performance. Switch back to the less aggressive
+-Os.
+
+This reverts commit 6748cb3c299de1ffbe56733647b01dbcc398c419.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Matthias Kaehlcke <mka@chromium.org>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Makefile | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index ef192ca04330..807ae0e3ff6e 100644
+--- a/Makefile
++++ b/Makefile
+@@ -678,8 +678,7 @@ KBUILD_CFLAGS      += $(call cc-disable-warning, format-overflow)
+ KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+-KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
+-KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
++KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
+ else
+ ifdef CONFIG_PROFILE_ALL_BRANCHES
+ KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
+-- 
+2.19.1
+
diff --git a/queue-5.0/sched-fair-limit-sched_cfs_period_timer-loop-to-avoi.patch b/queue-5.0/sched-fair-limit-sched_cfs_period_timer-loop-to-avoi.patch
new file mode 100644
index 0000000..3acd24a
--- /dev/null
@@ -0,0 +1,101 @@
+From f9e96373235d232013d54a4c84054e03a0bb61e0 Mon Sep 17 00:00:00 2001
+From: Phil Auld <pauld@redhat.com>
+Date: Tue, 23 Apr 2019 19:51:06 -0400
+Subject: sched/fair: Limit sched_cfs_period_timer() loop to avoid hard lockup
+
+[ Upstream commit 2e8e19226398db8265a8e675fcc0118b9e80c9e8 ]
+
+With an extremely short cfs_period_us setting on a parent task group with a
+large number of children, the for loop in sched_cfs_period_timer() can run
+until the watchdog fires. There is no guarantee that the call to
+hrtimer_forward_now() will ever return 0. The large number of children can
+make do_sched_cfs_period_timer() take longer than the period.
+
+ NMI watchdog: Watchdog detected hard LOCKUP on cpu 24
+ RIP: 0010:tg_nop+0x0/0x10
+  <IRQ>
+  walk_tg_tree_from+0x29/0xb0
+  unthrottle_cfs_rq+0xe0/0x1a0
+  distribute_cfs_runtime+0xd3/0xf0
+  sched_cfs_period_timer+0xcb/0x160
+  ? sched_cfs_slack_timer+0xd0/0xd0
+  __hrtimer_run_queues+0xfb/0x270
+  hrtimer_interrupt+0x122/0x270
+  smp_apic_timer_interrupt+0x6a/0x140
+  apic_timer_interrupt+0xf/0x20
+  </IRQ>
+
+To prevent this, we add protection to the loop that detects when the loop has
+run too many times and scales the period and quota up, proportionally, so that
+the timer can complete before the next period expires. This preserves the
+relative runtime quota while preventing the hard lockup.
+
+A warning is issued reporting this state and the new values.
+
+Signed-off-by: Phil Auld <pauld@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: <stable@vger.kernel.org>
+Cc: Anton Blanchard <anton@ozlabs.org>
+Cc: Ben Segall <bsegall@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lkml.kernel.org/r/20190319130005.25492-1-pauld@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 5e61a1a99e38..eeb605656d59 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4859,12 +4859,15 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
+       return HRTIMER_NORESTART;
+ }
++extern const u64 max_cfs_quota_period;
++
+ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
+ {
+       struct cfs_bandwidth *cfs_b =
+               container_of(timer, struct cfs_bandwidth, period_timer);
+       int overrun;
+       int idle = 0;
++      int count = 0;
+       raw_spin_lock(&cfs_b->lock);
+       for (;;) {
+@@ -4872,6 +4875,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
+               if (!overrun)
+                       break;
++              if (++count > 3) {
++                      u64 new, old = ktime_to_ns(cfs_b->period);
++
++                      new = (old * 147) / 128; /* ~115% */
++                      new = min(new, max_cfs_quota_period);
++
++                      cfs_b->period = ns_to_ktime(new);
++
++                      /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
++                      cfs_b->quota *= new;
++                      cfs_b->quota = div64_u64(cfs_b->quota, old);
++
++                      pr_warn_ratelimited(
++        "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
++                              smp_processor_id(),
++                              div_u64(new, NSEC_PER_USEC),
++                                div_u64(cfs_b->quota, NSEC_PER_USEC));
++
++                      /* reset count so we don't come right back in here */
++                      count = 0;
++              }
++
+               idle = do_sched_cfs_period_timer(cfs_b, overrun);
+       }
+       if (idle)
+-- 
+2.19.1
+
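For reference, the period/quota scaling added by the hunk above can be exercised in isolation. The sketch below is an editor's illustration, not kernel code: the 147/128 factor and the 1-second period cap come from the patch, while the starting period and quota values are hypothetical. Multiplying the quota before dividing, as the kernel does with div64_u64(), keeps the ratio exact in integer arithmetic.

/*
 * Editor's sketch of the scaling in sched_cfs_period_timer() above.
 * Standalone user-space C; sample values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC        1000ULL
#define MAX_CFS_QUOTA_PERIOD 1000000000ULL /* 1 s cap, as in the kernel */

int main(void)
{
	uint64_t period = 100000;  /* hypothetical 100 us period, in ns */
	uint64_t quota  = 50000;   /* hypothetical  50 us quota, in ns */

	/* Grow the period by ~15% (147/128), clamped to the 1 s maximum. */
	uint64_t new_period = period * 147 / 128;
	if (new_period > MAX_CFS_QUOTA_PERIOD)
		new_period = MAX_CFS_QUOTA_PERIOD;

	/* Scale the quota by the same ratio so relative bandwidth is preserved. */
	quota = quota * new_period / period;
	period = new_period;

	printf("new cfs_period_us %llu, cfs_quota_us %llu\n",
	       (unsigned long long)(period / NSEC_PER_USEC),
	       (unsigned long long)(quota / NSEC_PER_USEC));
	return 0;
}

With these sample inputs the sketch prints a period of roughly 114 us and a quota of roughly 57 us: both grow by the same ~15%, so the quota/period ratio, and therefore the group's relative bandwidth, is unchanged.
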
diff --git a/queue-5.0/series b/queue-5.0/series
index bf135f48522981682db5475a7ab0b7cd7f8adf4b..dbba7563925aa94a6ad9beecc363c9dd9b003f60 100644
@@ -103,3 +103,6 @@ nfit-ars-allow-root-to-busy-poll-the-ars-state-machi.patch
 nfit-ars-avoid-stale-ars-results.patch
 tpm-tpm_i2c_atmel-return-e2big-when-the-transfer-is-.patch
 tpm-fix-the-type-of-the-return-value-in-calc_tpm2_ev.patch
+revert-kbuild-use-oz-instead-of-os-when-using-clang.patch
+sched-fair-limit-sched_cfs_period_timer-loop-to-avoi.patch
+tpm-fix-an-invalid-condition-in-tpm_common_poll.patch
diff --git a/queue-5.0/tpm-fix-an-invalid-condition-in-tpm_common_poll.patch b/queue-5.0/tpm-fix-an-invalid-condition-in-tpm_common_poll.patch
new file mode 100644
index 0000000..684099a
--- /dev/null
@@ -0,0 +1,61 @@
+From 071caea626ee7420b3003d908fdf922e99dd9004 Mon Sep 17 00:00:00 2001
+From: Tadeusz Struk <tadeusz.struk@intel.com>
+Date: Wed, 27 Mar 2019 11:32:38 -0700
+Subject: tpm: fix an invalid condition in tpm_common_poll
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+[ Upstream commit 7110629263469b4664d00b38ef80a656eddf3637 ]
+
+The poll condition should only check response_length,
+because reads should only be issued if there is data to read.
+The response_read flag only prevents double writes.
+The problem was that the write set response_read to false,
+enqueued a tpm job, and returned. Then the application called poll,
+which checked the response_read flag and returned EPOLLIN.
+Then the application called read, but got nothing.
+After all that, the async_work kicked in.
+A mutex_lock was also added around the poll check to prevent
+other possible race conditions.
+
+Fixes: 9488585b21bef0df12 ("tpm: add support for partial reads")
+Reported-by: Mantas Mikulėnas <grawity@gmail.com>
+Tested-by: Mantas Mikulėnas <grawity@gmail.com>
+Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: James Morris <james.morris@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/tpm/tpm-dev-common.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
+index 5eecad233ea1..744b0237300a 100644
+--- a/drivers/char/tpm/tpm-dev-common.c
++++ b/drivers/char/tpm/tpm-dev-common.c
+@@ -203,12 +203,19 @@ __poll_t tpm_common_poll(struct file *file, poll_table *wait)
+       __poll_t mask = 0;
+       poll_wait(file, &priv->async_wait, wait);
++      mutex_lock(&priv->buffer_mutex);
+-      if (!priv->response_read || priv->response_length)
++      /*
++       * The response_length indicates if there is still response
++       * (or part of it) to be consumed. Partial reads decrease it
++       * by the number of bytes read, and write resets it to zero.
++       */
++      if (priv->response_length)
+               mask = EPOLLIN | EPOLLRDNORM;
+       else
+               mask = EPOLLOUT | EPOLLWRNORM;
++      mutex_unlock(&priv->buffer_mutex);
+       return mask;
+ }
+-- 
+2.19.1
+
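To make the race described in the commit message concrete — write() queues a command, poll() reports readable before a response exists, read() then returns nothing — the sketch below mirrors the fixed logic in plain user-space C. It is an editor's illustration, not the driver code: fake_priv and fake_tpm_poll are made-up names, pthread_mutex_t stands in for the kernel's buffer_mutex, and readiness is derived from response_length alone, under the lock.

/*
 * Editor's sketch of the corrected poll condition in tpm_common_poll().
 * User-space stand-in: names and types are simplified, not the driver's.
 */
#define _XOPEN_SOURCE 700  /* for POLLRDNORM/POLLWRNORM */
#include <poll.h>
#include <pthread.h>
#include <stddef.h>

struct fake_priv {
	pthread_mutex_t buffer_mutex;
	size_t response_length;    /* response bytes still waiting to be read */
};

short fake_tpm_poll(struct fake_priv *priv)
{
	short mask;

	/* Take the lock so the async worker cannot race with the check. */
	pthread_mutex_lock(&priv->buffer_mutex);
	if (priv->response_length)          /* a response (or part of it) is queued */
		mask = POLLIN | POLLRDNORM;
	else                                /* nothing to read yet: only writable */
		mask = POLLOUT | POLLWRNORM;
	pthread_mutex_unlock(&priv->buffer_mutex);

	return mask;
}

int main(void)
{
	struct fake_priv priv = { PTHREAD_MUTEX_INITIALIZER, 0 };

	/* Right after write(): no response yet, so poll must not report readable. */
	short before = fake_tpm_poll(&priv);

	/* Later the async worker queues, say, a 10-byte response. */
	priv.response_length = 10;
	short after = fake_tpm_poll(&priv);

	return (before & POLLIN) || !(after & POLLIN);  /* 0 when behaviour matches */
}

Compiled with -pthread, the program exits 0: before a response is queued only POLLOUT is reported, and once response_length is non-zero POLLIN is, which is the ordering the application in the commit message depends on.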