5.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 9 Aug 2021 11:00:03 +0000 (13:00 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 9 Aug 2021 11:00:03 +0000 (13:00 +0200)
added patches:
arm64-vdso-avoid-isb-after-reading-from-cntvct_el0.patch

queue-5.4/arm64-vdso-avoid-isb-after-reading-from-cntvct_el0.patch [new file with mode: 0644]
queue-5.4/series

diff --git a/queue-5.4/arm64-vdso-avoid-isb-after-reading-from-cntvct_el0.patch b/queue-5.4/arm64-vdso-avoid-isb-after-reading-from-cntvct_el0.patch
new file mode 100644 (file)
index 0000000..d03a073
--- /dev/null
+++ b/queue-5.4/arm64-vdso-avoid-isb-after-reading-from-cntvct_el0.patch
@@ -0,0 +1,105 @@
+From 77ec462536a13d4b428a1eead725c4818a49f0b1 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will@kernel.org>
+Date: Thu, 18 Mar 2021 17:07:37 +0000
+Subject: arm64: vdso: Avoid ISB after reading from cntvct_el0
+
+From: Will Deacon <will@kernel.org>
+
+commit 77ec462536a13d4b428a1eead725c4818a49f0b1 upstream.
+
+We can avoid the expensive ISB instruction after reading the counter in
+the vDSO gettime functions by creating a fake address hazard against a
+dummy stack read, just like we do inside the kernel.
+
+Signed-off-by: Will Deacon <will@kernel.org>
+Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Link: https://lore.kernel.org/r/20210318170738.7756-5-will@kernel.org
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Chanho Park <chanho61.park@samsung.com>
+---
+ arch/arm64/include/asm/arch_timer.h        |   21 ---------------------
+ arch/arm64/include/asm/barrier.h           |   19 +++++++++++++++++++
+ arch/arm64/include/asm/vdso/gettimeofday.h |    6 +-----
+ 3 files changed, 20 insertions(+), 26 deletions(-)
+
+--- a/arch/arm64/include/asm/arch_timer.h
++++ b/arch/arm64/include/asm/arch_timer.h
+@@ -165,25 +165,6 @@ static inline void arch_timer_set_cntkct
+       isb();
+ }
+
+-/*
+- * Ensure that reads of the counter are treated the same as memory reads
+- * for the purposes of ordering by subsequent memory barriers.
+- *
+- * This insanity brought to you by speculative system register reads,
+- * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+- *
+- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+- */
+-#define arch_counter_enforce_ordering(val) do {                               \
+-      u64 tmp, _val = (val);                                          \
+-                                                                      \
+-      asm volatile(                                                   \
+-      "       eor     %0, %1, %1\n"                                   \
+-      "       add     %0, sp, %0\n"                                   \
+-      "       ldr     xzr, [%0]"                                      \
+-      : "=r" (tmp) : "r" (_val));                                     \
+-} while (0)
+-
+ static __always_inline u64 __arch_counter_get_cntpct_stable(void)
+ {
+       u64 cnt;
+@@ -224,8 +205,6 @@ static __always_inline u64 __arch_counte
+       return cnt;
+ }
+
+-#undef arch_counter_enforce_ordering
+-
+ static inline int arch_timer_arch_init(void)
+ {
+       return 0;
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -57,6 +57,25 @@ static inline unsigned long array_index_
+       return mask;
+ }
+
++/*
++ * Ensure that reads of the counter are treated the same as memory reads
++ * for the purposes of ordering by subsequent memory barriers.
++ *
++ * This insanity brought to you by speculative system register reads,
++ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
++ *
++ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
++ */
++#define arch_counter_enforce_ordering(val) do {                               \
++      u64 tmp, _val = (val);                                          \
++                                                                      \
++      asm volatile(                                                   \
++      "       eor     %0, %1, %1\n"                                   \
++      "       add     %0, sp, %0\n"                                   \
++      "       ldr     xzr, [%0]"                                      \
++      : "=r" (tmp) : "r" (_val));                                     \
++} while (0)
++
+ #define __smp_mb()    dmb(ish)
+ #define __smp_rmb()   dmb(ishld)
+ #define __smp_wmb()   dmb(ishst)
+--- a/arch/arm64/include/asm/vdso/gettimeofday.h
++++ b/arch/arm64/include/asm/vdso/gettimeofday.h
+@@ -85,11 +85,7 @@ static __always_inline u64 __arch_get_hw
+        */
+       isb();
+       asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
+-      /*
+-       * This isb() is required to prevent that the seq lock is
+-       * speculated.#
+-       */
+-      isb();
++      arch_counter_enforce_ordering(res);
+       return res;
+ }
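
Editor's note on the technique: the hunks above move arch_counter_enforce_ordering() from arch_timer.h into barrier.h and substitute it for the trailing isb() in the vDSO counter read. What follows is a minimal user-space sketch of the same idea, not part of the patch; it assumes an AArch64 Linux target (where CNTVCT_EL0 is readable from EL0, as the vDSO itself relies on), and the file and helper names are illustrative.

/*
 * Illustration only: a user-space analogue of the patched vDSO path.
 * Build with e.g. aarch64-linux-gnu-gcc -O2 hazard.c on an arm64 box.
 */
#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the patch's arch_counter_enforce_ordering(): eor a value
 * with itself to get zero, add that to sp, and load from the result.
 * The load always reads [sp], but its address formally depends on the
 * counter value, so subsequent memory barriers must order the counter
 * read like a memory read. This is cheaper than a trailing ISB.
 */
#define counter_enforce_ordering(val) do {				\
	uint64_t tmp, _val = (val);					\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)

static inline uint64_t read_cntvct(void)
{
	uint64_t res;

	/* The leading ISB is kept: it stops the read being hoisted. */
	asm volatile("isb" ::: "memory");
	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
	/* The trailing ISB is replaced by the address hazard above. */
	counter_enforce_ordering(res);
	return res;
}

int main(void)
{
	printf("cntvct_el0 = %llu\n", (unsigned long long)read_cntvct());
	return 0;
}

Running this prints the current virtual counter value. The payoff of the change is that the vDSO gettime functions no longer pay for a second ISB on every call, while the fake address dependency preserves the ordering against the seqlock that the removed isb() provided.
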
diff --git a/queue-5.4/series b/queue-5.4/series
index b0b48639ae648b7a3c2a3bc785696446fbe2059a..50d46b6dd2b7f69c0e87e6c32af0f4055bafdf0a 100644 (file)
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -70,3 +70,4 @@ md-raid10-properly-indicate-failure-when-ending-a-failed-write-request.patch
 kvm-x86-accept-userspace-interrupt-only-if-no-event-is-injected.patch
 kvm-do-not-leak-memory-for-duplicate-debugfs-directories.patch
 kvm-x86-mmu-fix-per-cpu-counter-corruption-on-32-bit-builds.patch
+arm64-vdso-avoid-isb-after-reading-from-cntvct_el0.patch