git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 12 Sep 2018 19:41:36 +0000 (21:41 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 12 Sep 2018 19:41:36 +0000 (21:41 +0200)
added patches:
x86-pae-use-64-bit-atomic-xchg-function-in-native_ptep_get_and_clear.patch

queue-4.4/series
queue-4.4/x86-pae-use-64-bit-atomic-xchg-function-in-native_ptep_get_and_clear.patch [new file with mode: 0644]

diff --git a/queue-4.4/series b/queue-4.4/series
index 9efe99b43428a7e829631b237a79964eae2bc0bc..d21b13fb104ed98fe3f62144d19cd113a1f8582e 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -33,3 +33,4 @@ btrfs-replace-reset-on-disk-dev-stats-value-after-replace.patch
 btrfs-relocation-only-remove-reloc-rb_trees-if-reloc-control-has-been-initialized.patch
 btrfs-don-t-remove-block-group-that-still-has-pinned-down-bytes.patch
 debugobjects-make-stack-check-warning-more-informative.patch
+x86-pae-use-64-bit-atomic-xchg-function-in-native_ptep_get_and_clear.patch
diff --git a/queue-4.4/x86-pae-use-64-bit-atomic-xchg-function-in-native_ptep_get_and_clear.patch b/queue-4.4/x86-pae-use-64-bit-atomic-xchg-function-in-native_ptep_get_and_clear.patch
new file mode 100644
index 0000000..e161cae
--- /dev/null
+++ b/queue-4.4/x86-pae-use-64-bit-atomic-xchg-function-in-native_ptep_get_and_clear.patch
@@ -0,0 +1,62 @@
+From b2d7a075a1ccef2fb321d595802190c8e9b39004 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Tue, 21 Aug 2018 17:37:55 +0200
+Subject: x86/pae: use 64 bit atomic xchg function in native_ptep_get_and_clear
+
+From: Juergen Gross <jgross@suse.com>
+
+commit b2d7a075a1ccef2fb321d595802190c8e9b39004 upstream.
+
+Using only 32-bit writes for the pte will result in an intermediate
+L1TF vulnerable PTE. When running as a Xen PV guest this will at once
+switch the guest to shadow mode resulting in a loss of performance.
+
+Use arch_atomic64_xchg() instead which will perform the requested
+operation atomically with all 64 bits.
+
+Some performance considerations according to:
+
+https://software.intel.com/sites/default/files/managed/ad/dc/Intel-Xeon-Scalable-Processor-throughput-latency.pdf
+
+The main number should be the latency, as there is no tight loop around
+native_ptep_get_and_clear().
+
+"lock cmpxchg8b" has a latency of 20 cycles, while "lock xchg" (with a
+memory operand) isn't mentioned in that document. "lock xadd" (with xadd
+having 3 cycles less latency than xchg) has a latency of 11, so we can
+assume a latency of 14 for "lock xchg".
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Tested-by: Jason Andryuk <jandryuk@gmail.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/pgtable-3level.h |    7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -1,6 +1,8 @@
+ #ifndef _ASM_X86_PGTABLE_3LEVEL_H
+ #define _ASM_X86_PGTABLE_3LEVEL_H
+ 
++#include <asm/atomic64_32.h>
++
+ /*
+  * Intel Physical Address Extension (PAE) Mode - three-level page
+  * tables on PPro+ CPUs.
+@@ -142,10 +144,7 @@ static inline pte_t native_ptep_get_and_
+ {
+       pte_t res;
+ 
+-      /* xchg acts as a barrier before the setting of the high bits */
+-      res.pte_low = xchg(&ptep->pte_low, 0);
+-      res.pte_high = ptep->pte_high;
+-      ptep->pte_high = 0;
++      res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
+ 
+       return res;
+ }
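
For readers outside the kernel tree, here is a minimal user-space C sketch (not kernel code) of the idea the patch implements: a PAE pte is a 64-bit entry stored as two 32-bit halves, and clearing it with two separate 32-bit writes exposes an intermediate, partially cleared entry, whereas a single 64-bit atomic exchange never does. The pae_pte type, the function names, and the use of C11 atomic_exchange() in place of the kernel's arch_atomic64_xchg() are assumptions made only for this illustration.

/* Illustrative sketch only -- not kernel code. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Rough analogue of a PAE pte: one 64-bit entry stored as two 32-bit halves. */
struct pae_pte {
	uint32_t pte_low;
	uint32_t pte_high;
};

/* Old scheme: clear the halves with two separate 32-bit writes.  Between the
 * writes the entry holds an intermediate value (low half zero, high half
 * still populated) that another observer could see. */
static uint64_t clear_two_writes(struct pae_pte *p)
{
	uint64_t old = ((uint64_t)p->pte_high << 32) | p->pte_low;

	p->pte_low = 0;
	/* <-- intermediate, partially cleared entry is visible here */
	p->pte_high = 0;
	return old;
}

/* New scheme: treat the entry as one 64-bit value and clear it with a single
 * atomic exchange, so no intermediate state is ever observable.  C11
 * atomic_exchange() stands in for the kernel's arch_atomic64_xchg(). */
static uint64_t clear_one_xchg(_Atomic uint64_t *p)
{
	return atomic_exchange(p, 0);
}

int main(void)
{
	struct pae_pte a = { .pte_low = 0xdeadbeef, .pte_high = 0x12345678 };
	_Atomic uint64_t b = 0x12345678deadbeefULL;

	printf("two-write clear returned   %#llx\n",
	       (unsigned long long)clear_two_writes(&a));
	printf("atomic xchg clear returned %#llx\n",
	       (unsigned long long)clear_one_xchg(&b));
	return 0;
}

The sketch only mirrors the visibility difference between the two approaches; the latency figures discussed in the commit message concern the locked x86 instructions the kernel actually emits.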