drm-amdgpu-warn-and-update-pin_size-values-when-destroying-a-pinned-bo.patch
drm-amdgpu-don-t-warn-on-destroying-a-pinned-bo.patch
debugobjects-make-stack-check-warning-more-informative.patch
+x86-pae-use-64-bit-atomic-xchg-function-in-native_ptep_get_and_clear.patch
+x86-xen-don-t-write-ptes-directly-in-32-bit-pv-guests.patch
--- /dev/null
+From b2d7a075a1ccef2fb321d595802190c8e9b39004 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Tue, 21 Aug 2018 17:37:55 +0200
+Subject: x86/pae: use 64 bit atomic xchg function in native_ptep_get_and_clear
+
+From: Juergen Gross <jgross@suse.com>
+
+commit b2d7a075a1ccef2fb321d595802190c8e9b39004 upstream.
+
+Using only 32-bit writes for the pte will result in an intermediate
+L1TF-vulnerable PTE: the low word (with the present bit) is cleared
+first, while the high physical-address bits are still populated. When
+running as a Xen PV guest, this will immediately switch the guest to
+shadow mode, resulting in a loss of performance.
+
+Use arch_atomic64_xchg() instead, which performs the requested
+operation atomically on all 64 bits.
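+
+As a minimal user-space illustration (not part of this patch; the union
+layout mirrors the kernel's 32-bit PAE pte_t, and the PTE value is made
+up for the demo), this is the window the old two-step clear left open:
+once the low word is zeroed the PTE is non-present, but its high
+physical-address bits are still populated and thus L1TF-attackable
+until the second store lands.
+
+  /* build with: gcc -std=gnu11 -O2 pae_demo.c */
+  #include <stdint.h>
+  #include <stdio.h>
+
+  union pae_pte {
+          uint64_t pte;
+          struct { uint32_t pte_low, pte_high; }; /* x86 is little-endian */
+  };
+
+  int main(void)
+  {
+          /* hypothetical present PTE with high PFN bits set */
+          union pae_pte p = { .pte = 0x0000001234567067ULL };
+
+          /* old sequence: atomically clear the low word first */
+          __atomic_exchange_n(&p.pte_low, 0, __ATOMIC_SEQ_CST);
+
+          /* intermediate state: present bit clear, high bits still live */
+          printf("intermediate pte = %#018llx\n", (unsigned long long)p.pte);
+
+          p.pte_high = 0; /* only now are the high bits gone */
+          return 0;
+  }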
+
+Some performance considerations according to:
+
+https://software.intel.com/sites/default/files/managed/ad/dc/Intel-Xeon-Scalable-Processor-throughput-latency.pdf
+
+The number that matters here is the latency, as there is no tight loop
+around native_ptep_get_and_clear().
+
+"lock cmpxchg8b" has a latency of 20 cycles, while "lock xchg" (with a
+memory operand) isn't mentioned in that document. "lock xadd" (with xadd
+having 3 cycles less latency than xchg) has a latency of 11, so we can
+assume a latency of 14 for "lock xchg".
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Tested-by: Jason Andryuk <jandryuk@gmail.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/pgtable-3level.h | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -2,6 +2,8 @@
+ #ifndef _ASM_X86_PGTABLE_3LEVEL_H
+ #define _ASM_X86_PGTABLE_3LEVEL_H
+
++#include <asm/atomic64_32.h>
++
+ /*
+ * Intel Physical Address Extension (PAE) Mode - three-level page
+ * tables on PPro+ CPUs.
+@@ -147,10 +149,7 @@ static inline pte_t native_ptep_get_and_
+ {
+ pte_t res;
+
+- /* xchg acts as a barrier before the setting of the high bits */
+- res.pte_low = xchg(&ptep->pte_low, 0);
+- res.pte_high = ptep->pte_high;
+- ptep->pte_high = 0;
++ res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
+
+ return res;
+ }
--- /dev/null
+From f7c90c2aa4004808dff777ba6ae2c7294dd06851 Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Tue, 21 Aug 2018 17:37:54 +0200
+Subject: x86/xen: don't write ptes directly in 32-bit PV guests
+
+From: Juergen Gross <jgross@suse.com>
+
+commit f7c90c2aa4004808dff777ba6ae2c7294dd06851 upstream.
+
+In some cases, 32-bit PAE PV guests still write PTEs directly instead
+of using hypercalls. This is especially bad when clearing a PTE, as
+that is done via two 32-bit writes which produce an intermediate,
+L1TF-attackable PTE.
+
+Change the code to use hypercalls instead.
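+
+For reference, a sketch of the path the PTE writes take after this
+change (based on the mmu_update hypercall interface; the exact body of
+__xen_set_pte() varies between kernel versions):
+
+  static void __xen_set_pte(pte_t *ptep, pte_t pteval)
+  {
+          struct mmu_update u;
+
+          /* batch the update if a lazy-MMU section is active */
+          if (xen_batched_set_pte(ptep, pteval))
+                  return;
+
+          /* otherwise, a single unbatched update done by the hypervisor */
+          u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+          u.val = pte_val_ma(pteval);
+          HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
+  }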
+
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/mmu_pv.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -434,14 +434,13 @@ static void xen_set_pud(pud_t *ptr, pud_
+ static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+ {
+ trace_xen_mmu_set_pte_atomic(ptep, pte);
+- set_64bit((u64 *)ptep, native_pte_val(pte));
++ __xen_set_pte(ptep, pte);
+ }
+
+ static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ trace_xen_mmu_pte_clear(mm, addr, ptep);
+- if (!xen_batched_set_pte(ptep, native_make_pte(0)))
+- native_pte_clear(mm, addr, ptep);
++ __xen_set_pte(ptep, native_make_pte(0));
+ }
+
+ static void xen_pmd_clear(pmd_t *pmdp)
+@@ -1571,7 +1570,7 @@ static void __init xen_set_pte_init(pte_
+ pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+ pte_val_ma(pte));
+ #endif
+- native_set_pte(ptep, pte);
++ __xen_set_pte(ptep, pte);
+ }
+
+ /* Early in boot, while setting up the initial pagetable, assume