x86-documentation-add-pti-description.patch
x86-cpufeatures-add-x86_bug_spectre_v.patch
sysfs-cpu-add-vulnerability-folder.patch
+x86-cpu-implement-cpu-vulnerabilities-sysfs-functions.patch
+x86-tboot-unbreak-tboot-with-pti-enabled.patch
+x86-mm-pti-remove-dead-logic-in-pti_user_pagetable_walk.patch
+x86-cpu-amd-make-lfence-a-serializing-instruction.patch
+x86-cpu-amd-use-lfence_rdtsc-in-preference-to-mfence_rdtsc.patch
+sysfs-cpu-fix-typos-in-vulnerability-documentation.patch
+x86-alternatives-fix-optimize_nops-checking.patch
--- /dev/null
+From 9ecccfaa7cb5249bd31bdceb93fcf5bedb8a24d8 Mon Sep 17 00:00:00 2001
+From: David Woodhouse <dwmw@amazon.co.uk>
+Date: Tue, 9 Jan 2018 15:02:51 +0000
+Subject: sysfs/cpu: Fix typos in vulnerability documentation
+
+From: David Woodhouse <dwmw@amazon.co.uk>
+
+commit 9ecccfaa7cb5249bd31bdceb93fcf5bedb8a24d8 upstream.
+
+Fixes: 87590ce6e ("sysfs/cpu: Add vulnerability folder")
+Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -378,7 +378,7 @@ What: /sys/devices/system/cpu/vulnerabi
+ /sys/devices/system/cpu/vulnerabilities/meltdown
+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
+-Date: Januar 2018
++Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
+
+@@ -388,4 +388,4 @@ Description: Information about CPU vulne
+
+ "Not affected" CPU is not affected by the vulnerability
+ "Vulnerable" CPU is affected and no mitigation in effect
+- "Mitigation: $M" CPU is affetcted and mitigation $M is in effect
++ "Mitigation: $M" CPU is affected and mitigation $M is in effect
--- /dev/null
+From 612e8e9350fd19cae6900cf36ea0c6892d1a0dca Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 10 Jan 2018 12:28:16 +0100
+Subject: x86/alternatives: Fix optimize_nops() checking
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 612e8e9350fd19cae6900cf36ea0c6892d1a0dca upstream.
+
+The alternatives code checks only whether the first byte is a NOP, but
+padding which starts with NOPs and has actual instructions after them
+breaks the "optimized" test.
+
+Make sure to scan all bytes before deciding to optimize the NOPs in there.
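+
+The old test looked only at instr[0], so padding that begins with a NOP
+(0x90) but contains real instruction bytes afterwards was wrongly treated
+as all-NOP. A minimal sketch of the corrected check (a hypothetical
+standalone helper; the hunk below open-codes the loop):
+
+	/* Sketch: true only if all padlen padding bytes are NOPs. */
+	static bool all_nops(const u8 *instr, int padlen)
+	{
+		int i;
+
+		for (i = 0; i < padlen; i++)
+			if (instr[i] != 0x90)	/* 0x90 = one-byte NOP */
+				return false;
+		return true;
+	}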
+
+Reported-by: David Woodhouse <dwmw2@infradead.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Andi Kleen <andi@firstfloor.org>
+Cc: Andrew Lutomirski <luto@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/20180110112815.mgciyf5acwacphkq@pd.tnic
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/alternative.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -344,9 +344,12 @@ done:
+ static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
+ {
+ unsigned long flags;
++ int i;
+
+- if (instr[0] != 0x90)
+- return;
++ for (i = 0; i < a->padlen; i++) {
++ if (instr[i] != 0x90)
++ return;
++ }
+
+ local_irq_save(flags);
+ add_nops(instr + (a->instrlen - a->padlen), a->padlen);
--- /dev/null
+From e4d0e84e490790798691aaa0f2e598637f1867ec Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 8 Jan 2018 16:09:21 -0600
+Subject: x86/cpu/AMD: Make LFENCE a serializing instruction
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit e4d0e84e490790798691aaa0f2e598637f1867ec upstream.
+
+To aid in speculation control, make LFENCE a serializing instruction
+since it has less overhead than MFENCE. This is done by setting bit 1
+of MSR 0xc0011029 (DE_CFG). Some families that support LFENCE do not
+have this MSR. For these families, the LFENCE instruction is already
+serializing.
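+
+In effect this is the following read-modify-write of DE_CFG (a sketch;
+the patch below uses the msr_set_bit() helper, which goes through the
+exception-safe accessors so that families without this MSR do not fault):
+
+	u64 val;
+
+	rdmsrl(MSR_F10H_DECFG, val);	/* MSR 0xc0011029 */
+	val |= 1ULL << MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT;	/* bit 1 */
+	wrmsrl(MSR_F10H_DECFG, val);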
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/20180108220921.12580.71694.stgit@tlendack-t1.amdoffice.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/msr-index.h | 2 ++
+ arch/x86/kernel/cpu/amd.c | 10 ++++++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -352,6 +352,8 @@
+ #define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
+ #define FAM10H_MMIO_CONF_BASE_SHIFT 20
+ #define MSR_FAM10H_NODE_ID 0xc001100c
++#define MSR_F10H_DECFG 0xc0011029
++#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
+
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1 0xc001001a
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -829,6 +829,16 @@ static void init_amd(struct cpuinfo_x86
+ set_cpu_cap(c, X86_FEATURE_K8);
+
+ if (cpu_has(c, X86_FEATURE_XMM2)) {
++ /*
++ * A serializing LFENCE has less overhead than MFENCE, so
++ * use it for execution serialization. On families which
++ * don't have that MSR, LFENCE is already serializing.
++ * msr_set_bit() uses the safe accessors, too, even if the MSR
++ * is not present.
++ */
++ msr_set_bit(MSR_F10H_DECFG,
++ MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
++
+ /* MFENCE stops RDTSC speculation */
+ set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
+ }
--- /dev/null
+From 9c6a73c75864ad9fa49e5fa6513e4c4071c0e29f Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 8 Jan 2018 16:09:32 -0600
+Subject: x86/cpu/AMD: Use LFENCE_RDTSC in preference to MFENCE_RDTSC
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+commit 9c6a73c75864ad9fa49e5fa6513e4c4071c0e29f upstream.
+
+With LFENCE now a serializing instruction, use LFENCE_RDTSC in preference
+to MFENCE_RDTSC. However, since the kernel could be running under a
+hypervisor that does not support writing that MSR, read the MSR back and
+verify that the bit has been set successfully. If the MSR can be read
+and the bit is set, then set the LFENCE_RDTSC feature, otherwise set the
+MFENCE_RDTSC feature.
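+
+The resulting decision, sketched (the hunk below is the authoritative
+version):
+
+	u64 val;
+
+	if (!rdmsrl_safe(MSR_F10H_DECFG, &val) &&
+	    (val & MSR_F10H_DECFG_LFENCE_SERIALIZE))
+		/* the write stuck: LFENCE is serializing, use it for RDTSC */
+		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
+	else
+		/* MSR not writable/readable: keep the MFENCE fallback */
+		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);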
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/20180108220932.12580.52458.stgit@tlendack-t1.amdoffice.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/msr-index.h | 1 +
+ arch/x86/kernel/cpu/amd.c | 18 ++++++++++++++++--
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -354,6 +354,7 @@
+ #define MSR_FAM10H_NODE_ID 0xc001100c
+ #define MSR_F10H_DECFG 0xc0011029
+ #define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
++#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
+
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1 0xc001001a
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -829,6 +829,9 @@ static void init_amd(struct cpuinfo_x86
+ set_cpu_cap(c, X86_FEATURE_K8);
+
+ if (cpu_has(c, X86_FEATURE_XMM2)) {
++ unsigned long long val;
++ int ret;
++
+ /*
+ * A serializing LFENCE has less overhead than MFENCE, so
+ * use it for execution serialization. On families which
+@@ -839,8 +842,19 @@ static void init_amd(struct cpuinfo_x86
+ msr_set_bit(MSR_F10H_DECFG,
+ MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
+
+- /* MFENCE stops RDTSC speculation */
+- set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
++ /*
++ * Verify that the MSR write was successful (could be running
++ * under a hypervisor) and only then assume that LFENCE is
++ * serializing.
++ */
++ ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
++ if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
++ /* A serializing LFENCE stops RDTSC speculation */
++ set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
++ } else {
++ /* MFENCE stops RDTSC speculation */
++ set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
++ }
+ }
+
+ /*
--- /dev/null
+From 61dc0f555b5c761cdafb0ba5bd41ecf22d68a4c4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 7 Jan 2018 22:48:01 +0100
+Subject: x86/cpu: Implement CPU vulnerabilities sysfs functions
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 61dc0f555b5c761cdafb0ba5bd41ecf22d68a4c4 upstream.
+
+Implement the CPU vulnerability show functions for meltdown, spectre_v1 and
+spectre_v2.
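+
+With this in place the mitigation state becomes queryable from userspace,
+e.g. (sample session; the exact strings depend on the CPU and on whether
+PTI is enabled):
+
+	# cat /sys/devices/system/cpu/vulnerabilities/meltdown
+	Mitigation: PTI
+	# cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
+	Vulnerable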
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Link: https://lkml.kernel.org/r/20180107214913.177414879@linutronix.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/Kconfig | 1 +
+ arch/x86/kernel/cpu/bugs.c | 29 +++++++++++++++++++++++++++++
+ 2 files changed, 30 insertions(+)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -89,6 +89,7 @@ config X86
+ select GENERIC_CLOCKEVENTS_MIN_ADJUST
+ select GENERIC_CMOS_UPDATE
+ select GENERIC_CPU_AUTOPROBE
++ select GENERIC_CPU_VULNERABILITIES
+ select GENERIC_EARLY_IOREMAP
+ select GENERIC_FIND_FIRST_BIT
+ select GENERIC_IOMAP
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -10,6 +10,7 @@
+ */
+ #include <linux/init.h>
+ #include <linux/utsname.h>
++#include <linux/cpu.h>
+ #include <asm/bugs.h>
+ #include <asm/processor.h>
+ #include <asm/processor-flags.h>
+@@ -60,3 +61,31 @@ void __init check_bugs(void)
+ set_memory_4k((unsigned long)__va(0), 1);
+ #endif
+ }
++
++#ifdef CONFIG_SYSFS
++ssize_t cpu_show_meltdown(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
++ return sprintf(buf, "Not affected\n");
++ if (boot_cpu_has(X86_FEATURE_PTI))
++ return sprintf(buf, "Mitigation: PTI\n");
++ return sprintf(buf, "Vulnerable\n");
++}
++
++ssize_t cpu_show_spectre_v1(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
++ return sprintf(buf, "Not affected\n");
++ return sprintf(buf, "Vulnerable\n");
++}
++
++ssize_t cpu_show_spectre_v2(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
++ return sprintf(buf, "Not affected\n");
++ return sprintf(buf, "Vulnerable\n");
++}
++#endif
--- /dev/null
+From 8d56eff266f3e41a6c39926269c4c3f58f881a8e Mon Sep 17 00:00:00 2001
+From: Jike Song <albcamus@gmail.com>
+Date: Tue, 9 Jan 2018 00:03:41 +0800
+Subject: x86/mm/pti: Remove dead logic in pti_user_pagetable_walk*()
+
+From: Jike Song <albcamus@gmail.com>
+
+commit 8d56eff266f3e41a6c39926269c4c3f58f881a8e upstream.
+
+The following code contains dead logic:
+
+ 162 if (pgd_none(*pgd)) {
+ 163 unsigned long new_p4d_page = __get_free_page(gfp);
+ 164 if (!new_p4d_page)
+ 165 return NULL;
+ 166
+ 167 if (pgd_none(*pgd)) {
+ 168 set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
+ 169 new_p4d_page = 0;
+ 170 }
+ 171 if (new_p4d_page)
+ 172 free_page(new_p4d_page);
+ 173 }
+
+There can't be any difference between the two pgd_none(*pgd) checks at
+L162 and L167, so the test at L171 is always false.
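+
+Since the inner test can therefore never leave new_p4d_page non-zero, the
+whole block reduces to (which is what the patch below does at each page
+table level):
+
+	if (pgd_none(*pgd)) {
+		unsigned long new_p4d_page = __get_free_page(gfp);
+		if (!new_p4d_page)
+			return NULL;
+		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
+	}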
+
+Dave Hansen explained:
+
+ Yes, the double-test was part of an optimization where we attempted to
+ avoid using a global spinlock in the fork() path. We would check for
+ unallocated mid-level page tables without the lock. The lock was only
+ taken when we needed to *make* an entry to avoid collisions.
+
+ Now that it is all single-threaded, there is no chance of a collision,
+ no need for a lock, and no need for the re-check.
+
+As all these functions are only called during init, mark them __init as
+well.
+
+Fixes: 03f4424f348e ("x86/mm/pti: Add functions to clone kernel PMDs")
+Signed-off-by: Jike Song <albcamus@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Alan Cox <gnomes@lxorguk.ukuu.org.uk>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Jiri Kosina <jikos@kernel.org>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Kees Cook <keescook@google.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Greg KH <gregkh@linux-foundation.org>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Paul Turner <pjt@google.com>
+Link: https://lkml.kernel.org/r/20180108160341.3461-1-albcamus@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/mm/pti.c | 32 ++++++--------------------------
+ 1 file changed, 6 insertions(+), 26 deletions(-)
+
+--- a/arch/x86/mm/pti.c
++++ b/arch/x86/mm/pti.c
+@@ -149,7 +149,7 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pg
+ *
+ * Returns a pointer to a P4D on success, or NULL on failure.
+ */
+-static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
++static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
+ {
+ pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
+ gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+@@ -164,12 +164,7 @@ static p4d_t *pti_user_pagetable_walk_p4
+ if (!new_p4d_page)
+ return NULL;
+
+- if (pgd_none(*pgd)) {
+- set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
+- new_p4d_page = 0;
+- }
+- if (new_p4d_page)
+- free_page(new_p4d_page);
++ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
+ }
+ BUILD_BUG_ON(pgd_large(*pgd) != 0);
+
+@@ -182,7 +177,7 @@ static p4d_t *pti_user_pagetable_walk_p4
+ *
+ * Returns a pointer to a PMD on success, or NULL on failure.
+ */
+-static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
++static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+ {
+ gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+ p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
+@@ -194,12 +189,7 @@ static pmd_t *pti_user_pagetable_walk_pm
+ if (!new_pud_page)
+ return NULL;
+
+- if (p4d_none(*p4d)) {
+- set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
+- new_pud_page = 0;
+- }
+- if (new_pud_page)
+- free_page(new_pud_page);
++ set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
+ }
+
+ pud = pud_offset(p4d, address);
+@@ -213,12 +203,7 @@ static pmd_t *pti_user_pagetable_walk_pm
+ if (!new_pmd_page)
+ return NULL;
+
+- if (pud_none(*pud)) {
+- set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
+- new_pmd_page = 0;
+- }
+- if (new_pmd_page)
+- free_page(new_pmd_page);
++ set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
+ }
+
+ return pmd_offset(pud, address);
+@@ -251,12 +236,7 @@ static __init pte_t *pti_user_pagetable_
+ if (!new_pte_page)
+ return NULL;
+
+- if (pmd_none(*pmd)) {
+- set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+- new_pte_page = 0;
+- }
+- if (new_pte_page)
+- free_page(new_pte_page);
++ set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+ }
+
+ pte = pte_offset_kernel(pmd, address);
--- /dev/null
+From 262b6b30087246abf09d6275eb0c0dc421bcbe38 Mon Sep 17 00:00:00 2001
+From: Dave Hansen <dave.hansen@linux.intel.com>
+Date: Sat, 6 Jan 2018 18:41:14 +0100
+Subject: x86/tboot: Unbreak tboot with PTI enabled
+
+From: Dave Hansen <dave.hansen@linux.intel.com>
+
+commit 262b6b30087246abf09d6275eb0c0dc421bcbe38 upstream.
+
+This is another case similar to what EFI does: create a new set of
+page tables, map some code at a low address, and jump to it. PTI
+mistakes this low address for userspace and marks it non-executable
+in an effort to make it unusable for userspace.
+
+Undo the poison to allow execution.
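+
+Concretely, the poison is undone by clearing the NX bit on the freshly
+allocated user-range PGD entry (a one-line restatement of the hunk below;
+on x86-64, _PAGE_NX is bit 63 of the entry):
+
+	pgd->pgd &= ~_PAGE_NX;	/* allow execution of the low mapping */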
+
+Fixes: 385ce0ea4c07 ("x86/mm/pti: Add Kconfig")
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Alan Cox <gnomes@lxorguk.ukuu.org.uk>
+Cc: Tim Chen <tim.c.chen@linux.intel.com>
+Cc: Jon Masters <jcm@redhat.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Andi Kleen <andi@firstfloor.org>
+Cc: Jeff Law <law@redhat.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh@linux-foundation.org>
+Cc: David" <dwmw@amazon.co.uk>
+Cc: Nick Clifton <nickc@redhat.com>
+Link: https://lkml.kernel.org/r/20180108102805.GK25546@redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/tboot.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/kernel/tboot.c
++++ b/arch/x86/kernel/tboot.c
+@@ -127,6 +127,7 @@ static int map_tboot_page(unsigned long
+ p4d = p4d_alloc(&tboot_mm, pgd, vaddr);
+ if (!p4d)
+ return -1;
++ pgd->pgd &= ~_PAGE_NX;
+ pud = pud_alloc(&tboot_mm, p4d, vaddr);
+ if (!pud)
+ return -1;