--- /dev/null
+From 37f8580bbc1aa098ef8826f4854450dc4667fb90 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Nov 2018 09:54:00 -0400
+Subject: Revert "x86/mm: Expand static page table for fixmap space"
+
+This reverts commit 3a8304b7ad2e291777e8499e39390145d932a2fd, which was
+upstream commit 05ab1d8a4b36ee912b7087c6da127439ed0a903e.
+
+Ben Hutchings writes:
+
+This backport is incorrect. The part that updated __startup_64() in
+arch/x86/kernel/head64.c was dropped, presumably because that function
+doesn't exist in 4.9. However, that seems to be an essential part of the
+fix. In 4.9 the startup_64 routine in arch/x86/kernel/head_64.S would
+need to be changed instead.
+
+I also found that this introduces new boot-time warnings on some
+systems if CONFIG_DEBUG_WX is enabled.
+
+So, unless someone provides fixes for those issues, I think this should
+be reverted for the 4.9 branch.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/fixmap.h | 10 ----------
+ arch/x86/include/asm/pgtable_64.h | 3 +--
+ arch/x86/kernel/head_64.S | 16 ++++------------
+ arch/x86/mm/pgtable.c | 9 ---------
+ arch/x86/xen/mmu.c | 8 ++------
+ 5 files changed, 7 insertions(+), 39 deletions(-)
+
+diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
+index 25152843dd1f..8554f960e21b 100644
+--- a/arch/x86/include/asm/fixmap.h
++++ b/arch/x86/include/asm/fixmap.h
+@@ -14,16 +14,6 @@
+ #ifndef _ASM_X86_FIXMAP_H
+ #define _ASM_X86_FIXMAP_H
+
+-/*
+- * Exposed to assembly code for setting up initial page tables. Cannot be
+- * calculated in assembly code (fixmap entries are an enum), but is sanity
+- * checked in the actual fixmap C code to make sure that the fixmap is
+- * covered fully.
+- */
+-#define FIXMAP_PMD_NUM 2
+-/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
+-#define FIXMAP_PMD_TOP 507
+-
+ #ifndef __ASSEMBLY__
+ #include <linux/kernel.h>
+ #include <asm/acpi.h>
+diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
+index d5c4df98aac3..221a32ed1372 100644
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -13,14 +13,13 @@
+ #include <asm/processor.h>
+ #include <linux/bitops.h>
+ #include <linux/threads.h>
+-#include <asm/fixmap.h>
+
+ extern pud_t level3_kernel_pgt[512];
+ extern pud_t level3_ident_pgt[512];
+ extern pmd_t level2_kernel_pgt[512];
+ extern pmd_t level2_fixmap_pgt[512];
+ extern pmd_t level2_ident_pgt[512];
+-extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
++extern pte_t level1_fixmap_pgt[512];
+ extern pgd_t init_level4_pgt[];
+
+ #define swapper_pg_dir init_level4_pgt
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index b0d6697ab153..9d72cf547c88 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -23,7 +23,6 @@
+ #include "../entry/calling.h"
+ #include <asm/export.h>
+ #include <asm/nospec-branch.h>
+-#include <asm/fixmap.h>
+
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/asm-offsets.h>
+@@ -494,20 +493,13 @@ NEXT_PAGE(level2_kernel_pgt)
+ KERNEL_IMAGE_SIZE/PMD_SIZE)
+
+ NEXT_PAGE(level2_fixmap_pgt)
+- .fill (512 - 4 - FIXMAP_PMD_NUM),8,0
+- pgtno = 0
+- .rept (FIXMAP_PMD_NUM)
+- .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
+- + _PAGE_TABLE;
+- pgtno = pgtno + 1
+- .endr
+- /* 6 MB reserved space + a 2MB hole */
+- .fill 4,8,0
++ .fill 506,8,0
++ .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
++ /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
++ .fill 5,8,0
+
+ NEXT_PAGE(level1_fixmap_pgt)
+- .rept (FIXMAP_PMD_NUM)
+ .fill 512,8,0
+- .endr
+
+ #undef PMDS
+
+diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
+index 8cbed30feb67..e30baa8ad94f 100644
+--- a/arch/x86/mm/pgtable.c
++++ b/arch/x86/mm/pgtable.c
+@@ -536,15 +536,6 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
+ {
+ unsigned long address = __fix_to_virt(idx);
+
+-#ifdef CONFIG_X86_64
+- /*
+- * Ensure that the static initial page tables are covering the
+- * fixmap completely.
+- */
+- BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
+- (FIXMAP_PMD_NUM * PTRS_PER_PTE));
+-#endif
+-
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
+index ebceaba20ad1..c92f75f7ae33 100644
+--- a/arch/x86/xen/mmu.c
++++ b/arch/x86/xen/mmu.c
+@@ -1936,7 +1936,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+ * L3_k[511] -> level2_fixmap_pgt */
+ convert_pfn_mfn(level3_kernel_pgt);
+
+- /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
++ /* L3_k[511][506] -> level1_fixmap_pgt */
+ convert_pfn_mfn(level2_fixmap_pgt);
+ }
+ /* We get [511][511] and have Xen's version of level2_kernel_pgt */
+@@ -1970,11 +1970,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
+ set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+-
+- for (i = 0; i < FIXMAP_PMD_NUM; i++) {
+- set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
+- PAGE_KERNEL_RO);
+- }
++ set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+ /* Pin down new L4 */
+ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+--
+2.17.1
+