git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 14 Dec 2017 20:27:03 +0000 (21:27 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 14 Dec 2017 20:27:03 +0000 (21:27 +0100)
added patches:
revert-x86-efi-build-our-own-page-table-structures.patch
revert-x86-efi-hoist-page-table-switching-code-into-efi_call_virt.patch
revert-x86-mm-pat-ensure-cpa-pfn-only-contains-page-frame-numbers.patch

queue-4.4/revert-x86-efi-build-our-own-page-table-structures.patch [new file with mode: 0644]
queue-4.4/revert-x86-efi-hoist-page-table-switching-code-into-efi_call_virt.patch [new file with mode: 0644]
queue-4.4/revert-x86-mm-pat-ensure-cpa-pfn-only-contains-page-frame-numbers.patch [new file with mode: 0644]
queue-4.4/series

diff --git a/queue-4.4/revert-x86-efi-build-our-own-page-table-structures.patch b/queue-4.4/revert-x86-efi-build-our-own-page-table-structures.patch
new file mode 100644
index 0000000..a192665
--- /dev/null
@@ -0,0 +1,285 @@
+From foo@baz Thu Dec 14 21:26:14 CET 2017
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Thu, 14 Dec 2017 21:21:50 +0100
+Subject: Revert "x86/efi: Build our own page table structures"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 36e0f05afd4e1d09fd47936761a502aedbc50649 which is
+commit 67a9108ed4313b85a9c53406d80dc1ae3f8c3e36 upstream.
+
+It turns out there were too many other issues with this patch to make
+it viable for the stable tree.
+
+Reported-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Jones <davej@codemonkey.org.uk>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Stephen Smalley <sds@tycho.nsa.gov>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: linux-efi@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: "Ghannam, Yazen" <Yazen.Ghannam@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/efi.h     |    1 
+ arch/x86/platform/efi/efi.c    |   39 ++++++++++------
+ arch/x86/platform/efi/efi_32.c |    5 --
+ arch/x86/platform/efi/efi_64.c |   97 ++++++-----------------------------------
+ 4 files changed, 40 insertions(+), 102 deletions(-)
+
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -136,7 +136,6 @@ extern void __init efi_memory_uc(u64 add
+ extern void __init efi_map_region(efi_memory_desc_t *md);
+ extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
+ extern void efi_sync_low_kernel_mappings(void);
+-extern int __init efi_alloc_page_tables(void);
+ extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+ extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+ extern void __init old_map_region(efi_memory_desc_t *md);
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -869,7 +869,7 @@ static void __init kexec_enter_virtual_m
+  * This function will switch the EFI runtime services to virtual mode.
+  * Essentially, we look through the EFI memmap and map every region that
+  * has the runtime attribute bit set in its memory descriptor into the
+- * efi_pgd page table.
++ * ->trampoline_pgd page table using a top-down VA allocation scheme.
+  *
+  * The old method which used to update that memory descriptor with the
+  * virtual address obtained from ioremap() is still supported when the
+@@ -879,8 +879,8 @@ static void __init kexec_enter_virtual_m
+  *
+  * The new method does a pagetable switch in a preemption-safe manner
+  * so that we're in a different address space when calling a runtime
+- * function. For function arguments passing we do copy the PUDs of the
+- * kernel page table into efi_pgd prior to each call.
++ * function. For function arguments passing we do copy the PGDs of the
++ * kernel page table into ->trampoline_pgd prior to each call.
+  *
+  * Specially for kexec boot, efi runtime maps in previous kernel should
+  * be passed in via setup_data. In that case runtime ranges will be mapped
+@@ -895,12 +895,6 @@ static void __init __efi_enter_virtual_m
+       efi.systab = NULL;
+-      if (efi_alloc_page_tables()) {
+-              pr_err("Failed to allocate EFI page tables\n");
+-              clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+-              return;
+-      }
+-
+       efi_merge_regions();
+       new_memmap = efi_map_regions(&count, &pg_shift);
+       if (!new_memmap) {
+@@ -960,11 +954,28 @@ static void __init __efi_enter_virtual_m
+       efi_runtime_mkexec();
+       /*
+-       * We mapped the descriptor array into the EFI pagetable above
+-       * but we're not unmapping it here because if we're running in
+-       * EFI mixed mode we need all of memory to be accessible when
+-       * we pass parameters to the EFI runtime services in the
+-       * thunking code.
++       * We mapped the descriptor array into the EFI pagetable above but we're
++       * not unmapping it here. Here's why:
++       *
++       * We're copying select PGDs from the kernel page table to the EFI page
++       * table and when we do so and make changes to those PGDs like unmapping
++       * stuff from them, those changes appear in the kernel page table and we
++       * go boom.
++       *
++       * From setup_real_mode():
++       *
++       * ...
++       * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
++       *
++       * In this particular case, our allocation is in PGD 0 of the EFI page
++       * table but we've copied that PGD from PGD[272] of the kernel page table:
++       *
++       *      pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
++       *
++       * where the direct memory mapping in kernel space is.
++       *
++       * new_memmap's VA comes from that direct mapping, and thus clearing it
++       * here would clear it in the kernel page table too.
+        *
+        * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
+        */
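A quick way to check the pgd_index() arithmetic quoted in the comment above is a standalone userspace sketch (illustration only, not kernel code), assuming the x86-64 4-level paging constants PGDIR_SHIFT = 39 and PTRS_PER_PGD = 512:

#include <stdio.h>

#define PGDIR_SHIFT	39	/* x86-64, 4-level paging (assumed) */
#define PTRS_PER_PGD	512
#define pgd_index(va)	(((va) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
	/* base of the kernel direct mapping on x86-64 in v4.4 */
	unsigned long long page_offset = 0xffff880000000000ULL;

	/* prints "pgd_index(__PAGE_OFFSET) = 272", matching the comment */
	printf("pgd_index(__PAGE_OFFSET) = %llu\n", pgd_index(page_offset));
	return 0;
}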
+--- a/arch/x86/platform/efi/efi_32.c
++++ b/arch/x86/platform/efi/efi_32.c
+@@ -38,11 +38,6 @@
+  * say 0 - 3G.
+  */
+-int __init efi_alloc_page_tables(void)
+-{
+-      return 0;
+-}
+-
+ void efi_sync_low_kernel_mappings(void) {}
+ void __init efi_dump_pagetable(void) {}
+ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -40,7 +40,6 @@
+ #include <asm/fixmap.h>
+ #include <asm/realmode.h>
+ #include <asm/time.h>
+-#include <asm/pgalloc.h>
+ /*
+  * We allocate runtime services regions bottom-up, starting from -4G, i.e.
+@@ -122,92 +121,22 @@ void __init efi_call_phys_epilog(pgd_t *
+       early_code_mapping_set_exec(0);
+ }
+-static pgd_t *efi_pgd;
+-
+-/*
+- * We need our own copy of the higher levels of the page tables
+- * because we want to avoid inserting EFI region mappings (EFI_VA_END
+- * to EFI_VA_START) into the standard kernel page tables. Everything
+- * else can be shared, see efi_sync_low_kernel_mappings().
+- */
+-int __init efi_alloc_page_tables(void)
+-{
+-      pgd_t *pgd;
+-      pud_t *pud;
+-      gfp_t gfp_mask;
+-
+-      if (efi_enabled(EFI_OLD_MEMMAP))
+-              return 0;
+-
+-      gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+-      efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
+-      if (!efi_pgd)
+-              return -ENOMEM;
+-
+-      pgd = efi_pgd + pgd_index(EFI_VA_END);
+-
+-      pud = pud_alloc_one(NULL, 0);
+-      if (!pud) {
+-              free_page((unsigned long)efi_pgd);
+-              return -ENOMEM;
+-      }
+-
+-      pgd_populate(NULL, pgd, pud);
+-
+-      return 0;
+-}
+-
+ /*
+  * Add low kernel mappings for passing arguments to EFI functions.
+  */
+ void efi_sync_low_kernel_mappings(void)
+ {
+-      unsigned num_entries;
+-      pgd_t *pgd_k, *pgd_efi;
+-      pud_t *pud_k, *pud_efi;
++      unsigned num_pgds;
++      pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+       if (efi_enabled(EFI_OLD_MEMMAP))
+               return;
+-      /*
+-       * We can share all PGD entries apart from the one entry that
+-       * covers the EFI runtime mapping space.
+-       *
+-       * Make sure the EFI runtime region mappings are guaranteed to
+-       * only span a single PGD entry and that the entry also maps
+-       * other important kernel regions.
+-       */
+-      BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
+-      BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
+-                      (EFI_VA_END & PGDIR_MASK));
+-
+-      pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
+-      pgd_k = pgd_offset_k(PAGE_OFFSET);
+-
+-      num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
+-      memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
+-
+-      /*
+-       * We share all the PUD entries apart from those that map the
+-       * EFI regions. Copy around them.
+-       */
+-      BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
+-      BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);
+-
+-      pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
+-      pud_efi = pud_offset(pgd_efi, 0);
+-
+-      pgd_k = pgd_offset_k(EFI_VA_END);
+-      pud_k = pud_offset(pgd_k, 0);
+-
+-      num_entries = pud_index(EFI_VA_END);
+-      memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
++      num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
+-      pud_efi = pud_offset(pgd_efi, EFI_VA_START);
+-      pud_k = pud_offset(pgd_k, EFI_VA_START);
+-
+-      num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
+-      memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
++      memcpy(pgd + pgd_index(PAGE_OFFSET),
++              init_mm.pgd + pgd_index(PAGE_OFFSET),
++              sizeof(pgd_t) * num_pgds);
+ }
+ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+@@ -220,8 +149,8 @@ int __init efi_setup_page_tables(unsigne
+       if (efi_enabled(EFI_OLD_MEMMAP))
+               return 0;
+-      efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
+-      pgd = efi_pgd;
++      efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
++      pgd = __va(efi_scratch.efi_pgt);
+       /*
+        * It can happen that the physical address of new_memmap lands in memory
+@@ -267,14 +196,16 @@ int __init efi_setup_page_tables(unsigne
+ void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ {
+-      kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
++      pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
++
++      kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
+ }
+ static void __init __map_region(efi_memory_desc_t *md, u64 va)
+ {
++      pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+       unsigned long flags = 0;
+       unsigned long pfn;
+-      pgd_t *pgd = efi_pgd;
+       if (!(md->attribute & EFI_MEMORY_WB))
+               flags |= _PAGE_PCD;
+@@ -383,7 +314,9 @@ void __init efi_runtime_mkexec(void)
+ void __init efi_dump_pagetable(void)
+ {
+ #ifdef CONFIG_EFI_PGT_DUMP
+-      ptdump_walk_pgd_level(NULL, efi_pgd);
++      pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
++
++      ptdump_walk_pgd_level(NULL, pgd);
+ #endif
+ }
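The restored efi_sync_low_kernel_mappings() above copies whole PGD slots from init_mm.pgd into the trampoline page table. The slot count follows from the same pgd_index() arithmetic; a minimal sketch, assuming the v4.4 x86-64 values PAGE_OFFSET = 0xffff880000000000 and MODULES_END = 0xffffffffff000000:

#include <stdio.h>

#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512
#define pgd_index(va)	(((va) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
	unsigned long long page_offset = 0xffff880000000000ULL;  /* assumed */
	unsigned long long modules_end = 0xffffffffff000000ULL;  /* assumed */

	/*
	 * pgd_index(PAGE_OFFSET) = 272 and pgd_index(MODULES_END - 1) = 511,
	 * so the memcpy() in efi_sync_low_kernel_mappings() moves 239 entries.
	 */
	printf("num_pgds = %llu\n",
	       pgd_index(modules_end - 1) - pgd_index(page_offset));
	return 0;
}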
diff --git a/queue-4.4/revert-x86-efi-hoist-page-table-switching-code-into-efi_call_virt.patch b/queue-4.4/revert-x86-efi-hoist-page-table-switching-code-into-efi_call_virt.patch
new file mode 100644
index 0000000..5549eb8
--- /dev/null
@@ -0,0 +1,206 @@
+From foo@baz Thu Dec 14 21:26:14 CET 2017
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Thu, 14 Dec 2017 21:23:48 +0100
+Subject: Revert "x86/efi: Hoist page table switching code into efi_call_virt()"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit b73adb60852034d84092d123b323196ca42529cd which is
+commit c9f2a9a65e4855b74d92cdad688f6ee4a1a323ff upstream.
+
+It turns out there were too many other issues with this patch to make
+it viable for the stable tree.
+
+Reported-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Jones <davej@codemonkey.org.uk>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Stephen Smalley <sds@tycho.nsa.gov>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: linux-efi@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: "Ghannam, Yazen" <Yazen.Ghannam@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/efi.h          |   25 --------------------
+ arch/x86/platform/efi/efi_64.c      |   24 ++++++++++----------
+ arch/x86/platform/efi/efi_stub_64.S |   43 ++++++++++++++++++++++++++++++++++++
+ 3 files changed, 56 insertions(+), 36 deletions(-)
+
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -3,7 +3,6 @@
+ #include <asm/fpu/api.h>
+ #include <asm/pgtable.h>
+-#include <asm/tlb.h>
+ /*
+  * We map the EFI regions needed for runtime services non-contiguously,
+@@ -65,17 +64,6 @@ extern u64 asmlinkage efi_call(void *fp,
+ #define efi_call_phys(f, args...)             efi_call((f), args)
+-/*
+- * Scratch space used for switching the pagetable in the EFI stub
+- */
+-struct efi_scratch {
+-      u64     r15;
+-      u64     prev_cr3;
+-      pgd_t   *efi_pgt;
+-      bool    use_pgd;
+-      u64     phys_stack;
+-} __packed;
+-
+ #define efi_call_virt(f, ...)                                         \
+ ({                                                                    \
+       efi_status_t __s;                                               \
+@@ -83,20 +71,7 @@ struct efi_scratch {
+       efi_sync_low_kernel_mappings();                                 \
+       preempt_disable();                                              \
+       __kernel_fpu_begin();                                           \
+-                                                                      \
+-      if (efi_scratch.use_pgd) {                                      \
+-              efi_scratch.prev_cr3 = read_cr3();                      \
+-              write_cr3((unsigned long)efi_scratch.efi_pgt);          \
+-              __flush_tlb_all();                                      \
+-      }                                                               \
+-                                                                      \
+       __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);    \
+-                                                                      \
+-      if (efi_scratch.use_pgd) {                                      \
+-              write_cr3(efi_scratch.prev_cr3);                        \
+-              __flush_tlb_all();                                      \
+-      }                                                               \
+-                                                                      \
+       __kernel_fpu_end();                                             \
+       preempt_enable();                                               \
+       __s;                                                            \
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -47,7 +47,16 @@
+  */
+ static u64 efi_va = EFI_VA_START;
+-struct efi_scratch efi_scratch;
++/*
++ * Scratch space used for switching the pagetable in the EFI stub
++ */
++struct efi_scratch {
++      u64 r15;
++      u64 prev_cr3;
++      pgd_t *efi_pgt;
++      bool use_pgd;
++      u64 phys_stack;
++} __packed;
+ static void __init early_code_mapping_set_exec(int executable)
+ {
+@@ -74,11 +83,8 @@ pgd_t * __init efi_call_phys_prolog(void
+       int pgd;
+       int n_pgds;
+-      if (!efi_enabled(EFI_OLD_MEMMAP)) {
+-              save_pgd = (pgd_t *)read_cr3();
+-              write_cr3((unsigned long)efi_scratch.efi_pgt);
+-              goto out;
+-      }
++      if (!efi_enabled(EFI_OLD_MEMMAP))
++              return NULL;
+       early_code_mapping_set_exec(1);
+@@ -90,7 +96,6 @@ pgd_t * __init efi_call_phys_prolog(void
+               vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
+               set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+       }
+-out:
+       __flush_tlb_all();
+       return save_pgd;
+@@ -104,11 +109,8 @@ void __init efi_call_phys_epilog(pgd_t *
+       int pgd_idx;
+       int nr_pgds;
+-      if (!efi_enabled(EFI_OLD_MEMMAP)) {
+-              write_cr3((unsigned long)save_pgd);
+-              __flush_tlb_all();
++      if (!save_pgd)
+               return;
+-      }
+       nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+--- a/arch/x86/platform/efi/efi_stub_64.S
++++ b/arch/x86/platform/efi/efi_stub_64.S
+@@ -38,6 +38,41 @@
+       mov %rsi, %cr0;                 \
+       mov (%rsp), %rsp
++      /* stolen from gcc */
++      .macro FLUSH_TLB_ALL
++      movq %r15, efi_scratch(%rip)
++      movq %r14, efi_scratch+8(%rip)
++      movq %cr4, %r15
++      movq %r15, %r14
++      andb $0x7f, %r14b
++      movq %r14, %cr4
++      movq %r15, %cr4
++      movq efi_scratch+8(%rip), %r14
++      movq efi_scratch(%rip), %r15
++      .endm
++
++      .macro SWITCH_PGT
++      cmpb $0, efi_scratch+24(%rip)
++      je 1f
++      movq %r15, efi_scratch(%rip)            # r15
++      # save previous CR3
++      movq %cr3, %r15
++      movq %r15, efi_scratch+8(%rip)          # prev_cr3
++      movq efi_scratch+16(%rip), %r15         # EFI pgt
++      movq %r15, %cr3
++      1:
++      .endm
++
++      .macro RESTORE_PGT
++      cmpb $0, efi_scratch+24(%rip)
++      je 2f
++      movq efi_scratch+8(%rip), %r15
++      movq %r15, %cr3
++      movq efi_scratch(%rip), %r15
++      FLUSH_TLB_ALL
++      2:
++      .endm
++
+ ENTRY(efi_call)
+       SAVE_XMM
+       mov (%rsp), %rax
+@@ -48,8 +83,16 @@ ENTRY(efi_call)
+       mov %r8, %r9
+       mov %rcx, %r8
+       mov %rsi, %rcx
++      SWITCH_PGT
+       call *%rdi
++      RESTORE_PGT
+       addq $48, %rsp
+       RESTORE_XMM
+       ret
+ ENDPROC(efi_call)
++
++      .data
++ENTRY(efi_scratch)
++      .fill 3,8,0
++      .byte 0
++      .quad 0
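The SWITCH_PGT and RESTORE_PGT macros above hard-code byte offsets into efi_scratch: +8 for prev_cr3, +16 for efi_pgt and +24 for use_pgd, with the trailing ".fill 3,8,0 / .byte 0 / .quad 0" reserving storage in the same layout. A standalone sketch (not kernel code) confirming those offsets for the __packed struct re-declared in efi_64.c:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned long long u64;
typedef struct pgd pgd_t;	/* opaque here; only the pointer size matters */

struct efi_scratch {
	u64	r15;
	u64	prev_cr3;
	pgd_t	*efi_pgt;
	bool	use_pgd;
	u64	phys_stack;
} __attribute__((packed));

int main(void)
{
	/* prints 8, 16 and 24 on x86-64, matching the assembly offsets */
	printf("prev_cr3 at +%zu\n", offsetof(struct efi_scratch, prev_cr3));
	printf("efi_pgt  at +%zu\n", offsetof(struct efi_scratch, efi_pgt));
	printf("use_pgd  at +%zu\n", offsetof(struct efi_scratch, use_pgd));
	return 0;
}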
diff --git a/queue-4.4/revert-x86-mm-pat-ensure-cpa-pfn-only-contains-page-frame-numbers.patch b/queue-4.4/revert-x86-mm-pat-ensure-cpa-pfn-only-contains-page-frame-numbers.patch
new file mode 100644
index 0000000..32516c2
--- /dev/null
@@ -0,0 +1,138 @@
+From foo@baz Thu Dec 14 21:26:14 CET 2017
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Thu, 14 Dec 2017 21:25:00 +0100
+Subject: Revert "x86/mm/pat: Ensure cpa->pfn only contains page frame numbers"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 87e2bd898d3a79a8c609f183180adac47879a2a4 which is
+commit edc3b9129cecd0f0857112136f5b8b1bc1d45918 upstream.
+
+It turns out there were too many other issues with this patch to make
+it viable for the stable tree.
+
+Reported-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Jones <davej@codemonkey.org.uk>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Stephen Smalley <sds@tycho.nsa.gov>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: linux-efi@vger.kernel.org
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: "Ghannam, Yazen" <Yazen.Ghannam@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/pageattr.c         |   17 +++++++++++------
+ arch/x86/platform/efi/efi_64.c |   16 ++++++----------
+ 2 files changed, 17 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -911,10 +911,15 @@ static void populate_pte(struct cpa_data
+       pte = pte_offset_kernel(pmd, start);
+       while (num_pages-- && start < end) {
+-              set_pte(pte, pfn_pte(cpa->pfn, pgprot));
++
++              /* deal with the NX bit */
++              if (!(pgprot_val(pgprot) & _PAGE_NX))
++                      cpa->pfn &= ~_PAGE_NX;
++
++              set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
+               start    += PAGE_SIZE;
+-              cpa->pfn++;
++              cpa->pfn += PAGE_SIZE;
+               pte++;
+       }
+ }
+@@ -970,11 +975,11 @@ static int populate_pmd(struct cpa_data
+               pmd = pmd_offset(pud, start);
+-              set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
++              set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+                                  massage_pgprot(pmd_pgprot)));
+               start     += PMD_SIZE;
+-              cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
++              cpa->pfn  += PMD_SIZE;
+               cur_pages += PMD_SIZE >> PAGE_SHIFT;
+       }
+@@ -1043,11 +1048,11 @@ static int populate_pud(struct cpa_data
+        * Map everything starting from the Gb boundary, possibly with 1G pages
+        */
+       while (end - start >= PUD_SIZE) {
+-              set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
++              set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+                                  massage_pgprot(pud_pgprot)));
+               start     += PUD_SIZE;
+-              cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
++              cpa->pfn  += PUD_SIZE;
+               cur_pages += PUD_SIZE >> PAGE_SHIFT;
+               pud++;
+       }
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -143,7 +143,7 @@ void efi_sync_low_kernel_mappings(void)
+ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ {
+-      unsigned long pfn, text;
++      unsigned long text;
+       struct page *page;
+       unsigned npages;
+       pgd_t *pgd;
+@@ -160,8 +160,7 @@ int __init efi_setup_page_tables(unsigne
+        * and ident-map those pages containing the map before calling
+        * phys_efi_set_virtual_address_map().
+        */
+-      pfn = pa_memmap >> PAGE_SHIFT;
+-      if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
++      if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
+               pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
+               return 1;
+       }
+@@ -186,9 +185,8 @@ int __init efi_setup_page_tables(unsigne
+       npages = (_end - _text) >> PAGE_SHIFT;
+       text = __pa(_text);
+-      pfn = text >> PAGE_SHIFT;
+-      if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
++      if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
+               pr_err("Failed to map kernel text 1:1\n");
+               return 1;
+       }
+@@ -206,14 +204,12 @@ void __init efi_cleanup_page_tables(unsi
+ static void __init __map_region(efi_memory_desc_t *md, u64 va)
+ {
+       pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+-      unsigned long flags = 0;
+-      unsigned long pfn;
++      unsigned long pf = 0;
+       if (!(md->attribute & EFI_MEMORY_WB))
+-              flags |= _PAGE_PCD;
++              pf |= _PAGE_PCD;
+-      pfn = md->phys_addr >> PAGE_SHIFT;
+-      if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
++      if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
+               pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
+                          md->phys_addr, va);
+ }
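After this revert, cpa->pfn holds a physical address again: it is stepped in byte units (PAGE_SIZE, PMD_SIZE, PUD_SIZE) and callers such as __map_region() pass md->phys_addr straight through, whereas the reverted upstream commit had made it a true page frame number. A small sketch of the two conventions (illustration only):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long phys = 0x40000000UL;	/* example physical address */

	/* pfn convention (the reverted upstream code): step by one frame */
	unsigned long pfn = phys >> PAGE_SHIFT;
	pfn += 1;

	/* phys-addr convention (restored here): step by PAGE_SIZE bytes */
	phys += PAGE_SIZE;

	/* both steps land on the same next page */
	printf("pfn = 0x%lx, phys >> PAGE_SHIFT = 0x%lx\n",
	       pfn, phys >> PAGE_SHIFT);
	return 0;
}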
diff --git a/queue-4.4/series b/queue-4.4/series
index 5da606e91341e0e273ca8c2cf1b0529368184d22..cdc49ba7791f204557995df56b4f21c0a2fd2e61 100644
--- a/queue-4.4/series
+++ b/queue-4.4/series
@@ -100,3 +100,6 @@ rds-fix-null-pointer-dereference-in-__rds_rdma_map.patch
 sit-update-frag_off-info.patch
 packet-fix-crash-in-fanout_demux_rollover.patch
 net-packet-fix-a-race-in-packet_bind-and-packet_notifier.patch
+revert-x86-efi-build-our-own-page-table-structures.patch
+revert-x86-efi-hoist-page-table-switching-code-into-efi_call_virt.patch
+revert-x86-mm-pat-ensure-cpa-pfn-only-contains-page-frame-numbers.patch