From: Sasha Levin Date: Sat, 24 Nov 2018 15:47:56 +0000 (-0500) Subject: patches for 4.19 X-Git-Tag: v3.18.127~20 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=77e30089ac12ec45c99693026217ed6a36189945;p=thirdparty%2Fkernel%2Fstable-queue.git patches for 4.19 Signed-off-by: Sasha Levin --- diff --git a/queue-4.19/series b/queue-4.19/series index c965493b2f9..1e6075961ea 100644 --- a/queue-4.19/series +++ b/queue-4.19/series @@ -89,3 +89,6 @@ net-aquantia-invalid-checksumm-offload-implementatio.patch kbuild-deb-pkg-fix-too-low-build-version-number.patch revert-scripts-setlocalversion-git-make-dirty-check-.patch sunrpc-drop-pointless-static-qualifier-in-xdr_get_ne.patch +x86-mm-move-ldt-remap-out-of-kaslr-region-on-5-level.patch +x86-ldt-unmap-ptes-for-the-slot-before-freeing-ldt-p.patch +x86-ldt-remove-unused-variable-in-map_ldt_struct.patch diff --git a/queue-4.19/x86-ldt-remove-unused-variable-in-map_ldt_struct.patch b/queue-4.19/x86-ldt-remove-unused-variable-in-map_ldt_struct.patch new file mode 100644 index 00000000000..52f60477388 --- /dev/null +++ b/queue-4.19/x86-ldt-remove-unused-variable-in-map_ldt_struct.patch @@ -0,0 +1,61 @@ +From 672c8ddd075916815c2f4d99b0abec6038d92053 Mon Sep 17 00:00:00 2001 +From: "Kirill A. Shutemov" +Date: Fri, 26 Oct 2018 15:28:56 +0300 +Subject: x86/ldt: Remove unused variable in map_ldt_struct() + +commit b082f2dd80612015cd6d9d84e52099734ec9a0e1 upstream + +Splitting out the sanity check in map_ldt_struct() moved page table syncing +into a separate function, which made the pgd variable unused. Remove it. + +[ tglx: Massaged changelog ] + +Fixes: 9bae3197e15d ("x86/ldt: Split out sanity check in map_ldt_struct()") +Signed-off-by: Kirill A. Shutemov +Signed-off-by: Thomas Gleixner +Reviewed-by: Andy Lutomirski +Cc: bp@alien8.de +Cc: hpa@zytor.com +Cc: dave.hansen@linux.intel.com +Cc: peterz@infradead.org +Cc: boris.ostrovsky@oracle.com +Cc: jgross@suse.com +Cc: bhe@redhat.com +Cc: willy@infradead.org +Cc: linux-mm@kvack.org +Cc: stable@vger.kernel.org +Link: https://lkml.kernel.org/r/20181026122856.66224-4-kirill.shutemov@linux.intel.com +Signed-off-by: Sasha Levin +--- + arch/x86/kernel/ldt.c | 8 -------- + 1 file changed, 8 deletions(-) + +diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c +index 2a71ded9b13e..65590eee6289 100644 +--- a/arch/x86/kernel/ldt.c ++++ b/arch/x86/kernel/ldt.c +@@ -207,7 +207,6 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) + bool is_vmalloc; + spinlock_t *ptl; + int i, nr_pages; +- pgd_t *pgd; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return 0; +@@ -221,13 +220,6 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) + /* Check if the current mappings are sane */ + sanity_check_ldt_mapping(mm); + +- /* +- * Did we already have the top level entry allocated? We can't +- * use pgd_none() for this because it doens't do anything on +- * 4-level page table kernels. +- */ +- pgd = pgd_offset(mm, LDT_BASE_ADDR); +- + is_vmalloc = is_vmalloc_addr(ldt->entries); + + nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); +-- +2.17.1 + diff --git a/queue-4.19/x86-ldt-unmap-ptes-for-the-slot-before-freeing-ldt-p.patch b/queue-4.19/x86-ldt-unmap-ptes-for-the-slot-before-freeing-ldt-p.patch new file mode 100644 index 00000000000..97edff9c6e6 --- /dev/null +++ b/queue-4.19/x86-ldt-unmap-ptes-for-the-slot-before-freeing-ldt-p.patch @@ -0,0 +1,148 @@ +From 5f126f0481b854dd27e2dfc5c01e345db7d220ef Mon Sep 17 00:00:00 2001 +From: "Kirill A. 
Shutemov" +Date: Fri, 26 Oct 2018 15:28:55 +0300 +Subject: x86/ldt: Unmap PTEs for the slot before freeing LDT pages + +commit a0e6e0831c516860fc7f9be1db6c081fe902ebcf upstream + +modify_ldt(2) leaves the old LDT mapped after switching over to the new +one. The old LDT gets freed and the pages can be re-used. + +Leaving the mapping in place can have security implications. The mapping is +present in the userspace page tables and Meltdown-like attacks can read +these freed and possibly reused pages. + +It's relatively simple to fix: unmap the old LDT and flush TLB before +freeing the old LDT memory. + +This further allows to avoid flushing the TLB in map_ldt_struct() as the +slot is unmapped and flushed by unmap_ldt_struct() or has never been mapped +at all. + +[ tglx: Massaged changelog and removed the needless line breaks ] + +Fixes: f55f0501cbf6 ("x86/pti: Put the LDT in its own PGD if PTI is on") +Signed-off-by: Kirill A. Shutemov +Signed-off-by: Thomas Gleixner +Cc: bp@alien8.de +Cc: hpa@zytor.com +Cc: dave.hansen@linux.intel.com +Cc: luto@kernel.org +Cc: peterz@infradead.org +Cc: boris.ostrovsky@oracle.com +Cc: jgross@suse.com +Cc: bhe@redhat.com +Cc: willy@infradead.org +Cc: linux-mm@kvack.org +Cc: stable@vger.kernel.org +Link: https://lkml.kernel.org/r/20181026122856.66224-3-kirill.shutemov@linux.intel.com +Signed-off-by: Sasha Levin +--- + arch/x86/kernel/ldt.c | 51 ++++++++++++++++++++++++++++++++----------- + 1 file changed, 38 insertions(+), 13 deletions(-) + +diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c +index 733e6ace0fa4..2a71ded9b13e 100644 +--- a/arch/x86/kernel/ldt.c ++++ b/arch/x86/kernel/ldt.c +@@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm) + /* + * If PTI is enabled, this maps the LDT into the kernelmode and + * usermode tables for the given mm. +- * +- * There is no corresponding unmap function. Even if the LDT is freed, we +- * leave the PTEs around until the slot is reused or the mm is destroyed. +- * This is harmless: the LDT is always in ordinary memory, and no one will +- * access the freed slot. +- * +- * If we wanted to unmap freed LDTs, we'd also need to do a flush to make +- * it useful, and the flush would slow down modify_ldt(). 
+ */ + static int + map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) +@@ -214,8 +206,8 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) + unsigned long va; + bool is_vmalloc; + spinlock_t *ptl; ++ int i, nr_pages; + pgd_t *pgd; +- int i; + + if (!static_cpu_has(X86_FEATURE_PTI)) + return 0; +@@ -238,7 +230,9 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) + + is_vmalloc = is_vmalloc_addr(ldt->entries); + +- for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) { ++ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); ++ ++ for (i = 0; i < nr_pages; i++) { + unsigned long offset = i << PAGE_SHIFT; + const void *src = (char *)ldt->entries + offset; + unsigned long pfn; +@@ -272,13 +266,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) + /* Propagate LDT mapping to the user page-table */ + map_ldt_struct_to_user(mm); + +- va = (unsigned long)ldt_slot_va(slot); +- flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, 0); +- + ldt->slot = slot; + return 0; + } + ++static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) ++{ ++ unsigned long va; ++ int i, nr_pages; ++ ++ if (!ldt) ++ return; ++ ++ /* LDT map/unmap is only required for PTI */ ++ if (!static_cpu_has(X86_FEATURE_PTI)) ++ return; ++ ++ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); ++ ++ for (i = 0; i < nr_pages; i++) { ++ unsigned long offset = i << PAGE_SHIFT; ++ spinlock_t *ptl; ++ pte_t *ptep; ++ ++ va = (unsigned long)ldt_slot_va(ldt->slot) + offset; ++ ptep = get_locked_pte(mm, va, &ptl); ++ pte_clear(mm, va, ptep); ++ pte_unmap_unlock(ptep, ptl); ++ } ++ ++ va = (unsigned long)ldt_slot_va(ldt->slot); ++ flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, 0); ++} ++ + #else /* !CONFIG_PAGE_TABLE_ISOLATION */ + + static int +@@ -286,6 +306,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) + { + return 0; + } ++ ++static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) ++{ ++} + #endif /* CONFIG_PAGE_TABLE_ISOLATION */ + + static void free_ldt_pgtables(struct mm_struct *mm) +@@ -524,6 +548,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) + } + + install_ldt(mm, new_ldt); ++ unmap_ldt_struct(mm, old_ldt); + free_ldt_struct(old_ldt); + error = 0; + +-- +2.17.1 + diff --git a/queue-4.19/x86-mm-move-ldt-remap-out-of-kaslr-region-on-5-level.patch b/queue-4.19/x86-mm-move-ldt-remap-out-of-kaslr-region-on-5-level.patch new file mode 100644 index 00000000000..cbbd1fbaba9 --- /dev/null +++ b/queue-4.19/x86-mm-move-ldt-remap-out-of-kaslr-region-on-5-level.patch @@ -0,0 +1,140 @@ +From 9a5fa7049f27bb3a42abddea82f31900d48bb76e Mon Sep 17 00:00:00 2001 +From: "Kirill A. Shutemov" +Date: Fri, 26 Oct 2018 15:28:54 +0300 +Subject: x86/mm: Move LDT remap out of KASLR region on 5-level paging + +commit d52888aa2753e3063a9d3a0c9f72f94aa9809c15 upstream + +On 5-level paging the LDT remap area is placed in the middle of the KASLR +randomization region and it can overlap with the direct mapping, the +vmalloc or the vmap area. + +The LDT mapping is per mm, so it cannot be moved into the P4D page table +next to the CPU_ENTRY_AREA without complicating PGD table allocation for +5-level paging. + +The 4 PGD slot gap just before the direct mapping is reserved for +hypervisors, so it cannot be used. + +Move the direct mapping one slot deeper and use the resulting gap for the +LDT remap area. 
The resulting layout is the same for 4 and 5 level paging. + +[ tglx: Massaged changelog ] + +Fixes: f55f0501cbf6 ("x86/pti: Put the LDT in its own PGD if PTI is on") +Signed-off-by: Kirill A. Shutemov +Signed-off-by: Thomas Gleixner +Reviewed-by: Andy Lutomirski +Cc: bp@alien8.de +Cc: hpa@zytor.com +Cc: dave.hansen@linux.intel.com +Cc: peterz@infradead.org +Cc: boris.ostrovsky@oracle.com +Cc: jgross@suse.com +Cc: bhe@redhat.com +Cc: willy@infradead.org +Cc: linux-mm@kvack.org +Cc: stable@vger.kernel.org +Link: https://lkml.kernel.org/r/20181026122856.66224-2-kirill.shutemov@linux.intel.com +Signed-off-by: Sasha Levin +--- + Documentation/x86/x86_64/mm.txt | 10 ++++++---- + arch/x86/include/asm/page_64_types.h | 12 +++++++----- + arch/x86/include/asm/pgtable_64_types.h | 4 +--- + arch/x86/xen/mmu_pv.c | 6 +++--- + 4 files changed, 17 insertions(+), 15 deletions(-) + +diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt +index 5432a96d31ff..05ef53d83a41 100644 +--- a/Documentation/x86/x86_64/mm.txt ++++ b/Documentation/x86/x86_64/mm.txt +@@ -4,8 +4,9 @@ Virtual memory map with 4 level page tables: + 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm + hole caused by [47:63] sign extension + ffff800000000000 - ffff87ffffffffff (=43 bits) guard hole, reserved for hypervisor +-ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory +-ffffc80000000000 - ffffc8ffffffffff (=40 bits) hole ++ffff880000000000 - ffff887fffffffff (=39 bits) LDT remap for PTI ++ffff888000000000 - ffffc87fffffffff (=64 TB) direct mapping of all phys. memory ++ffffc88000000000 - ffffc8ffffffffff (=39 bits) hole + ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space + ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole + ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB) +@@ -30,8 +31,9 @@ Virtual memory map with 5 level page tables: + 0000000000000000 - 00ffffffffffffff (=56 bits) user space, different per mm + hole caused by [56:63] sign extension + ff00000000000000 - ff0fffffffffffff (=52 bits) guard hole, reserved for hypervisor +-ff10000000000000 - ff8fffffffffffff (=55 bits) direct mapping of all phys. memory +-ff90000000000000 - ff9fffffffffffff (=52 bits) LDT remap for PTI ++ff10000000000000 - ff10ffffffffffff (=48 bits) LDT remap for PTI ++ff11000000000000 - ff90ffffffffffff (=55 bits) direct mapping of all phys. memory ++ff91000000000000 - ff9fffffffffffff (=3840 TB) hole + ffa0000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space (12800 TB) + ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole + ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB) +diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h +index 6afac386a434..b99d497e342d 100644 +--- a/arch/x86/include/asm/page_64_types.h ++++ b/arch/x86/include/asm/page_64_types.h +@@ -33,12 +33,14 @@ + + /* + * Set __PAGE_OFFSET to the most negative possible address + +- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a +- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's +- * what Xen requires. ++ * PGDIR_SIZE*17 (pgd slot 273). ++ * ++ * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for ++ * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary, ++ * but it's what Xen requires. 
+ */ +-#define __PAGE_OFFSET_BASE_L5 _AC(0xff10000000000000, UL) +-#define __PAGE_OFFSET_BASE_L4 _AC(0xffff880000000000, UL) ++#define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL) ++#define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL) + + #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT + #define __PAGE_OFFSET page_offset_base +diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h +index 04edd2d58211..84bd9bdc1987 100644 +--- a/arch/x86/include/asm/pgtable_64_types.h ++++ b/arch/x86/include/asm/pgtable_64_types.h +@@ -111,9 +111,7 @@ extern unsigned int ptrs_per_p4d; + */ + #define MAXMEM (1UL << MAX_PHYSMEM_BITS) + +-#define LDT_PGD_ENTRY_L4 -3UL +-#define LDT_PGD_ENTRY_L5 -112UL +-#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) ++#define LDT_PGD_ENTRY -240UL + #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) + #define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE) + +diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c +index dd461c0167ef..2c84c6ad8b50 100644 +--- a/arch/x86/xen/mmu_pv.c ++++ b/arch/x86/xen/mmu_pv.c +@@ -1897,7 +1897,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) + init_top_pgt[0] = __pgd(0); + + /* Pre-constructed entries are in pfn, so convert to mfn */ +- /* L4[272] -> level3_ident_pgt */ ++ /* L4[273] -> level3_ident_pgt */ + /* L4[511] -> level3_kernel_pgt */ + convert_pfn_mfn(init_top_pgt); + +@@ -1917,8 +1917,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) + addr[0] = (unsigned long)pgd; + addr[1] = (unsigned long)l3; + addr[2] = (unsigned long)l2; +- /* Graft it onto L4[272][0]. Note that we creating an aliasing problem: +- * Both L4[272][0] and L4[511][510] have entries that point to the same ++ /* Graft it onto L4[273][0]. Note that we creating an aliasing problem: ++ * Both L4[273][0] and L4[511][510] have entries that point to the same + * L2 (PMD) tables. Meaning that if you modify it in __va space + * it will be also modified in the __ka space! (But if you just + * modify the PMD table to point to other PTE's or none, then you +-- +2.17.1 +
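
The layout change in the last patch is pure address arithmetic, so it can be
sanity-checked outside the kernel. Below is a minimal userspace sketch, not
kernel code: PGDIR_SHIFT (39 for 4-level, 48 for 5-level paging) and
LDT_ENTRY_SIZE (8 bytes) are assumptions taken from the upstream x86 headers,
and the expected constants are copied from the hunks above. It re-derives
__PAGE_OFFSET_BASE_L4/L5 and LDT_BASE_ADDR from LDT_PGD_ENTRY = -240, and
checks the nr_pages computation shared by map_ldt_struct() and
unmap_ldt_struct().

    /*
     * ldt_layout_check.c
     *
     * Userspace illustration only, not kernel code. PGDIR_SHIFT and
     * LDT_ENTRY_SIZE are assumed from the upstream x86 headers; the
     * expected addresses come from the patch hunks above.
     */
    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static void check_layout(unsigned int pgdir_shift,
                             uint64_t expect_page_offset,
                             uint64_t expect_ldt_base)
    {
        uint64_t pgdir_size = 1ULL << pgdir_shift;
        /* Kernel half of the address space: the top 256 of 512 PGD slots. */
        uint64_t most_negative = -(1ULL << (pgdir_shift + 8));
        /* 16 slots reserved for a hypervisor + 1 slot for the LDT remap. */
        uint64_t page_offset = most_negative + 17 * pgdir_size;
        /* LDT_PGD_ENTRY is -240 in both paging modes after the patch. */
        uint64_t ldt_base = (uint64_t)-240 << pgdir_shift;

        printf("PGDIR_SHIFT=%2u  PAGE_OFFSET=%#018" PRIx64
               "  LDT_BASE_ADDR=%#018" PRIx64 "\n",
               pgdir_shift, page_offset, ldt_base);

        assert(page_offset == expect_page_offset);
        assert(ldt_base == expect_ldt_base);
        /* One-slot LDT area ends exactly where the direct map begins. */
        assert(ldt_base + pgdir_size == page_offset);
    }

    int main(void)
    {
        check_layout(39, 0xffff888000000000ULL, 0xffff880000000000ULL); /* 4-level */
        check_layout(48, 0xff11000000000000ULL, 0xff10000000000000ULL); /* 5-level */

        /*
         * nr_pages as computed in map_ldt_struct()/unmap_ldt_struct():
         * with LDT_ENTRY_SIZE == 8, the maximum LDT (8192 entries) spans
         * 16 pages, while a single-entry LDT still needs one full page.
         */
        assert(DIV_ROUND_UP(8192 * 8, 4096) == 16);
        assert(DIV_ROUND_UP(1 * 8, 4096) == 1);

        return 0;
    }

Built with "cc -o ldt_layout_check ldt_layout_check.c", it prints both layouts
and exits 0. The final assertion in check_layout() is the point of the patch:
with slot -240, the one-PGD LDT remap area sits flush against the new direct
mapping base on both 4- and 5-level paging, outside the KASLR randomization
region.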