1 From c626e405d9daf44ca3c9967380815b356728602b Mon Sep 17 00:00:00 2001
2 From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
3 Date: Fri, 30 Nov 2018 23:23:27 +0300
4 Subject: x86/mm: Fix guard hole handling
6 [ Upstream commit 16877a5570e0c5f4270d5b17f9bab427bcae9514 ]
8 There is a guard hole at the beginning of the kernel address space, also
9 used by hypervisors. It occupies 16 PGD entries.
11 This reserved range is not defined explicitly; it is calculated relative
12 to other entities: direct mapping and user space ranges.
14 The calculation got broken by recent changes of the kernel memory layout:
15 LDT remap range is now mapped before direct mapping and makes the
16 calculation invalid.
18 The breakage leads to crash on Xen dom0 boot[1].
20 Define the reserved range explicitly. It's part of kernel ABI (hypervisors
21 expect it to be stable) and must not depend on changes in the rest of
22 kernel memory layout.
24 [1] https://lists.xenproject.org/archives/html/xen-devel/2018-11/msg03313.html
26 Fixes: d52888aa2753 ("x86/mm: Move LDT remap out of KASLR region on 5-level paging")
27 Reported-by: Hans van Kranenburg <hans.van.kranenburg@mendix.com>
28 Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
29 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
30 Tested-by: Hans van Kranenburg <hans.van.kranenburg@mendix.com>
31 Reviewed-by: Juergen Gross <jgross@suse.com>
34 Cc: dave.hansen@linux.intel.com
36 Cc: peterz@infradead.org
37 Cc: boris.ostrovsky@oracle.com
39 Cc: linux-mm@kvack.org
40 Cc: xen-devel@lists.xenproject.org
41 Link: https://lkml.kernel.org/r/20181130202328.65359-2-kirill.shutemov@linux.intel.com
42 Signed-off-by: Sasha Levin <sashal@kernel.org>
44 arch/x86/include/asm/pgtable_64_types.h | 5 +++++
45 arch/x86/mm/dump_pagetables.c | 8 ++++----
46 arch/x86/xen/mmu_pv.c | 11 ++++++-----
47 3 files changed, 15 insertions(+), 9 deletions(-)
49 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
50 index 7764617b8f9c..bf6d2692fc60 100644
51 --- a/arch/x86/include/asm/pgtable_64_types.h
52 +++ b/arch/x86/include/asm/pgtable_64_types.h
53 @@ -94,6 +94,11 @@ typedef struct { pteval_t pte; } pte_t;
54 # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
57 +#define GUARD_HOLE_PGD_ENTRY -256UL
58 +#define GUARD_HOLE_SIZE (16UL << PGDIR_SHIFT)
59 +#define GUARD_HOLE_BASE_ADDR (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
60 +#define GUARD_HOLE_END_ADDR (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
62 #define LDT_PGD_ENTRY -240UL
63 #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
65 diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
66 index 2a4849e92831..cf403e057f3f 100644
67 --- a/arch/x86/mm/dump_pagetables.c
68 +++ b/arch/x86/mm/dump_pagetables.c
69 @@ -465,11 +465,11 @@ static inline bool is_hypervisor_range(int idx)
73 - * ffff800000000000 - ffff87ffffffffff is reserved for
75 + * A hole in the beginning of kernel address space reserved
78 - return (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
79 - (idx < pgd_index(__PAGE_OFFSET));
80 + return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
81 + (idx < pgd_index(GUARD_HOLE_END_ADDR));
85 diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
86 index b33fa127a613..7631e6130d44 100644
87 --- a/arch/x86/xen/mmu_pv.c
88 +++ b/arch/x86/xen/mmu_pv.c
89 @@ -614,19 +614,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
93 - unsigned hole_low, hole_high;
94 + unsigned hole_low = 0, hole_high = 0;
96 /* The limit is the last byte to be touched */
98 BUG_ON(limit >= FIXADDR_TOP);
100 +#ifdef CONFIG_X86_64
102 * 64-bit has a great big hole in the middle of the address
103 - * space, which contains the Xen mappings. On 32-bit these
104 - * will end up making a zero-sized hole and so is a no-op.
105 + * space, which contains the Xen mappings.
107 - hole_low = pgd_index(USER_LIMIT);
108 - hole_high = pgd_index(PAGE_OFFSET);
109 + hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
110 + hole_high = pgd_index(GUARD_HOLE_END_ADDR);
113 nr = pgd_index(limit) + 1;
114 for (i = 0; i < nr; i++) {