From: Borislav Petkov <bp@suse.de>
Date: Thu, 4 Jan 2018 17:42:45 +0100
Subject: Map the vsyscall page with _PAGE_USER

From: Borislav Petkov <bp@suse.de>

This needs to happen early in kaiser_pagetable_walk(), before the
hierarchy is established, so that the _PAGE_USER permission can really
be set.

A proper fix would be to teach kaiser_pagetable_walk() to update those
permissions, but the vsyscall page is the only exception here, so ...

Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/entry/vsyscall/vsyscall_64.c |    5 +++++
 arch/x86/include/asm/vsyscall.h       |    2 ++
 arch/x86/mm/kaiser.c                  |   34 ++++++++++++++++++++++++++++++----
 3 files changed, 37 insertions(+), 4 deletions(-)

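The failure mode described above can be checked from userspace. The
following is a minimal sketch, not part of the patch: it assumes an
x86-64 machine booted with vsyscall=native or vsyscall=emulate, where a
plain read of the fixed vsyscall address (0xffffffffff600000, the value
of VSYSCALL_ADDR) is expected to succeed; without this fix, a KAISER
kernel instead delivers SIGSEGV with a page-protection violation.

/*
 * Minimal userspace sketch (not part of this patch): read the fixed
 * vsyscall address directly. Assumes x86-64 with vsyscall=native or
 * vsyscall=emulate; with vsyscall=none this read is expected to fault.
 */
#include <stdio.h>
#include <string.h>

#define VSYSCALL_ADDR	0xffffffffff600000UL	/* fixed by the x86-64 ABI */

int main(void)
{
	unsigned char buf[16];

	/* Without the fix, this faults with a page-protection violation. */
	memcpy(buf, (const void *)VSYSCALL_ADDR, sizeof(buf));

	printf("vsyscall page readable, first byte: 0x%02x\n", buf[0]);
	return 0;
}
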
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -66,6 +66,11 @@ static int __init vsyscall_setup(char *s
 }
 early_param("vsyscall", vsyscall_setup);
 
+bool vsyscall_enabled(void)
+{
+	return vsyscall_mode != NONE;
+}
+
 static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
 			      const char *message)
 {
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -12,12 +12,14 @@ extern void map_vsyscall(void);
  * Returns true if handled.
  */
 extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
+extern bool vsyscall_enabled(void);
 #else
 static inline void map_vsyscall(void) {}
 static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 {
 	return false;
 }
+static inline bool vsyscall_enabled(void) { return false; }
 #endif
 
 #endif /* _ASM_X86_VSYSCALL_H */
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -20,6 +20,7 @@
 #include <asm/pgalloc.h>
 #include <asm/desc.h>
 #include <asm/cmdline.h>
+#include <asm/vsyscall.h>
 
 int kaiser_enabled __read_mostly = 1;
 EXPORT_SYMBOL(kaiser_enabled);	/* for inlined TLB flush functions */
@@ -111,12 +112,13 @@ static inline unsigned long get_pa_from_
  *
  * Returns a pointer to a PTE on success, or NULL on failure.
  */
-static pte_t *kaiser_pagetable_walk(unsigned long address)
+static pte_t *kaiser_pagetable_walk(unsigned long address, bool user)
 {
 	pmd_t *pmd;
 	pud_t *pud;
 	pgd_t *pgd = native_get_shadow_pgd(pgd_offset_k(address));
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	unsigned long prot = _KERNPG_TABLE;
 
 	if (pgd_none(*pgd)) {
 		WARN_ONCE(1, "All shadow pgds should have been populated");
@@ -124,6 +126,17 @@ static pte_t *kaiser_pagetable_walk(unsi
 	}
 	BUILD_BUG_ON(pgd_large(*pgd) != 0);
 
+	if (user) {
+		/*
+		 * The vsyscall page is the only page that will have
+		 * _PAGE_USER set. Catch everything else.
+		 */
+		BUG_ON(address != VSYSCALL_ADDR);
+
+		set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
+		prot = _PAGE_TABLE;
+	}
+
 	pud = pud_offset(pgd, address);
 	/* The shadow page tables do not use large mappings: */
 	if (pud_large(*pud)) {
@@ -136,7 +149,7 @@ static pte_t *kaiser_pagetable_walk(unsi
 			return NULL;
 		spin_lock(&shadow_table_allocation_lock);
 		if (pud_none(*pud)) {
-			set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
+			set_pud(pud, __pud(prot | __pa(new_pmd_page)));
 			__inc_zone_page_state(virt_to_page((void *)
 						new_pmd_page), NR_KAISERTABLE);
 		} else
@@ -156,7 +169,7 @@ static pte_t *kaiser_pagetable_walk(unsi
 			return NULL;
 		spin_lock(&shadow_table_allocation_lock);
 		if (pmd_none(*pmd)) {
-			set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+			set_pmd(pmd, __pmd(prot | __pa(new_pte_page)));
 			__inc_zone_page_state(virt_to_page((void *)
 						new_pte_page), NR_KAISERTABLE);
 		} else
@@ -192,7 +205,7 @@ static int kaiser_add_user_map(const voi
 			ret = -EIO;
 			break;
 		}
-		pte = kaiser_pagetable_walk(address);
+		pte = kaiser_pagetable_walk(address, flags & _PAGE_USER);
 		if (!pte) {
 			ret = -ENOMEM;
 			break;
@@ -319,6 +332,19 @@ void __init kaiser_init(void)
 
 	kaiser_init_all_pgds();
 
+	/*
+	 * Note that this sets _PAGE_USER and it needs to happen when the
+	 * pagetable hierarchy gets created, i.e., early. Otherwise,
+	 * kaiser_pagetable_walk() will encounter already-initialized PTEs
+	 * in the hierarchy and not set the proper permissions, leading,
+	 * for example, to pagefaults with page-protection violations when
+	 * trying to read the vsyscall page.
+	 */
+	if (vsyscall_enabled())
+		kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
+					  PAGE_SIZE,
+					  __PAGE_KERNEL_VSYSCALL);
+
 	for_each_possible_cpu(cpu) {
 		void *percpu_vaddr = __per_cpu_user_mapped_start +
 			per_cpu_offset(cpu);
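
For reference, the reason the patch touches the PGD/PUD/PMD entries and
not just the final PTE: on x86, an access from user mode is permitted
only if the U/S bit (_PAGE_USER, bit 2) is set in the entry at every
paging level, so setting _PAGE_USER on the PTE alone is not enough once
the upper levels of the shadow hierarchy were created kernel-only. A
standalone sketch of that rule (hypothetical helper names, not kernel
code):

/*
 * Standalone illustration (hypothetical, not kernel code): the CPU
 * allows a user-mode access only when the U/S bit (_PAGE_USER, bit 2)
 * is set at every paging level, effectively ANDing it down the walk.
 */
#include <stdbool.h>
#include <stdio.h>

#define MY_PAGE_USER	0x4UL	/* U/S bit in an x86 paging entry */

static bool user_may_access(unsigned long pgd, unsigned long pud,
			    unsigned long pmd, unsigned long pte)
{
	return (pgd & MY_PAGE_USER) && (pud & MY_PAGE_USER) &&
	       (pmd & MY_PAGE_USER) && (pte & MY_PAGE_USER);
}

int main(void)
{
	/* PTE marked user, but upper levels kernel-only: access faults. */
	printf("%d\n", user_may_access(0x3, 0x3, 0x3, 0x7));	/* 0 */

	/* _PAGE_USER propagated through the hierarchy, as kaiser_init()
	 * now arranges for the vsyscall page: access is allowed. */
	printf("%d\n", user_may_access(0x7, 0x7, 0x7, 0x7));	/* 1 */
	return 0;
}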