From: Andrea Arcangeli
Date: Tue, 5 Dec 2017 20:15:07 +0000 (+0100)
Subject: x86/mm/kaiser: re-enable vsyscalls
X-Git-Tag: v3.2.98~25
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0cee3c94208ae76fc10459cf9398c63dc6956dad;p=thirdparty%2Fkernel%2Fstable.git

x86/mm/kaiser: re-enable vsyscalls

To avoid breaking the kernel ABI.

Signed-off-by: Andrea Arcangeli
[Hugh Dickins: Backported to 3.2:
 - Leave out the PVCLOCK_FIXMAP user mapping, which does not apply to this tree
 - For safety added vsyscall_pgprot, and a BUG_ON if _PAGE_USER outside of FIXMAP.]
Signed-off-by: Hugh Dickins
Signed-off-by: Ben Hutchings
---

diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index eaea1d31f7530..143e98b28081b 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -22,6 +22,7 @@ enum vsyscall_num {
 /* kernel space (writeable) */
 extern int vgetcpu_mode;
 extern struct timezone sys_tz;
+extern unsigned long vsyscall_pgprot;
 
 #include
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 4970ef070f2f5..02fd03bf15dd1 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -74,6 +75,8 @@ static inline void hpet_set_mapping(void)
 	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
 #ifdef CONFIG_X86_64
 	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
+	kaiser_add_mapping(__fix_to_virt(VSYSCALL_HPET), PAGE_SIZE,
+			   __PAGE_KERNEL_VVAR_NOCACHE);
 #endif
 }
 
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index e4d4a22e8b943..3178f308609a6 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -58,6 +58,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 };
 
 static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
+unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;
 
 static int __init vsyscall_setup(char *str)
 {
@@ -274,10 +275,10 @@ void __init map_vsyscall(void)
 	extern char __vvar_page;
 	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
 
+	if (vsyscall_mode != NATIVE)
+		vsyscall_pgprot = __PAGE_KERNEL_VVAR;
 	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
-		     vsyscall_mode == NATIVE
-		     ? PAGE_KERNEL_VSYSCALL
-		     : PAGE_KERNEL_VVAR);
+		     __pgprot(vsyscall_pgprot));
 	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
 		     (unsigned long)VSYSCALL_START);
 
diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
index 79b0222ffa746..ab1dfa6075466 100644
--- a/arch/x86/mm/kaiser.c
+++ b/arch/x86/mm/kaiser.c
@@ -16,6 +16,7 @@ extern struct mm_struct init_mm;
 #include
 #include	/* to verify its kaiser declarations */
+#include
 #include
 #include
 #include
 
@@ -133,7 +134,7 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
 			return NULL;
 		spin_lock(&shadow_table_allocation_lock);
 		if (pud_none(*pud)) {
-			set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
+			set_pud(pud, __pud(_PAGE_TABLE | __pa(new_pmd_page)));
 			__inc_zone_page_state(virt_to_page((void *)
 						new_pmd_page), NR_KAISERTABLE);
 		} else
@@ -153,7 +154,7 @@ static pte_t *kaiser_pagetable_walk(unsigned long address)
 			return NULL;
 		spin_lock(&shadow_table_allocation_lock);
 		if (pmd_none(*pmd)) {
-			set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+			set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(new_pte_page)));
 			__inc_zone_page_state(virt_to_page((void *)
 						new_pte_page), NR_KAISERTABLE);
 		} else
@@ -174,6 +175,9 @@ int kaiser_add_user_map(const void *__start_addr, unsigned long size,
 	unsigned long end_addr = PAGE_ALIGN(start_addr + size);
 	unsigned long target_address;
 
+	if (flags & _PAGE_USER)
+		BUG_ON(address < FIXADDR_START || end_addr >= FIXADDR_TOP);
+
 	for (; address < end_addr; address += PAGE_SIZE) {
 		target_address = get_pa_from_mapping(address);
 		if (target_address == -1) {
@@ -227,7 +231,7 @@ static void __init kaiser_init_all_pgds(void)
 			break;
 		}
 		inc_zone_page_state(virt_to_page(pud), NR_KAISERTABLE);
-		new_pgd = __pgd(_KERNPG_TABLE |__pa(pud));
+		new_pgd = __pgd(_PAGE_TABLE |__pa(pud));
 		/*
 		 * Make sure not to stomp on some other pgd entry.
 		 */
@@ -285,6 +289,10 @@ void __init kaiser_init(void)
 	kaiser_add_user_map_early((void *)idt_descr.address,
 				  sizeof(gate_desc) * NR_VECTORS,
 				  __PAGE_KERNEL_RO);
+	kaiser_add_user_map_early((void *)VVAR_ADDRESS, PAGE_SIZE,
+				  __PAGE_KERNEL_VVAR);
+	kaiser_add_user_map_early((void *)VSYSCALL_START, PAGE_SIZE,
+				  vsyscall_pgprot);
 	kaiser_add_user_map_early(&x86_cr3_pcid_noflush,
 				  sizeof(x86_cr3_pcid_noflush),
 				  __PAGE_KERNEL);
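
Illustration (not part of the patch above): the backport note mentions the new
vsyscall_pgprot variable and the _PAGE_USER range check added to
kaiser_add_user_map(). The stand-alone C sketch below mimics that logic in user
space so it can be compiled and run on its own; the numeric constants and the
helper names pick_vsyscall_pgprot() and check_user_map() are placeholders chosen
for the example, not the kernel's actual values or functions.

/* Illustration only -- NOT part of the patch above.  Placeholder values. */
#include <assert.h>
#include <stdio.h>

#define _PAGE_USER		0x004UL			/* illustrative flag bit */
#define FIXADDR_START		0xffffffffff000000UL	/* placeholder, not the real layout */
#define FIXADDR_TOP		0xffffffffff800000UL	/* placeholder, not the real layout */

#define __PAGE_KERNEL_VSYSCALL	0x1UL	/* placeholder: user-executable protection */
#define __PAGE_KERNEL_VVAR	0x2UL	/* placeholder: user-readable, non-exec protection */

enum vsyscall_mode { EMULATE, NATIVE, NONE_MODE };

static unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;

/* Mirrors the map_vsyscall() hunk: only NATIVE keeps the executable protection. */
static void pick_vsyscall_pgprot(enum vsyscall_mode mode)
{
	if (mode != NATIVE)
		vsyscall_pgprot = __PAGE_KERNEL_VVAR;
}

/*
 * Mirrors the guard added to kaiser_add_user_map(): a _PAGE_USER mapping
 * is only accepted inside the fixmap window.
 */
static void check_user_map(unsigned long start, unsigned long end,
			   unsigned long flags)
{
	if (flags & _PAGE_USER)
		assert(!(start < FIXADDR_START || end >= FIXADDR_TOP));
}

int main(void)
{
	pick_vsyscall_pgprot(EMULATE);
	printf("pgprot after EMULATE: %#lx\n", vsyscall_pgprot);

	/* A user-visible mapping inside the (placeholder) fixmap range passes. */
	check_user_map(FIXADDR_START + 0x1000, FIXADDR_START + 0x2000, _PAGE_USER);
	return 0;
}

Built with a plain "cc file.c", this prints the downgraded protection value and
the assert passes for an address range inside the placeholder fixmap window.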