From 8762e5092828c4dc0f49da5a47a644c670df77f3 Mon Sep 17 00:00:00 2001
From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Date: Wed, 9 Jul 2014 13:18:18 -0400
Subject: x86/espfix/xen: Fix allocation of pages for paravirt page tables

From: Boris Ostrovsky <boris.ostrovsky@oracle.com>

commit 8762e5092828c4dc0f49da5a47a644c670df77f3 upstream.

init_espfix_ap() is currently off by one level when informing the hypervisor
that the allocated pages will be used for the ministacks' page tables.

The most immediate effect of this on a PV guest is that if
'stack_page = __get_free_page()' returns a non-zeroed-out page, the hypervisor
will refuse to use it for a page table (which it should not be anyway). This
will result in warnings from both Xen and Linux.

More importantly, a subsequent write to that page (again, by a PV guest) is
likely to result in a fatal page fault.

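To make the level convention concrete, here is a minimal illustrative sketch
(an editorial addition, not part of the patch; the declarations are assumed
for self-containment, and PGALLOC_GFP is the local macro espfix_64.c defines).
Each paravirt_alloc_<level>() hook announces a page that will hold entries of
that level, so the hook must match the contents of the freshly allocated page,
not the level of the entry that will point to it:

	pmd_t *pmd_p;
	pte_t *pte_p;
	void *stack_page;

	/* A page that will hold pmd_t entries is a PMD page ... */
	pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
	paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);

	/* ... and a page that will hold pte_t entries is a PTE page. */
	pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
	paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);

	/*
	 * The ministack page itself holds stack data, not page-table
	 * entries, so it gets no paravirt_alloc_*() call at all;
	 * registering it as a page table (as the pre-patch code did) is
	 * what exposed it to the refusal/fault behavior described above.
	 */
	stack_page = (void *)__get_free_page(GFP_KERNEL);
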
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Link: http://lkml.kernel.org/r/1404926298-5565-1-git-send-email-boris.ostrovsky@oracle.com
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kernel/espfix_64.c |    5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -175,7 +175,7 @@ void init_espfix_ap(void)
 	if (!pud_present(pud)) {
 		pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
 		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
-		paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
 		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
 			set_pud(&pud_p[n], pud);
 	}
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
 	if (!pmd_present(pmd)) {
 		pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
 		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
-		paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
 		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
 			set_pmd(&pmd_p[n], pmd);
 	}
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
 	pte_p = pte_offset_kernel(&pmd, addr);
 	stack_page = (void *)__get_free_page(GFP_KERNEL);
 	pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
-	paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
 	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
 		set_pte(&pte_p[n*PTE_STRIDE], pte);
 