]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - releases/4.19.31/xen-fix-dom0-boot-on-huge-systems.patch
Linux 4.19.31
[thirdparty/kernel/stable-queue.git] / releases / 4.19.31 / xen-fix-dom0-boot-on-huge-systems.patch
1 From 01bd2ac2f55a1916d81dace12fa8d7ae1c79b5ea Mon Sep 17 00:00:00 2001
2 From: Juergen Gross <jgross@suse.com>
3 Date: Thu, 7 Mar 2019 10:11:19 +0100
4 Subject: xen: fix dom0 boot on huge systems
5
6 From: Juergen Gross <jgross@suse.com>
7
8 commit 01bd2ac2f55a1916d81dace12fa8d7ae1c79b5ea upstream.
9
10 Commit f7c90c2aa40048 ("x86/xen: don't write ptes directly in 32-bit
11 PV guests") introduced a regression for booting dom0 on huge systems
12 with lots of RAM (in the TB range).
13
14 Reason is that on those hosts the p2m list needs to be moved early in
15 the boot process and this requires temporary page tables to be created.
16 Said commit modified xen_set_pte_init() to use a hypercall for writing
17 a PTE, but this requires the page table being in the direct mapped
18 area, which is not the case for the temporary page tables used in
19 xen_relocate_p2m().
20
21 As the page tables are completely written before being linked to the
22 actual address space, a plain write to memory can be used in
23 xen_relocate_p2m() instead of set_pte().
24
25 Fixes: f7c90c2aa40048 ("x86/xen: don't write ptes directly in 32-bit PV guests")
26 Cc: stable@vger.kernel.org
27 Signed-off-by: Juergen Gross <jgross@suse.com>
28 Reviewed-by: Jan Beulich <jbeulich@suse.com>
29 Signed-off-by: Juergen Gross <jgross@suse.com>
30 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
31
32 ---
33 arch/x86/xen/mmu_pv.c | 13 ++++++-------
34 1 file changed, 6 insertions(+), 7 deletions(-)
35
36 --- a/arch/x86/xen/mmu_pv.c
37 +++ b/arch/x86/xen/mmu_pv.c
38 @@ -2106,10 +2106,10 @@ void __init xen_relocate_p2m(void)
39 pt = early_memremap(pt_phys, PAGE_SIZE);
40 clear_page(pt);
41 for (idx_pte = 0;
42 - idx_pte < min(n_pte, PTRS_PER_PTE);
43 - idx_pte++) {
44 - set_pte(pt + idx_pte,
45 - pfn_pte(p2m_pfn, PAGE_KERNEL));
46 + idx_pte < min(n_pte, PTRS_PER_PTE);
47 + idx_pte++) {
48 + pt[idx_pte] = pfn_pte(p2m_pfn,
49 + PAGE_KERNEL);
50 p2m_pfn++;
51 }
52 n_pte -= PTRS_PER_PTE;
53 @@ -2117,8 +2117,7 @@ void __init xen_relocate_p2m(void)
54 make_lowmem_page_readonly(__va(pt_phys));
55 pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
56 PFN_DOWN(pt_phys));
57 - set_pmd(pmd + idx_pt,
58 - __pmd(_PAGE_TABLE | pt_phys));
59 + pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
60 pt_phys += PAGE_SIZE;
61 }
62 n_pt -= PTRS_PER_PMD;
63 @@ -2126,7 +2125,7 @@ void __init xen_relocate_p2m(void)
64 make_lowmem_page_readonly(__va(pmd_phys));
65 pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
66 PFN_DOWN(pmd_phys));
67 - set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
68 + pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
69 pmd_phys += PAGE_SIZE;
70 }
71 n_pmd -= PTRS_PER_PUD;