From fce86ff5802bac3a7b19db171aa1949ef9caac31 Mon Sep 17 00:00:00 2001
From: Dan Williams <dan.j.williams@intel.com>
Date: Mon, 13 May 2019 17:15:33 -0700
Subject: mm/huge_memory: fix vmf_insert_pfn_{pmd, pud}() crash, handle unaligned addresses

From: Dan Williams <dan.j.williams@intel.com>

commit fce86ff5802bac3a7b19db171aa1949ef9caac31 upstream.

Starting with c6f3c5ee40c1 ("mm/huge_memory.c: fix modifying of page
protection by insert_pfn_pmd()") vmf_insert_pfn_pmd() internally calls
pmdp_set_access_flags().  That helper enforces a pmd aligned @address
argument via VM_BUG_ON() assertion.

Update the implementation to take a 'struct vm_fault' argument directly
and apply the address alignment fixup internally to fix crash signatures
like:

    kernel BUG at arch/x86/mm/pgtable.c:515!
    invalid opcode: 0000 [#1] SMP NOPTI
    CPU: 51 PID: 43713 Comm: java Tainted: G OE 4.19.35 #1
    [..]
    RIP: 0010:pmdp_set_access_flags+0x48/0x50
    [..]
    Call Trace:
     vmf_insert_pfn_pmd+0x198/0x350
     dax_iomap_fault+0xe82/0x1190
     ext4_dax_huge_fault+0x103/0x1f0
     ? __switch_to_asm+0x40/0x70
     __handle_mm_fault+0x3f6/0x1370
     ? __switch_to_asm+0x34/0x70
     ? __switch_to_asm+0x40/0x70
     handle_mm_fault+0xda/0x200
     __do_page_fault+0x249/0x4f0
     do_page_fault+0x32/0x110
     ? page_fault+0x8/0x30
     page_fault+0x1e/0x30

Link: http://lkml.kernel.org/r/155741946350.372037.11148198430068238140.stgit@dwillia2-desk3.amr.corp.intel.com
Fixes: c6f3c5ee40c1 ("mm/huge_memory.c: fix modifying of page protection by insert_pfn_pmd()")
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: Piotr Balcer <piotr.balcer@intel.com>
Tested-by: Yan Ma <yan.ma@intel.com>
Tested-by: Pankaj Gupta <pagupta@redhat.com>
Reviewed-by: Matthew Wilcox <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Chandan Rajendra <chandan@linux.ibm.com>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 drivers/dax/device.c    |    6 ++----
 fs/dax.c                |    6 ++----
 include/linux/huge_mm.h |    6 ++----
 mm/huge_memory.c        |   16 ++++++++++------
 4 files changed, 16 insertions(+), 18 deletions(-)

--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -325,8 +325,7 @@ static vm_fault_t __dev_dax_pmd_fault(st
 
         *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-        return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, *pfn,
-                        vmf->flags & FAULT_FLAG_WRITE);
+        return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
@@ -376,8 +375,7 @@ static vm_fault_t __dev_dax_pud_fault(st
 
         *pfn = phys_to_pfn_t(phys, dax_region->pfn_flags);
 
-        return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, *pfn,
-                        vmf->flags & FAULT_FLAG_WRITE);
+        return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
 static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1660,8 +1660,7 @@ static vm_fault_t dax_iomap_pmd_fault(st
                 }
 
                 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
-                result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
-                                write);
+                result = vmf_insert_pfn_pmd(vmf, pfn, write);
                 break;
         case IOMAP_UNWRITTEN:
         case IOMAP_HOLE:
@@ -1775,8 +1774,7 @@ static vm_fault_t dax_insert_pfn_mkwrite
                 break;
 #ifdef CONFIG_FS_DAX_PMD
         case PE_SIZE_PMD:
-                ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
-                                pfn, true);
+                ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
                 break;
 #endif
         default:
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -47,10 +47,8 @@ extern bool move_huge_pmd(struct vm_area
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                         unsigned long addr, pgprot_t newprot,
                         int prot_numa);
-vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-                        pmd_t *pmd, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
-                        pud_t *pud, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
 enum transparent_hugepage_flag {
         TRANSPARENT_HUGEPAGE_FLAG,
         TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -772,11 +772,13 @@ out_unlock:
                 pte_free(mm, pgtable);
 }
 
-vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
-                        pmd_t *pmd, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write)
 {
+        unsigned long addr = vmf->address & PMD_MASK;
+        struct vm_area_struct *vma = vmf->vma;
         pgprot_t pgprot = vma->vm_page_prot;
         pgtable_t pgtable = NULL;
+
         /*
          * If we had pmd_special, we could avoid all these restrictions,
          * but we need to be consistent with PTEs and architectures that
@@ -799,7 +801,7 @@ vm_fault_t vmf_insert_pfn_pmd(struct vm_
 
         track_pfn_insert(vma, &pgprot, pfn);
 
-        insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable);
+        insert_pfn_pmd(vma, addr, vmf->pmd, pfn, pgprot, write, pgtable);
         return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
@@ -848,10 +850,12 @@ out_unlock:
         spin_unlock(ptl);
 }
 
-vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
-                        pud_t *pud, pfn_t pfn, bool write)
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write)
 {
+        unsigned long addr = vmf->address & PUD_MASK;
+        struct vm_area_struct *vma = vmf->vma;
         pgprot_t pgprot = vma->vm_page_prot;
+
         /*
          * If we had pud_special, we could avoid all these restrictions,
          * but we need to be consistent with PTEs and architectures that
@@ -868,7 +872,7 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_
 
         track_pfn_insert(vma, &pgprot, pfn);
 
-        insert_pfn_pud(vma, addr, pud, pfn, pgprot, write);
+        insert_pfn_pud(vma, addr, vmf->pud, pfn, pgprot, write);
         return VM_FAULT_NOPAGE;
 }
 EXPORT_SYMBOL_GPL(vmf_insert_pfn_pud);