// SPDX-License-Identifier: GPL-2.0
/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in linux/pgtable.h
 *
 * Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/mm_inline.h>
#include <asm/tlb.h>

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none. Usually (though
 * very seldom in practice) called out of line from the
 * p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
	p4d_ERROR(*p4d);
	p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out like the
 * p4d/pud variants above: pmd folding is special, and the pmd_*
 * macros typically refer to the upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed) and write permission.
 * Furthermore, we know it always gets set to a "more permissive"
 * setting, which allows most architectures to optimize this. We
 * return whether the PTE actually changed, which in turn instructs
 * the caller to do things like update_mmu_cache. This used to be
 * done in the caller, but sparc needs minor faults to force that
 * call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
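/*
 * Test and clear the accessed ("young") bit, flushing the TLB entry
 * if it was set so that the next access must mark the PTE young
 * again. Returns whether the PTE was young.
 */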
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
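/*
 * Clear the PTE and, if the old entry was accessible (i.e. could be
 * cached in the TLB), flush the TLB entry. Returns the old PTE.
 */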
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t pte;
	pte = ptep_get_and_clear(mm, address, ptep);
	if (pte_accessible(mm, pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
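/*
 * The huge-pmd counterpart of ptep_set_access_flags(): update the
 * access flags of a huge pmd and flush the TLB range it maps if the
 * entry changed. Returns whether it changed.
 */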
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
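/*
 * Test and clear the accessed bit of a huge pmd, flushing the TLB
 * range it maps if the bit was set. Returns whether it was young.
 */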
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
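/*
 * Clear a huge pmd (which, if present, must be a transparent huge
 * page or devmap entry) and flush the TLB range it covered. Returns
 * the old pmd.
 */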
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		  !pmd_devmap(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
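/* The pud counterpart of pmdp_huge_clear_flush() for PUD-sized huge pages. */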
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
			    pud_t *pudp)
{
	pud_t pud;

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);
	VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp));
	pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
	flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
	return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
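/*
 * Deposit a preallocated pte page table behind a huge pmd, to be
 * withdrawn later (e.g. when the huge pmd is split). Deposited
 * tables are kept in FIFO order on a list headed by pmd_huge_pte().
 * Must be called with the pmd lock held.
 */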
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
	pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/*
 * Withdraw the oldest deposited page table. There is no "address"
 * argument, so this destroys the page coloring used by some
 * architectures.
 */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pgtable_t pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
							  struct page, lru);
	if (pmd_huge_pte(mm, pmdp))
		list_del(&pgtable->lru);
	return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
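/*
 * Transition a huge pmd to an invalid (non-present) state, returning
 * the old entry atomically via pmdp_establish() so that updates made
 * by a racing hardware walker are not lost, then flush the TLB range.
 */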
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return old;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
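/*
 * pmdp_invalidate_ad() - invalidate a pmd while changing its
 * accessed and/or dirty bits. The generic version is identical to
 * pmdp_invalidate(); architectures that must take extra care not to
 * lose concurrent hardware A/D updates override it.
 */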
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
			 pmd_t *pmdp)
{
	return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
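/*
 * Clear a pmd that points to a pte page table while collapsing it
 * into a transparent huge page. The TLB caches pte-level entries
 * here, so the pte range is flushed rather than a single huge entry.
 */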
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	/*
	 * The pmd and the hugepage pte are in the same format, so we
	 * can use the same function to clear either.
	 */
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(pmd_trans_huge(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

	/* collapse entails shooting down the ptes, not the pmd */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */