/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */

#define _PAGE_KERNEL_RO		0
#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

#define _PAGE_HPTEFLAGS		_PAGE_HASHPTE

#ifndef __ASSEMBLY__

static inline bool pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}
#endif /* __ASSEMBLY__ */

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't
 * override it here.
 */
#define PTE_RPN_SHIFT	(PAGE_SHIFT)

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#endif
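
/*
 * Worked example (a sketch, assuming the usual 4k pages, i.e.
 * PAGE_SHIFT == 12): PTE_RPN_SHIFT is 12, so with 32-bit PTEs
 * PTE_RPN_MASK is 0xfffff000, while with CONFIG_PTE_64BIT it is
 * 0xfffffffffffff000ULL -- wide enough to hold a >32-bit physical
 * page number.
 */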

/*
 * _PAGE_CHG_MASK is the set of bits that are preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL)

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non-cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)

/*
 * Permission masks used to generate the __P and __S tables.
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now.
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
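
/*
 * Note how PAGE_COPY deliberately lacks _PAGE_RW: a private writable
 * mapping starts out read-only, so the first store faults and the
 * copy-on-write path can hand out a private copy, while a MAP_SHARED
 * writable mapping gets PAGE_SHARED with _PAGE_RW set from the start.
 */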

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Keep module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif /* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 11 bits
 * are an index to the second level table. The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
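
/*
 * Worked example of the two layouts above (a sketch, assuming 4k pages,
 * i.e. PAGE_SHIFT == 12):
 *  - 32-bit PTEs: PTE_INDEX_SIZE == 10, so PGDIR_SHIFT = 22, each PGD
 *    entry maps 4MB and PTRS_PER_PGD = 1024 (a 4KB pgdir).
 *  - 64-bit PTEs: PTE_INDEX_SIZE == 9 (512 PTEs fill a 4KB page), so
 *    PGDIR_SHIFT = 21, each PGD entry maps 2MB and PTRS_PER_PGD = 2048
 *    (an 8KB pgdir).
 */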

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from which we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at IOREMAP_TOP. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START). For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system. This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET	(0x1000000)	/* 16M */

/*
 * With CONFIG_STRICT_KERNEL_RWX, kernel segments are set NX. But when modules
 * are used, NX cannot be set on the vmalloc space. So the vmalloc VM space and
 * linear memory must not share segments.
 */
#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES)
#define VMALLOC_START ((_ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \
		       ~(VMALLOC_OFFSET - 1))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot

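/*
 * Worked example (a sketch, assuming 192MB of RAM and the usual 0xc0000000
 * kernel base, so high_memory == 0xcc000000): the plain case rounds
 * 0xcc000000 + 16MB to 16MB alignment, giving VMALLOC_START = 0xcd000000.
 * With STRICT_KERNEL_RWX + MODULES, high_memory is first aligned up to a
 * 256MB segment boundary (0xd0000000), giving VMALLOC_START = 0xd1000000,
 * so vmalloc space never shares an MMU segment with the linear mapping.
 */
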
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>

extern unsigned long ioremap_bot;

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

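/*
 * Note that pte_clear() below deliberately leaves _PAGE_HASHPTE alone:
 * the hash table may still hold an entry for this PTE, and _PAGE_HASHPTE
 * is what tells the subsequent TLB/hash flush that it must be removed.
 */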
#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#endif /* CONFIG_PTE_64BIT */
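
/*
 * In other words, pte_update(p, clr, set) atomically performs
 * *p = (*p & ~clr) | set and returns the previous value. For example,
 * ptep_set_wrprotect() below is pte_update(ptep, _PAGE_RW, 0), and
 * pte_clear() is pte_update(ptep, ~_PAGE_HASHPTE, 0), which wipes
 * everything except the hash-tracking bit.
 */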

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, _PAGE_RW, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
			    (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

	pte_update(ptep, 0, set);

	flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_page_vaddr(pmd)	\
	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)	\
	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)

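/*
 * A sketch of a full walk for a kernel address using the helpers above
 * (the PUD and PMD levels are folded away by pgtable-nopmd.h and the
 * __ARCH_USE_5LEVEL_HACK at the top, so this is effectively two levels):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);	(folded level)
 *	pmd_t *pmd = pmd_offset(pud, addr);	(folded level)
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */
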
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

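/*
 * Worked example (a sketch): __swp_entry(1, 0x10) packs type 1 into the
 * low 5 bits and offset 0x10 above it, giving val = 1 | (0x10 << 5) =
 * 0x201. __swp_entry_to_pte() then shifts that left by 3, so the PTE
 * holds 0x1008 and the low three bits (where _PAGE_PRESENT and
 * _PAGE_HASHPTE live) stay clear, as the comment above requires.
 */
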
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW); }
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hashpte(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_HASHPTE);
}

static inline bool pte_ci(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_NO_CACHE);
}

/*
 * We only find page table entries at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_USER bit alone:
	 * there is no separate read permission bit, so pte_read() is
	 * always true here.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}

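/*
 * For example (a sketch, assuming 4k pages so PTE_RPN_SHIFT == 12):
 * pfn_pte(0x12345, PAGE_KERNEL) builds the value 0x12345000 plus the
 * protection bits, and pte_pfn() on the result recovers 0x12345. With
 * CONFIG_PTE_64BIT, the cast to pte_basic_t keeps PFNs above 0xfffff
 * from being truncated before the shift.
 */
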
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

static inline pte_t pte_mkpte(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}

static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
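
/*
 * A typical use (e.g. on the mprotect() path) is
 * pte_modify(pte, PAGE_READONLY): the protection bits are replaced while
 * _PAGE_CHG_MASK keeps the PFN and the DIRTY, ACCESSED, HASHPTE and
 * SPECIAL state of the old PTE intact.
 */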

/* This low-level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update preserving
	 * the hash bits
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#else
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and we need to keep track that this PTE needs invalidating
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}

/*
 * Helpers to mark a page protection value as "uncacheable" and to
 * select the other cacheability variants.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */