/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */

#define _PAGE_KERNEL_RO		0
#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

#ifndef __ASSEMBLY__

static inline bool pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}
#endif /* __ASSEMBLY__ */

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that differ pre-define the value themselves, so we don't
 * override it here.
 */
#define PTE_RPN_SHIFT	(PAGE_SHIFT)

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#endif

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL)
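
/*
 * Illustrative sketch, not a definitive statement (assuming a 4KB
 * PAGE_SIZE): the PFN then lives above bit 12, so PTE_RPN_MASK is ~0xfff
 * and the low 12 bits carry the status/protection flags. _PAGE_CHG_MASK
 * therefore keeps the PFN plus _PAGE_HASHPTE/_PAGE_DIRTY/_PAGE_ACCESSED/
 * _PAGE_SPECIAL when pte_modify() further down swaps in a new pgprot.
 */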

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non-cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)

/*
 * Permission masks used to generate the __P and __S table.
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now.
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 11 bits
 * are an index to the second level table. The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

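/*
 * Worked example, offered only as a sketch: assuming a 4KB PAGE_SIZE and
 * the usual PTE_SHIFT = PAGE_SHIFT - log2(sizeof(pte_t)), 32-bit PTEs give
 * PTE_INDEX_SIZE = 10, PTRS_PER_PTE = 1024, PGDIR_SHIFT = 22,
 * PGDIR_SIZE = 4MB and PTRS_PER_PGD = 1024 (a one-page pgdir), i.e. the
 * "normal case" above. With CONFIG_PTE_64BIT the second level shrinks to
 * 512 entries, PGDIR_SHIFT drops to 21 and the pgdir grows to 2048
 * entries (8KB).
 */
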
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif
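
/*
 * Example layout, as a non-authoritative sketch: without HIGHMEM,
 * KVIRT_TOP is 0xfe000000. On a coherent-cache platform IOREMAP_TOP is
 * that same address; with CONFIG_NOT_COHERENT_CACHE the consistent DMA
 * window (CONFIG_CONSISTENT_SIZE, e.g. a hypothetical 0x00200000) is
 * carved out just below KVIRT_TOP, putting IOREMAP_TOP at 0xfde00000 in
 * that case.
 */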

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems. We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START). For this reason we have ioremap_bot to check when
 * we actually run into the mappings we set up in early boot with the VM
 * system. This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#define VMALLOC_END	ioremap_bot
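
/*
 * Hypothetical example (values chosen purely for illustration): with
 * 192MB of lowmem mapped at 0xc0000000, high_memory is 0xcc000000, so
 * VMALLOC_START = (0xcc000000 + 0x1000000) & ~0xffffff = 0xcd000000, and
 * the vmalloc/ioremap space runs from there up to ioremap_bot
 * (VMALLOC_END).
 */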

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>

extern unsigned long ioremap_bot;

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#endif /* CONFIG_PTE_64BIT */
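
/*
 * Usage sketch (grounded in helpers elsewhere in this file, not an extra
 * API): callers pass a mask of bits to clear and a mask of bits to set,
 * e.g. ptep_set_wrprotect() does pte_update(ptep, _PAGE_RW, 0) and
 * pte_clear() does pte_update(ptep, ~_PAGE_HASHPTE, 0). The previous PTE
 * value is returned so callers can inspect bits such as _PAGE_HASHPTE.
 */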

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, _PAGE_RW, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

	pte_update(ptep, 0, set);

	flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_page_vaddr(pmd) \
	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \
	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 * -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
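
/*
 * Layout sketch inferred from the macros above (not a separate spec): a
 * swap PTE keeps its low 3 bits clear (so _PAGE_PRESENT and _PAGE_HASHPTE
 * stay 0), the 5-bit swap type lives in PTE bits 3..7 and the swap offset
 * in the bits above that. For example, __swp_entry(2, 0x40) has val 0x802,
 * which __swp_entry_to_pte() turns into a PTE value of 0x4010, and
 * __pte_to_swp_entry()/__swp_type()/__swp_offset() recover (2, 0x40).
 */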

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hashpte(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_HASHPTE);
}

static inline bool pte_ci(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_NO_CACHE);
}

/*
 * We only find page table entries at the last level,
 * hence no need for accessors at other levels.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * Read access is controlled by the _PAGE_USER bit; there is no
	 * separate read permission bit here, so pte_read() is always true.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even though PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
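
/*
 * Hypothetical example (values chosen only for illustration): with a 4KB
 * PAGE_SIZE, pfn_pte(0x12345, PAGE_KERNEL) builds a PTE whose value is
 * 0x12345000 | pgprot_val(PAGE_KERNEL), and pte_pfn() on that PTE shifts
 * the flag bits back out to recover 0x12345.
 */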

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

static inline pte_t pte_mkpte(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}

static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update preserving
	 * the hash bits
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#else
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * so we need to keep track that this PTE needs invalidating
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
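
/*
 * Usage sketch derived from the definitions above (not an additional API):
 * pgprot_noncached(PAGE_KERNEL) clears the cache-control bits and sets
 * _PAGE_NO_CACHE | _PAGE_GUARDED, which works out equivalent to
 * PAGE_KERNEL_NCG; pgprot_writecombine() produces the same result minus
 * _PAGE_GUARDED.
 */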

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */