/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */
#include <asm/pte-common.h>

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

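/*
 * Worked example (editor's sketch, assuming the usual 4K pages, i.e.
 * PAGE_SHIFT == 12, and 32-bit PTEs for which PTE_SHIFT works out to 10):
 * PGDIR_SHIFT = 12 + 10 = 22, so each pgdir entry maps PGDIR_SIZE = 4MB,
 * PGD_INDEX_SIZE = 32 - 22 = 10 and PTRS_PER_PGD = PTRS_PER_PTE = 1024,
 * matching the "1-page 1024-entry pgdir" case described above.  With
 * 64-bit PTEs the numbers shift as in the second paragraph above; the
 * macros adapt automatically through PTE_SHIFT.
 */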
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on others, from which we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address.  Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot

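/*
 * Rough sketch of the resulting kernel virtual layout (highest addresses
 * first; exact boundaries depend on the config options above):
 *
 *	FIXMAP / PKMAP (with HIGHMEM)
 *	KVIRT_TOP
 *	IOREMAP_TOP (KVIRT_TOP, minus the consistent DMA window when the
 *		     cache is not coherent)
 *	  ... early ioremaps grow down from here ...
 *	ioremap_bot == VMALLOC_END
 *	  ... vmalloc/ioremap space ...
 *	VMALLOC_START (just above lowmem, rounded by VMALLOC_OFFSET)
 *	high_memory (top of the linear lowmem mapping)
 */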
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

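/*
 * Note: with __ARCH_USE_5LEVEL_HACK and <asm-generic/pgtable-nopmd.h>
 * included above, the pud and pmd levels are folded into the pgd
 * (PMD_INDEX_SIZE and PUD_INDEX_SIZE are 0), so the pmd_* helpers below
 * effectively operate on pgd entries pointing at PTE pages.
 */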
#define	pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#endif /* CONFIG_PTE_64BIT */
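
/*
 * Example use (see __ptep_test_and_clear_young() below): clear the
 * accessed bit atomically and act on the old value:
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);
 *	if (old & _PAGE_HASHPTE)
 *		flush_hash_pages(...);
 *
 * The lwarx/stwcx. loop retries the read-modify-write if another CPU (or a
 * hash fault) touches the PTE word in between, so bits such as
 * _PAGE_HASHPTE are never lost.
 */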

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

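/*
 * Set in *ptep whichever of _PAGE_DIRTY/_PAGE_ACCESSED/_PAGE_RW/_PAGE_EXEC
 * the new entry carries, and clear _PAGE_RO if the new entry no longer has
 * it; nothing else is cleared, so _PAGE_HASHPTE and the remaining state
 * bits are preserved.
 */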
static inline void __ptep_set_access_flags(struct mm_struct *mm,
					   pte_t *ptep, pte_t entry,
					   unsigned long address)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)

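/*
 * Illustrative software walk of a kernel address (editor's sketch only; the
 * pud/pmd levels are folded by <asm-generic/pgtable-nopmd.h>, so the
 * intermediate pud_offset()/pmd_offset() calls come from the generic
 * headers):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * pgd_index() uses the top (32 - PGDIR_SHIFT) address bits and pte_index()
 * the next PTE_INDEX_SIZE bits below PGDIR_SHIFT.
 */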
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

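/*
 * Resulting PTE layout for a swap entry: the low three bits are left zero
 * (so _PAGE_PRESENT and _PAGE_HASHPTE stay clear), bits 3-7 hold the 5-bit
 * swap type and the remaining high bits hold the offset.  For example
 * (illustrative values only), __swp_entry(1, 0x10) gives
 * val = 1 | (0x10 << 5) = 0x201, which __swp_entry_to_pte() turns into a
 * PTE value of 0x201 << 3 = 0x1008.
 */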
int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}

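/*
 * For example (editor's sketch, assuming PTE_RPN_SHIFT == PAGE_SHIFT == 12,
 * the usual case for 32-bit PTEs), pfn_pte(0x12345, prot) yields a PTE
 * value of 0x12345000 | pgprot_val(prot), and pte_pfn() recovers 0x12345
 * from it.
 */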
/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}


/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors.  It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs.  We use
	 * the helper pte_update() which does an atomic update.  We need to do
	 * that because a concurrent invalidation can clear _PAGE_HASHPTE.  If
	 * it's a per-CPU PTE such as a kmap_atomic, we do a simple update
	 * preserving the hash bits instead (ie, same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between.  This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
#error "Not supported"
#endif
}

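/*
 * Callers: the generic set_pte_at() in the common powerpc pgtable code is
 * expected to pass percpu = 0; percpu = 1 is reserved for per-CPU mappings
 * such as kmap_atomic ones, as described in the first case above.
 */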
/*
 * Macros to mark a page protection value as "uncacheable" or to otherwise
 * adjust its cache-control bits.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

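/*
 * Typical use (illustrative): device MMIO mappings want an uncached,
 * guarded mapping, e.g.
 *
 *	prot = pgprot_noncached(PAGE_KERNEL);
 *
 * while pgprot_noncached_wc() below drops the guard bit for
 * write-combining and pgprot_cached() asks for a coherent cacheable
 * mapping.
 */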
#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */