#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef CONFIG_PPC64
#include <asm-ppc/pgtable.h>
#else

/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/stddef.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
struct mm_struct;
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/pgtable-64k.h>
#else
#include <asm/pgtable-4k.h>
#endif

#define FIRST_USER_ADDRESS	0

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE	(PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
				 PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE		(1UL << PGTABLE_EADDR_SIZE)
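
/*
 * Worked example (not in the original header; index values taken from the
 * asm/pgtable-4k.h of this era): with 4K pages the index sizes are
 * 9 (PTE) + 7 (PMD) + 7 (PUD) + 9 (PGD), and PAGE_SHIFT is 12, so
 * PGTABLE_EADDR_SIZE is 44 and PGTABLE_RANGE is 1UL << 44, i.e. a 16TB
 * effective address range.
 */
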
#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif

/*
 * Define the address range of the vmalloc VM area.
 */
#define VMALLOC_START	(0xD000000000000000ul)
#define VMALLOC_SIZE	(0x80000000000UL)
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)

/*
 * Define the address range of the imalloc VM area.
 */
#define PHBS_IO_BASE	VMALLOC_END
#define IMALLOC_BASE	(PHBS_IO_BASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
#define IMALLOC_END	(VMALLOC_START + PGTABLE_RANGE)

/*
 * Common bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.  Additional
 * bits may be defined in pgtable-*.h
 */
#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
#define _PAGE_USER	0x0002 /* matches one of the PP bits */
#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED	0x0008
#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
#define _PAGE_DIRTY	0x0080 /* C: page changed */
#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
#define _PAGE_RW	0x0200 /* software: user write access allowed */
#define _PAGE_HASHPTE	0x0400 /* software: pte has an associated HPTE */
#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */

#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)
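
/*
 * Worked example (illustrative, not part of the original): the flag word
 * for a writable kernel mapping is _PAGE_BASE | _PAGE_WRENABLE, i.e.
 * 0x0001 | 0x0100 | 0x0010 | 0x0200 | 0x0080 = 0x0391: present,
 * referenced, coherent, writable and dirty.
 */
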
/* __pgprot defined in asm-powerpc/page.h */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)

#define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
#define HAVE_PAGE_AGP

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7

/*
 * POWER4 and newer have per-page execute protection; older chips can only
 * do this on a segment (256MB) basis.
 *
 * Also, write permissions imply read permissions.
 * This is the closest we can get.
 *
 * Note: due to the way vm flags are laid out, the bits are XWR (see the
 * worked example after the tables below).
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X

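/*
 * Worked example (illustrative, not part of the original): an
 * mmap(PROT_READ|PROT_WRITE, MAP_SHARED) region has VM_READ|VM_WRITE set,
 * which selects __S011 above, i.e. PAGE_SHARED (_PAGE_BASE | _PAGE_RW |
 * _PAGE_USER).  The same protections in a private mapping select __P011,
 * i.e. PAGE_COPY, which omits _PAGE_RW so the first write faults and
 * triggers copy-on-write.
 */
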
#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_HUGETLB_PAGE

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif

#ifndef __ASSEMBLY__

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * mk_pte takes a (struct page *) as input
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;

	pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
	return pte;
}
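
/*
 * Example usage (a sketch, not from the original header): building a
 * writable kernel PTE for a given struct page:
 *
 *	pte_t pte = mk_pte(page, PAGE_KERNEL);
 *
 * This expands to pfn_pte(page_to_pfn(page), PAGE_KERNEL): the pfn lands
 * in the bits at and above PTE_RPN_SHIFT, the protection bits below.
 */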

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))

#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/* pte_clear moved to later in this file */

#define pte_pfn(x)		((unsigned long)((pte_val(x) >> PTE_RPN_SHIFT)))
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define pmd_set(pmdp, pmdval)	(pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) == 0)
#define pmd_present(pmd)	(pmd_val(pmd) != 0)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_kernel(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))

#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		((pud_val(pud)) == 0)
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
#define pud_page(pud)		(pud_val(pud) & ~PUD_MASKED_BITS)

#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
#define pgd_index(address)	(((address) >> (PGDIR_SHIFT)) & 0x1ff)

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp, addr) \
	(((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir, addr) \
	(((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir, addr)		pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir, addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)				do { } while(0)
#define pte_unmap_nested(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

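/*
 * Illustrative walk (a sketch; find_linux_pte() at the end of this file
 * is the real thing): going from an mm and an address to the PTE slot
 * with the macros above.  pud_offset() is assumed to come from the
 * pgtable-*.h header included earlier:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Each step masks out the index bits for its level; a real caller must
 * also check for none/absent entries at each level.
 */
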
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER;}
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC;}
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE;}

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }

/* Atomic PTE updates */
static inline unsigned long pte_update(pte_t *p, unsigned long clr)
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "m" (*p), "i" (_PAGE_BUSY)
	: "cc" );

	return old;
}
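
/*
 * C-level sketch of the assembly above (illustrative only; the real code
 * needs ldarx/stdcx. for atomicity):
 *
 *	do {
 *		old = *p;		// ldarx: load with reservation
 *	} while (old & _PAGE_BUSY);	// spin while the hash code owns it
 *	new = old & ~clr;		// andc: clear the requested bits
 *	*p = new;			// stdcx.: store only if the
 *					// reservation still holds, else
 *					// retry the whole sequence
 *	return old;
 */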

/* PTE updating functions.  hpte_update() puts the PTE into the flush
 * batch; it doesn't actually trigger the hash flush immediately, you
 * need to call flush_tlb_pending() for that.
 * Pass -1 for "normal" size (4K or 64K)
 */
extern void hpte_update(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, unsigned long pte, int huge);
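
/*
 * Typical pattern (illustrative), mirrored by the helpers below: clear
 * bits in the linux PTE, queue a hash flush if an HPTE existed, and
 * force the batch out when required:
 *
 *	old = pte_update(ptep, bits_to_clear);
 *	if (old & _PAGE_HASHPTE) {
 *		hpte_update(mm, addr, ptep, old, 0);
 *		flush_tlb_pending();
 *	}
 */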

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_ACCESSED);
	if (old & _PAGE_HASHPTE) {
		hpte_update(mm, addr, ptep, old, 0);
		flush_tlb_pending();
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

/*
 * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the
 * moment we always flush but we need to fix hpte_update and test if the
 * optimisation is worth it.
 */
static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
		return 0;
	old = pte_update(ptep, _PAGE_DIRTY);
	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, ptep, old, 0);
	return (old & _PAGE_DIRTY) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __addr, __ptep)		   \
({									   \
	int __r;							   \
	__r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
	__r;								   \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;
	old = pte_update(ptep, _PAGE_RW);
	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, ptep, old, 0);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
						  __ptep);		\
	flush_tlb_page(__vma, __address);				\
	__dirty;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, ptep, old, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr, ptep, old, 0);
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		pte_clear(mm, addr, ptep);
		flush_tlb_pending();
	}
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);

#ifdef CONFIG_PPC_64K_PAGES
	if (mmu_virtual_psize != MMU_PAGE_64K)
		pte = __pte(pte_val(pte) | _PAGE_COMBO);
#endif /* CONFIG_PPC_64K_PAGES */

	*ptep = pte;
}
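
/*
 * Example usage (a sketch, not from the original header): installing a
 * new translation, e.g. at fault time:
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, addr, ptep, pte);
 *
 * set_pte_at() first clears and flushes any existing translation and
 * strips _PAGE_HPTEFLAGS, so stale software bits describing the old hash
 * entry never leak into the new PTE.
 */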

/* Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	or	%0,%3,%0\n\
	stdcx.	%0,0,%4\n\
	bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								   \
		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
		flush_tlb_page_nohash(__vma, __address);		   \
	} while(0)

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
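
/*
 * Example usage (a sketch, not from the original header): a driver
 * mapping device registers to userspace would typically do:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * making the mapping cache-inhibited (I) and guarded (G), as required
 * for memory-mapped I/O.
 */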

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

#ifdef CONFIG_HUGETLB_PAGE
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
	free_pgd_range(tlb, addr, end, floor, ceiling)
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
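
/*
 * Worked example (illustrative): __swp_entry(type, offset) packs
 * ((type) << 1) | ((offset) << 8), so the type occupies bits 1-6 and the
 * offset bits 8 and up, recovered by __swp_type() and __swp_offset().
 * __swp_entry_to_pte() then shifts the whole value up by PTE_RPN_SHIFT,
 * leaving the low protection bits (including _PAGE_PRESENT, bit 0) clear
 * so a swap PTE is never mistaken for a valid translation.
 */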

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 * The only use is in fs/ncpfs/dir.c
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

void pgtable_cache_init(void);

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns NULL.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt = NULL;

	pg = pgdir + pgd_index(ea);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, ea);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, ea);
			if (pmd_present(*pm))
				pt = pte_offset_kernel(pm, ea);
		}
	}
	return pt;
}
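
/*
 * Example usage (a sketch, not from the original header): looking up the
 * PTE behind an address in the kernel page tables:
 *
 *	unsigned long pfn = 0;
 *	pte_t *ptep = find_linux_pte(init_mm.pgd, addr);
 *
 *	if (ptep && pte_present(*ptep))
 *		pfn = pte_pfn(*ptep);
 *
 * The caller is assumed to ensure the page tables cannot change
 * underneath it for the result to remain valid.
 */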

#include <asm-generic/pgtable.h>

#endif /* __ASSEMBLY__ */

#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_PGTABLE_H */