/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>		/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
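
/*
 * Worked example, assuming the usual 4K pages (PAGE_SHIFT == 12): with
 * 32-bit PTEs, PTE_INDEX_SIZE is 10, so PGDIR_SHIFT = 22 and each of the
 * 1024 pgdir entries maps a 4MB PGDIR_SIZE region through a one-page,
 * 1024-entry PTE table.  With CONFIG_PTE_64BIT, PTE_INDEX_SIZE drops to 9:
 * PGDIR_SHIFT = 21, the pgdir grows to 2048 entries (8KB) and each entry
 * maps 2MB through a one-page table of 512 64-bit PTEs, matching the
 * second layout described above.
 */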

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP and the VM area allocations growing upwards from
 * VMALLOC_START.  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
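
/*
 * Summarising the layout above (kernel addresses growing upward): the
 * lowmem linear map ends at high_memory; VMALLOC_START is placed a
 * VMALLOC_OFFSET guard above that (rounded); the vmalloc/ioremap area
 * then runs up to VMALLOC_END == ioremap_bot, above which sit the early
 * boot-time ioremap()s that grew down from IOREMAP_TOP; between
 * IOREMAP_TOP and KVIRT_TOP lies the consistent DMA area when
 * CONFIG_NOT_COHERENT_CACHE is set, and PKMAP/FIXMAP sit above KVIRT_TOP.
 */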

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}


/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
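
/*
 * Typical pte_update() use, as in the helpers below: clear and/or set
 * status bits in a live PTE and read back the previous value, e.g.
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);		clears "young"
 *	pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, _PAGE_RO);	write-protects
 */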

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}


static inline void __ptep_set_access_flags(struct mm_struct *mm,
					   pte_t *ptep, pte_t entry,
					   unsigned long address)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
				  pte_index(addr))
#define pte_offset_map(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
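
/*
 * Putting pgd_index() and pte_index() together, a 32-bit virtual address
 * is decoded as:
 *
 *	bits 31 .. PGDIR_SHIFT			pgd_index()  - which PTE page
 *	bits PGDIR_SHIFT-1 .. PAGE_SHIFT	pte_index()  - which PTE in it
 *	bits PAGE_SHIFT-1 .. 0			byte offset within the page
 */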

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
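
/*
 * Decoding the macros above: the low 3 bits of a swap PTE are always
 * zero, which is how the requirement stated above (no _PAGE_PRESENT or
 * _PAGE_HASHPTE bits in a swap entry) is met; the swap type occupies
 * PTE bits 3-7 (up to 32 swap types) and the swap offset the remaining
 * high bits.
 */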

int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */