/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros and functions to manipulate Meta page tables.
 */

#ifndef _METAG_PGTABLE_H
#define _METAG_PGTABLE_H

#include <asm/pgtable-bits.h>
#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
#if PAGE_OFFSET >= LINGLOBAL_BASE
#define CONSISTENT_START	0xF7000000
#define CONSISTENT_END		0xF73FFFFF
#define VMALLOC_START		0xF8000000
#define VMALLOC_END		0xFFFEFFFF
#else
#define CONSISTENT_START	0x77000000
#define CONSISTENT_END		0x773FFFFF
#define VMALLOC_START		0x78000000
#define VMALLOC_END		0x7FFFFFFF
#endif

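/*
 * Layout note (derived from the values above): the CONSISTENT region,
 * used for coherent DMA mappings, spans exactly one 4MB pgd entry, and
 * VMALLOC_END stops at 0xFFFEFFFF so that vmalloc never hands out the
 * invalid 0xFFFF0000-0xFFFFFFFF region noted above.
 */
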
/*
 * The Linux memory management assumes a three-level page table setup. On
 * Meta, we use that, but "fold" the mid level into the top-level page
 * table.
 */

/* PGDIR_SHIFT determines the size of the area a second-level page table can
 * map. This is always 4MB.
 */

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

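/*
 * For example: PGDIR_SIZE is 1UL << 22 == 0x400000 (4MB), so PGDIR_MASK
 * is 0xFFC00000 and an address such as 0x40123456 rounds down to the
 * 4MB boundary 0x40000000.
 */
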
/*
 * Entries per page directory level: we use a two-level setup, so
 * we don't really have any PMD directory physically. First-level tables
 * always map 2GB (local or global) at a granularity of 4MB; second-level
 * tables map 4MB with a granularity between 4MB and 4kB (between 1 and
 * 1024 entries).
 */
#define PTRS_PER_PTE	(PGDIR_SIZE/PAGE_SIZE)
#define HPTRS_PER_PTE	(PGDIR_SIZE/HPAGE_SIZE)
#define PTRS_PER_PGD	512

#define USER_PTRS_PER_PGD	256
#define FIRST_USER_ADDRESS	META_MEMORY_BASE
#define FIRST_USER_PGD_NR	pgd_index(FIRST_USER_ADDRESS)

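/*
 * Checking the numbers above: 512 pgd entries * 4MB == 2GB, the full
 * local (or global) space; assuming 4kB pages (PAGE_SHIFT == 12),
 * PTRS_PER_PTE == 0x400000 / 0x1000 == 1024; and the 256 user pgd
 * entries cover 256 * 4MB == 1GB of user address space.
 */
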
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
				 _PAGE_ACCESSED | _PAGE_CACHEABLE)
#define PAGE_SHARED_C	PAGE_SHARED
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)
#define PAGE_COPY_C	PAGE_COPY

#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_CACHEABLE)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
				 _PAGE_ACCESSED | _PAGE_WRITE | \
				 _PAGE_CACHEABLE | _PAGE_KERNEL)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY_C
#define __P111	PAGE_COPY_C

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED_C
#define __S111	PAGE_SHARED_C

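/*
 * The __Pxwr/__Sxwr entries above populate the generic protection map:
 * the three digits are a vma's exec/write/read permission bits, with
 * __P* used for private mappings and __S* for shared ones. Note that
 * writable private mappings (e.g. __P011) get the non-writable
 * PAGE_COPY, so the first write faults and triggers copy-on-write.
 */
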
#ifndef __ASSEMBLY__

#include <asm/page.h>

/* zero page used for uninitialized stuff */
extern unsigned long empty_zero_page;
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

/* Certain architectures need to do special things when ptes
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pte_at(mm, addr, ptep, pteval)	set_pte(ptep, pteval)

#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)

#define pmd_none(x)		(!pmd_val(x))
#define pmd_bad(x)		((pmd_val(x) & ~(PAGE_MASK | _PAGE_SZ_MASK)) \
					!= (_PAGE_TABLE & ~_PAGE_SZ_MASK))
#define pmd_present(x)		(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x)		pfn_to_page(pte_pfn(x))

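/*
 * pfn_pte() and pte_pfn() are inverses over the pfn field: assuming
 * 4kB pages, pfn_pte(0x12345, PAGE_KERNEL) builds the value
 * (0x12345 << 12) | pgprot_val(PAGE_KERNEL), and pte_pfn() shifts the
 * protection bits back out to recover 0x12345.
 */
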
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */

static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= (~_PAGE_WRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ return pte; }

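/*
 * These helpers take and return a pte by value, so the caller must
 * write the result back for it to take effect, e.g.:
 *
 *	set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
 *
 * followed by the appropriate TLB flush.
 */
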
/*
 * Macros to make a page protection write-combined or uncacheable.
 */
#define pgprot_writecombine(prot) \
	__pgprot(pgprot_val(prot) & ~(_PAGE_CACHE_CTRL1 | _PAGE_CACHE_CTRL0))

#define pgprot_noncached(prot) \
	__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)


/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}

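/*
 * pte_modify() preserves the bits covered by _PAGE_CHG_MASK (notably
 * the pfn) and replaces the remaining protection bits, which is what
 * e.g. mprotect() needs; a typical use is
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */
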
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long paddr = pmd_val(pmd) & PAGE_MASK;
	if (!paddr)
		return 0;
	return (unsigned long)__va(paddr);
}

#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_page_shift(pmd)	(12 + ((pmd_val(pmd) & _PAGE_SZ_MASK) \
					>> _PAGE_SZ_SHIFT))
#define pmd_num_ptrs(pmd)	(PGDIR_SIZE >> pmd_page_shift(pmd))

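/*
 * pmd_page_shift() decodes the page size stored in the pmd's _PAGE_SZ
 * field: an encoding of 0 gives shift 12 (4kB) and each step doubles
 * the size, up to the 4MB a pmd maps. pmd_num_ptrs() is then the
 * number of ptes of that size per second-level table, e.g. 1024 for
 * 4kB pages and 1 for a single 4MB page.
 */
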
/*
 * Each pgd is only 2kB, mapping 2GB (local or global). If we're in global
 * space drop the top bit before indexing the pgd.
 */
#if PAGE_OFFSET >= LINGLOBAL_BASE
#define pgd_index(address)	((((address) & ~0x80000000) >> PGDIR_SHIFT) \
							& (PTRS_PER_PGD-1))
#else
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#endif

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

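/*
 * Worked example: 512 entries of 4 bytes each is the 2kB pgd, and
 * 512 * 4MB covers the 2GB space. With a global PAGE_OFFSET, an
 * address such as 0xC0400000 first has bit 31 dropped (0x40400000)
 * and then indexes pgd slot 0x101.
 */
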
/* Find an entry in the second-level page table.. */
#if !defined(CONFIG_HUGETLB_PAGE)
/* all pages are of size (1 << PAGE_SHIFT), so no need to read 1st level pt */
# define pte_index(pmd, address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#else
/* some pages are huge, so read 1st level pt to find out */
# define pte_index(pmd, address) \
	(((address) >> pmd_page_shift(pmd)) & (pmd_num_ptrs(pmd) - 1))
#endif
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(*(dir), address))
#define pte_offset_map(dir, address)		pte_offset_kernel(dir, address)
#define pte_offset_map_nested(dir, address)	pte_offset_kernel(dir, address)

#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

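/*
 * A full software table walk therefore looks like this (a sketch, with
 * no locking and no pmd_none()/pmd_bad() checks shown):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * with the pud and pmd levels folded away by the generic nopmd and
 * 5-level-hack headers included at the top of this file.
 */
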
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Meta doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *pte)
{
}

/*
 * Encode and decode a swap entry (must be !pte_none(e) && !pte_present(e)).
 * Since PAGE_PRESENT is bit 1, we can use the bits above that.
 */
#define __swp_type(x)			(((x).val >> 1) & 0xff)
#define __swp_offset(x)			((x).val >> 10)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | \
						 ((offset) << 10) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

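/*
 * A swap pte is therefore laid out as: the present bit clear (so it is
 * not pte_present()), the swap type in bits 1-8 (up to 256 swap areas),
 * bit 9 unused, and the swap offset from bit 10 upwards.
 */
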
#define kern_addr_valid(addr)	(1)

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
void paging_init(unsigned long mem_end);

#ifdef CONFIG_METAG_META12
/* This is a workaround for an issue in Meta 1 cores. These cores cache
 * invalid entries in the TLB, so we always need to flush whenever we add
 * a new pte. Unfortunately we can only flush the whole TLB, not shoot down
 * single entries, so this is sub-optimal. This implementation ensures that
 * we will get a flush at the second attempt, so we may still get repeated
 * faults, but we don't overflow the kernel stack handling them.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
	}								  \
	flush_tlb_page(__vma, __address);				  \
	__changed;							  \
})
#endif
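/*
 * Note that the flush_tlb_page() above is intentionally outside the
 * if (__changed) block: even when the pte is unchanged (a repeated
 * fault), the stale cached TLB entry still needs flushing, which is
 * what guarantees the flush by the second attempt described above.
 */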

#include <asm-generic/pgtable.h>

#endif /* __ASSEMBLY__ */
#endif /* _METAG_PGTABLE_H */