/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef __ASSEMBLY__

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#define VMALLOC_SIZE		(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END		(PAGE_OFFSET - 1)
#define VMALLOC_START		(PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VMEMMAP_SHIFT \
	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	(VMALLOC_START - 1)
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
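/*
 * Worked example (a sketch, not config-independent): assuming an Sv39 kernel
 * with CONFIG_VA_BITS = 39, PAGE_SHIFT = 12 and a 64-byte struct page
 * (STRUCT_PAGE_MAX_SHIFT = 6), half the virtual address space holds
 * 2^(39 - 12 - 1) = 2^26 pages, so VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32 and
 * VMEMMAP_SIZE = 4 GiB of struct pages.
 */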

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
 * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START)

#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE	PMD_SIZE
#else
#define FIXADDR_SIZE	PGDIR_SIZE
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
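
/*
 * Taken together, the definitions above lay the MMU-enabled address space out
 * from the top down: the linear map starts at PAGE_OFFSET; directly below it
 * sits the vmalloc region (whose top 128 MiB doubles as the BPF JIT region),
 * then the vmemmap array, the 16 MiB PCI I/O window, and finally the fixmap
 * area ending at FIXADDR_START.
 */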

#endif

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#ifdef CONFIG_MMU
/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))
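/*
 * For instance, with 4 KiB pages and 8-byte table entries on RV64, both
 * PTRS_PER_PGD and PTRS_PER_PTE work out to 4096 / 8 = 512 entries per table.
 */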

/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

/*
 * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
 * change the properties of memory regions.
 */
#define _PAGE_IOREMAP	_PAGE_KERNEL

extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
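
/*
 * In both tables the index encodes the requested x, w and r permission bits.
 * Writable MAP_PRIVATE combinations (__P010, __P011, __P110, __P111)
 * deliberately drop _PAGE_WRITE and fall back to the read-only PAGE_COPY*
 * protections, so the first write faults and the kernel can copy-on-write the
 * page; MAP_SHARED mappings keep the write bit and use PAGE_SHARED*.
 */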

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd);
}

#define pmd_leaf	pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) &&
	       (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}
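
/*
 * Per the RISC-V privileged specification, a page-table entry with any of the
 * R, W or X bits set is a leaf mapping rather than a pointer to the next
 * level, which is exactly what pmd_leaf() (and pte_huge() below) test for.
 */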

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

#define pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Locate an entry in the page global directory */
static inline pgd_t *pgd_offset(const struct mm_struct *mm, unsigned long addr)
{
	return mm->pgd + pgd_index(addr);
}
/* Locate an entry in the kernel page global directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, (addr))

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}
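
/*
 * In the RISC-V Sv PTE format the physical page number occupies the bits
 * above _PAGE_PFN_SHIFT (bit 10), with the permission and status flags
 * (V, R, W, X, U, G, A, D) packed into the low bits, which is why pfn_pte()
 * and pfn_pgd() simply shift the PFN up and OR in pgprot_val(prot).
 */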

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
}

#define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			((void)(pte))

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte)
		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))

/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

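/*
 * set_pte_at() below also flushes the instruction cache when it installs a
 * present and executable mapping: RISC-V does not keep instruction fetch
 * coherent with ordinary stores, so newly written code must be made visible
 * to the I-cache before it may be executed through the new mapping.
 */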
static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit            1:	_PAGE_PROT_NONE (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
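
/*
 * For example (hypothetical values): __swp_entry(3, 0x1234) produces
 * (3 << 2) | (0x1234 << 7) = 0x91a0c; __swp_type() then recovers 3 and
 * __swp_offset() recovers 0x1234. Bits 0 and 1 remain clear, so the entry is
 * neither present nor PROT_NONE and is recognised as a swap PTE.
 */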

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE	FIXADDR_START
#endif
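
/*
 * Sanity check for the Sv39 configuration: PGDIR_SIZE = 1 GiB and
 * PTRS_PER_PGD = 512, so TASK_SIZE = 512 GiB / 2 = 256 GiB = 0x4000000000,
 * matching the comment above.
 */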

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define VMALLOC_START		0

#define TASK_SIZE		0xffffffffUL

static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}

#endif /* !CONFIG_MMU */

#define kern_addr_valid(addr)	(1) /* FIXME */

extern void *dtb_early_va;
void setup_bootmem(void);
void paging_init(void);

#define FIRST_USER_ADDRESS	0

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */