/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <asm-generic/pgtable-nop4d.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/adi.h>
#include <asm/page.h>
#include <asm/processor.h>
/* The kernel image occupies 0x4000000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
 * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x100000000 to 0x200000000.
 * Since modules need to be in the lowest 32-bits of the address space,
 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define TLBTEMP_BASE		_AC(0x0000000006000000,UL)
#define TSBMAP_8K_BASE		_AC(0x0000000008000000,UL)
#define TSBMAP_4M_BASE		_AC(0x0000000008400000,UL)
#define MODULES_VADDR		_AC(0x0000000010000000,UL)
#define MODULES_LEN		_AC(0x00000000e0000000,UL)
#define MODULES_END		_AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
#define VMALLOC_START		_AC(0x0000000100000000,UL)
#define VMEMMAP_BASE		VMALLOC_END
/* PMD_SHIFT determines the size of the area a second-level page
 * table can map.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PMD_BITS	(PAGE_SHIFT - 3)

/* PUD_SHIFT determines the size of the area a third-level page
 * table can map.
 */
#define PUD_SHIFT	(PMD_SHIFT + PMD_BITS)
#define PUD_SIZE	(_AC(1,UL) << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PUD_BITS	(PAGE_SHIFT - 3)

/* PGDIR_SHIFT determines what a fourth-level page table entry can map. */
#define PGDIR_SHIFT	(PUD_SHIFT + PUD_BITS)
#define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PGDIR_BITS	(PAGE_SHIFT - 3)
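/* Worked example: with the default 8K kernel pages, PAGE_SHIFT == 13, so
 * each table level indexes 1 << 10 entries and the shifts come out as
 *
 *	PMD_SHIFT   = 13 + 10 = 23	(a PMD entry maps 8MB)
 *	PUD_SHIFT   = 23 + 10 = 33	(a PUD entry maps 8GB)
 *	PGDIR_SHIFT = 33 + 10 = 43	(a PGD entry maps 8TB)
 *
 * so PGDIR_SHIFT + PGDIR_BITS = 53, which is exactly what the sanity
 * checks below expect.
 */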
#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
#endif

#if (PGDIR_SHIFT + PGDIR_BITS) != 53
#error Page table parameters do not cover virtual address space properly.
#endif

#if (PMD_SHIFT != HPAGE_SHIFT)
#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
#endif

#ifndef __ASSEMBLY__

extern unsigned long VMALLOC_END;

#define vmemmap			((struct page *)VMEMMAP_BASE)

#include <linux/sched.h>
#include <asm/tlbflush.h>

bool kern_addr_valid(unsigned long addr);
/* Entries per page directory level. */
#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD	(1UL << PMD_BITS)
#define PTRS_PER_PUD	(1UL << PUD_BITS)
#define PTRS_PER_PGD	(1UL << PGDIR_BITS)
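/* With 8K pages each of these works out to 1024 entries; 1024 eight-byte
 * entries fill exactly one 8K page, so a table at any level occupies a
 * single page (which is what the pmd_bad() comment further down relies on).
 */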
#define pmd_ERROR(e)							\
	pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
#define pud_ERROR(e)							\
	pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
#define pgd_ERROR(e)							\
	pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))

#endif /* !(__ASSEMBLY__) */
/* PTE bits which are the same in SUN4U and SUN4V format. */
#define _PAGE_VALID	  _AC(0x8000000000000000,UL)	/* Valid TTE             */
#define _PAGE_R		  _AC(0x8000000000000000,UL)	/* Keep ref bit uptodate */
#define _PAGE_SPECIAL	  _AC(0x0200000000000000,UL)	/* Special page          */
#define _PAGE_PMD_HUGE	  _AC(0x0100000000000000,UL)	/* Huge page             */
#define _PAGE_PUD_HUGE	  _PAGE_PMD_HUGE

/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U	  _AC(0x6000000000000000,UL)	/* 4MB Page              */
#define _PAGE_SZ512K_4U	  _AC(0x4000000000000000,UL)	/* 512K Page             */
#define _PAGE_SZ64K_4U	  _AC(0x2000000000000000,UL)	/* 64K Page              */
#define _PAGE_SZ8K_4U	  _AC(0x0000000000000000,UL)	/* 8K Page               */
#define _PAGE_NFO_4U	  _AC(0x1000000000000000,UL)	/* No Fault Only         */
#define _PAGE_IE_4U	  _AC(0x0800000000000000,UL)	/* Invert Endianness     */
#define _PAGE_SOFT2_4U	  _AC(0x07FC000000000000,UL)	/* Software bits, set 2  */
#define _PAGE_SPECIAL_4U  _AC(0x0200000000000000,UL)	/* Special page          */
#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL)	/* Huge page             */
#define _PAGE_RES1_4U	  _AC(0x0002000000000000,UL)	/* Reserved              */
#define _PAGE_SZ32MB_4U	  _AC(0x0001000000000000,UL)	/* (Panther) 32MB page   */
#define _PAGE_SZ256MB_4U  _AC(0x2001000000000000,UL)	/* (Panther) 256MB page  */
#define _PAGE_SZALL_4U	  _AC(0x6001000000000000,UL)	/* All pgsz bits         */
#define _PAGE_SN_4U	  _AC(0x0000800000000000,UL)	/* (Cheetah) Snoop       */
#define _PAGE_RES2_4U	  _AC(0x0000780000000000,UL)	/* Reserved              */
#define _PAGE_PADDR_4U	  _AC(0x000007FFFFFFE000,UL)	/* (Cheetah) pa[42:13]   */
#define _PAGE_SOFT_4U	  _AC(0x0000000000001F80,UL)	/* Software bits:        */
#define _PAGE_EXEC_4U	  _AC(0x0000000000001000,UL)	/* Executable SW bit     */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL)	/* Modified (dirty)      */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL)	/* Accessed (ref'd)      */
#define _PAGE_READ_4U	  _AC(0x0000000000000200,UL)	/* Readable SW Bit       */
#define _PAGE_WRITE_4U	  _AC(0x0000000000000100,UL)	/* Writable SW Bit       */
#define _PAGE_PRESENT_4U  _AC(0x0000000000000080,UL)	/* Present               */
#define _PAGE_L_4U	  _AC(0x0000000000000040,UL)	/* Locked TTE            */
#define _PAGE_CP_4U	  _AC(0x0000000000000020,UL)	/* Cacheable in P-Cache  */
#define _PAGE_CV_4U	  _AC(0x0000000000000010,UL)	/* Cacheable in V-Cache  */
#define _PAGE_E_4U	  _AC(0x0000000000000008,UL)	/* side-Effect           */
#define _PAGE_P_4U	  _AC(0x0000000000000004,UL)	/* Privileged Page       */
#define _PAGE_W_4U	  _AC(0x0000000000000002,UL)	/* Writable              */

/* SUN4V pte bits... */
#define _PAGE_NFO_4V	  _AC(0x4000000000000000,UL)	/* No Fault Only         */
#define _PAGE_SOFT2_4V	  _AC(0x3F00000000000000,UL)	/* Software bits, set 2  */
#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL)	/* Modified (dirty)      */
#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL)	/* Accessed (ref'd)      */
#define _PAGE_READ_4V	  _AC(0x0800000000000000,UL)	/* Readable SW Bit       */
#define _PAGE_WRITE_4V	  _AC(0x0400000000000000,UL)	/* Writable SW Bit       */
#define _PAGE_SPECIAL_4V  _AC(0x0200000000000000,UL)	/* Special page          */
#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL)	/* Huge page             */
#define _PAGE_PADDR_4V	  _AC(0x00FFFFFFFFFFE000,UL)	/* paddr[55:13]          */
#define _PAGE_IE_4V	  _AC(0x0000000000001000,UL)	/* Invert Endianness     */
#define _PAGE_E_4V	  _AC(0x0000000000000800,UL)	/* side-Effect           */
#define _PAGE_CP_4V	  _AC(0x0000000000000400,UL)	/* Cacheable in P-Cache  */
#define _PAGE_CV_4V	  _AC(0x0000000000000200,UL)	/* Cacheable in V-Cache  */
/* Bit 9 is used to enable MCD corruption detection instead on M7 */
#define _PAGE_MCD_4V	  _AC(0x0000000000000200,UL)	/* Memory Corruption     */
#define _PAGE_P_4V	  _AC(0x0000000000000100,UL)	/* Privileged Page       */
#define _PAGE_EXEC_4V	  _AC(0x0000000000000080,UL)	/* Executable Page       */
#define _PAGE_W_4V	  _AC(0x0000000000000040,UL)	/* Writable              */
#define _PAGE_SOFT_4V	  _AC(0x0000000000000030,UL)	/* Software bits         */
#define _PAGE_PRESENT_4V  _AC(0x0000000000000010,UL)	/* Present               */
#define _PAGE_RESV_4V	  _AC(0x0000000000000008,UL)	/* Reserved              */
#define _PAGE_SZ16GB_4V	  _AC(0x0000000000000007,UL)	/* 16GB Page             */
#define _PAGE_SZ2GB_4V	  _AC(0x0000000000000006,UL)	/* 2GB Page              */
#define _PAGE_SZ256MB_4V  _AC(0x0000000000000005,UL)	/* 256MB Page            */
#define _PAGE_SZ32MB_4V	  _AC(0x0000000000000004,UL)	/* 32MB Page             */
#define _PAGE_SZ4MB_4V	  _AC(0x0000000000000003,UL)	/* 4MB Page              */
#define _PAGE_SZ512K_4V	  _AC(0x0000000000000002,UL)	/* 512K Page             */
#define _PAGE_SZ64K_4V	  _AC(0x0000000000000001,UL)	/* 64K Page              */
#define _PAGE_SZ8K_4V	  _AC(0x0000000000000000,UL)	/* 8K Page               */
#define _PAGE_SZALL_4V	  _AC(0x0000000000000007,UL)	/* All pgsz bits         */

#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V

#if REAL_HPAGE_SHIFT != 22
#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
#endif

#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V

/* We borrow bit 20 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	_AC(0x0000000000100000, UL)
#ifndef __ASSEMBLY__

pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);

unsigned long pte_sz_bits(unsigned long size);

extern pgprot_t PAGE_KERNEL;
extern pgprot_t PAGE_KERNEL_LOCKED;
extern pgprot_t PAGE_COPY;
extern pgprot_t PAGE_SHARED;
/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
extern unsigned long _PAGE_IE;
extern unsigned long _PAGE_E;
extern unsigned long _PAGE_CACHE;

extern unsigned long pg_iobits;
extern unsigned long _PAGE_ALL_SZ_BITS;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr)	(mem_map_zero)
/* PFNs are real physical page numbers.  However, mem_map only begins to
 * record per-page information starting at pfn_base.  This is to handle
 * systems where the first physical page in the machine is at some huge
 * physical address, such as 4GB.  This is common on a partitioned E10000,
 * for example.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	return __pte(paddr | pgprot_val(prot));
}
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
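/* Typical use: the fault path or a driver builds a PTE for a known page,
 * e.g.
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 *
 * which is just pfn_pte(page_to_pfn(page), vma->vm_page_prot); all of the
 * protection bits come from the pgprot argument.
 */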
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte = pfn_pte(page_nr, pgprot);

	return __pmd(pte_val(pte));
}
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))
#endif
/* This one can be done with two shifts. */
static inline unsigned long pte_pfn(pte_t pte)
{
	unsigned long ret;

	__asm__ __volatile__(
	"\n661:	sllx		%1, %2, %0\n"
	"	srlx		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sllx		%1, %4, %0\n"
	"	srlx		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (ret)
	: "r" (pte_val(pte)),
	  "i" (21), "i" (21 + PAGE_SHIFT),
	  "i" (8), "i" (8 + PAGE_SHIFT));

	return ret;
}
260 #define pte_page(x) pfn_to_page(pte_pfn(x))
static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	unsigned long mask, tmp;

	/* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
	 * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
	 *
	 * Even if we use negation tricks the result is still a 6
	 * instruction sequence, so don't try to play fancy and just
	 * do the most straightforward implementation.
	 *
	 * Note: We encode this into 3 sun4v 2-insn patch sequences.
	 */

	BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%2), %1\n"
	"	sethi		%%hi(%2), %0\n"
	"\n662:	or		%1, %%ulo(%2), %1\n"
	"	or		%0, %%lo(%2), %0\n"
	"\n663:	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%3), %1\n"
	"	sethi		%%hi(%3), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%3), %1\n"
	"	or		%0, %%lo(%3), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	"	.section	.sun_m7_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sethi		%%hi(%4), %0\n"
	"	.word		662b\n"
	"	or		%1, %%ulo(%4), %1\n"
	"	or		%0, %%lo(%4), %0\n"
	"	.word		663b\n"
	"	sllx		%1, 32, %1\n"
	"	or		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (mask), "=r" (tmp)
	: "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
	  "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
	       _PAGE_CP_4V | _PAGE_E_4V |
	       _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));

	return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}
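/* pte_modify() is what protection changes such as mprotect() boil down to:
 * the masked bits (physical address, dirty/accessed, cacheability, size and
 * huge-page bits) are kept from the old PTE and everything else is taken
 * from the new pgprot, e.g.
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */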
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_modify(pte, newprot);

	return __pmd(pte_val(pte));
}
#endif
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	unsigned long val = pgprot_val(prot);

	__asm__ __volatile__(
	"\n661:	andn		%0, %2, %0\n"
	"	or		%0, %3, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %4, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	"	.section	.sun_m7_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	andn		%0, %6, %0\n"
	"	or		%0, %5, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
	  "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
	  "i" (_PAGE_CP_4V));

	return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached".  That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached
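/* The usual consumer is a driver mapping device memory through mmap(), e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *
 * before calling io_remap_pfn_range() further below.
 */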
static inline unsigned long pte_dirty(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	return (pte_val(pte) & mask);
}
static inline unsigned long pte_write(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	return (pte_val(pte) & mask);
}
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
static inline unsigned long __pte_default_huge_mask(void)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%uhi(%1), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	nop\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));

	return mask;
}
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | __pte_default_huge_mask());
}

static inline bool is_default_hugetlb_pte(pte_t pte)
{
	unsigned long mask = __pte_default_huge_mask();

	return (pte_val(pte) & mask) == mask;
}

static inline bool is_hugetlb_pmd(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
}

static inline bool is_hugetlb_pud(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PUD_HUGE);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkhuge(pte);
	pte_val(pte) |= _PAGE_PMD_HUGE;

	return __pmd(pte_val(pte));
}
#endif
#else
static inline bool is_hugetlb_pte(pte_t pte)
{
	return false;
}
#endif
static inline pte_t __pte_mkhwwrite(pte_t pte)
{
	unsigned long val = pte_val(pte);

	/*
	 * Note: we only want to set the HW writable bit if the SW writable bit
	 * and the SW dirty bit are set.
	 */
	__asm__ __volatile__(
	"\n661:	or		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	or		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_W_4U), "i" (_PAGE_W_4V));

	return __pte(val);
}
static inline pte_t pte_mkdirty(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

	pte = __pte(val | mask);
	return pte_write(pte) ? __pte_mkhwwrite(pte) : pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
	  "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

	return __pte(val);
}
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	unsigned long val = pte_val(pte), mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

	pte = __pte(val | mask);
	return pte_dirty(pte) ? __pte_mkhwwrite(pte) : pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
	unsigned long val = pte_val(pte), tmp;

	__asm__ __volatile__(
	"\n661:	andn		%0, %3, %0\n"
	"	nop\n"
	"\n662:	nop\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%4), %1\n"
	"	sllx		%1, 32, %1\n"
	"	.word		662b\n"
	"	or		%1, %%lo(%4), %1\n"
	"	andn		%0, %1, %0\n"
	"	.previous\n"
	: "=r" (val), "=r" (tmp)
	: "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
	  "i" (_PAGE_WRITE_4V | _PAGE_W_4V));

	return __pte(val);
}
static inline pte_t pte_mkold(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	return __pte(pte_val(pte) & ~mask);
}
static inline pte_t pte_mkyoung(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	return __pte(pte_val(pte) | mask);
}
static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

static inline pte_t pte_mkmcd(pte_t pte)
{
	pte_val(pte) |= _PAGE_MCD_4V;
	return pte;
}

static inline pte_t pte_mknotmcd(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_MCD_4V;
	return pte;
}
static inline unsigned long pte_young(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	mov		%1, %0\n"
	"	nop\n"
	"	.section	.sun4v_2insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	sethi		%%uhi(%2), %0\n"
	"	sllx		%0, 32, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

	return (pte_val(pte) & mask);
}
static inline unsigned long pte_exec(pte_t pte)
{
	unsigned long mask;

	__asm__ __volatile__(
	"\n661:	sethi		%%hi(%1), %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	mov		%2, %0\n"
	"	.previous\n"
	: "=r" (mask)
	: "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));

	return (pte_val(pte) & mask);
}
static inline unsigned long pte_present(pte_t pte)
{
	unsigned long val = pte_val(pte);

	__asm__ __volatile__(
	"\n661:	and		%0, %2, %0\n"
	"	.section	.sun4v_1insn_patch, \"ax\"\n"
	"	.word		661b\n"
	"	and		%0, %3, %0\n"
	"	.previous\n"
	: "=r" (val)
	: "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));

	return val;
}
#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	return pte_val(a) & _PAGE_VALID;
}

static inline unsigned long pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}
#define pmd_leaf	pmd_large
static inline unsigned long pmd_large(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_pfn(pte);
}
#define pmd_write pmd_write
static inline unsigned long pmd_write(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_write(pte);
}

#define pud_write(pud)	pte_write(__pte(pud_val(pud)))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_dirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_dirty(pte);
}

#define pmd_young pmd_young
static inline unsigned long pmd_young(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_young(pte);
}

static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkold(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_wrprotect(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkdirty(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkclean(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkyoung(pte);

	return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));

	pte = pte_mkwrite_novma(pte);

	return __pmd(pte_val(pte));
}

static inline pgprot_t pmd_pgprot(pmd_t entry)
{
	unsigned long val = pmd_val(entry);

	return __pgprot(val);
}
#endif
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != 0UL;
}

#define pmd_none(pmd)			(!pmd_val(pmd))

/* pmd_bad() is only called on non-trans-huge PMDs.  Our encoding is
 * very simple, it's just the physical address.  PTE tables are of
 * size PAGE_SIZE, so make sure the sub-PAGE_SIZE bits are clear and
 * the top bits outside of the range of any physical address size we
 * support are clear as well.  We also validate the physical address
 * itself.
 */
#define pmd_bad(pmd)			(pmd_val(pmd) & ~PAGE_MASK)

#define pud_none(pud)			(!pud_val(pud))

#define pud_bad(pud)			(pud_val(pud) & ~PAGE_MASK)

#define p4d_none(p4d)			(!p4d_val(p4d))

#define p4d_bad(p4d)			(p4d_val(p4d) & ~PAGE_MASK)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd);
#else
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}
#endif

static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long val = __pa((unsigned long) (ptep));

	pmd_val(*pmdp) = val;
}

#define pud_set(pudp, pmdp)	\
	(pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	pte_t pte = __pte(pmd_val(pmd));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((unsigned long) __va(pfn << PAGE_SHIFT));
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));
	unsigned long pfn;

	pfn = pte_pfn(pte);

	return ((pmd_t *) __va(pfn << PAGE_SHIFT));
}

#define pmd_page(pmd)			virt_to_page((void *)pmd_page_vaddr(pmd))
#define pud_page(pud)			virt_to_page((void *)pud_pgtable(pud))
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud)		(pud_val(pud) != 0U)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define p4d_pgtable(p4d)		\
	((pud_t *) __va(p4d_val(p4d)))
#define p4d_present(p4d)		(p4d_val(p4d) != 0U)
#define p4d_clear(p4dp)			(p4d_val(*(p4dp)) = 0UL)

/* only used by the stubbed out hugetlb gup code, should never be called */
#define p4d_page(p4d)			NULL
#define pud_leaf	pud_large
static inline unsigned long pud_large(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	pte_t pte = __pte(pud_val(pud));

	return pte_pfn(pte);
}
/* Same in both SUN4V and SUN4U. */
#define pte_none(pte)			(!pte_val(pte))

#define p4d_set(p4dp, pudp)	\
	(p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))

/* We cannot include <linux/mm_types.h> at this point yet: */
extern struct mm_struct init_mm;
/* Actual page table PTE updates. */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm,
		   unsigned int hugepage_shift);

static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
				pte_t *ptep, pte_t orig, int fullmm,
				unsigned int hugepage_shift)
{
	/* It is more efficient to let flush_tlb_kernel_range()
	 * handle init_mm tlb flushes.
	 *
	 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
	 *             and SUN4V pte layout, so this inline test is fine.
	 */
	if (likely(mm != &init_mm) && pte_accessible(mm, orig))
		tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	set_pmd_at(mm, addr, pmdp, __pmd(0UL));
	return pmd;
}
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int fullmm)
{
	pte_t orig = *ptep;

	*ptep = pte;
	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
}
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, pte_t pte, unsigned int nr)
{
	arch_enter_lazy_mmu_mode();
	for (;;) {
		__set_pte_at(mm, addr, ptep, pte, 0);
		if (--nr == 0)
			break;
		ptep++;
		pte_val(pte) += PAGE_SIZE;
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
}
#define set_ptes set_ptes
#define pte_clear(mm,addr,ptep)		\
	set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm)	\
	__set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))
#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr)				\
({									\
	pte_t newpte = (pte);						\
	if (tlb_type != hypervisor && pte_present(pte)) {		\
		unsigned long this_pfn = pte_pfn(pte);			\
									\
		if (pfn_valid(this_pfn) &&				\
		    (((old_addr) ^ (new_addr)) & (1 << 13)))		\
			flush_dcache_folio_all(current->mm,		\
				page_folio(pfn_to_page(this_pfn)));	\
	}								\
									\
	newpte;								\
})
#endif
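/* The "& (1 << 13)" test above checks whether the move changes the page's
 * D-cache color: with 8K pages and a larger virtually-indexed D-cache,
 * bit 13 of the virtual address selects the cache bin, so a PTE that moves
 * to an address of the other color must have its old data flushed first.
 */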
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

void paging_init(void);
unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
void mmu_info(struct seq_file *);

struct vm_area_struct;
void update_mmu_cache_range(struct vm_fault *, struct vm_area_struct *,
		unsigned long addr, pte_t *ptep, unsigned int nr);
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif
/*
 * Encode/decode swap entries and swap PTEs.  Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------------> E <-- type ---> <------- zeroes -------->
 */
#define __swp_type(entry)	(((entry).val >> PAGE_SHIFT) & 0x7fUL)
#define __swp_offset(entry)	((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset)	\
	( (swp_entry_t) \
	  { \
		((((long)(type) & 0x7fUL) << PAGE_SHIFT) | \
		 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
	  } )
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
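/* Concretely, with PAGE_SHIFT == 13: bits 12-0 stay zero (so a swap PTE is
 * never valid or present), bits 19-13 hold the 7-bit swap type, bit 20 is
 * the exclusive marker (_PAGE_SWP_EXCLUSIVE above), and the swap offset
 * starts at bit 21.
 */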
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}
int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffffffffffUL)
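/* On sparc64 BITS_PER_LONG is 64, so the 4-bit iospace lands in bits 63-60;
 * GET_PFN() masks those same four bits back off and GET_IOSPACE() recovers
 * the iospace number by shifting them down.
 */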
int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);
void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pte_t pte);

int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long addr, pte_t oldpte);
#define __HAVE_ARCH_DO_SWAP_PAGE
static inline void arch_do_swap_page(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     pte_t pte, pte_t oldpte)
{
	/* If this is a new page being mapped in, there can be no
	 * ADI tags stored away for this page.  Skip looking for
	 * stored tags.
	 */
	if (pte_none(oldpte))
		return;

	if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V))
		adi_restore_tags(mm, vma, addr, pte);
}
#define __HAVE_ARCH_UNMAP_ONE
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr, pte_t oldpte)
{
	if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V))
		return adi_save_tags(mm, vma, addr, oldpte);
	return 0;
}
static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
	int space = GET_IOSPACE(pfn);
	unsigned long phys_base;

	phys_base = offset | (((unsigned long) space) << 32UL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
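/* A framebuffer driver mapping bus space would typically construct the pfn
 * with the iospace already encoded, e.g.
 *
 *	io_remap_pfn_range(vma, vma->vm_start,
 *			   MK_IOSPACE_PFN(space, paddr >> PAGE_SHIFT),
 *			   size, pgprot_noncached(vma->vm_page_prot));
 *
 * (space and paddr here are whatever the bus probe handed the driver) so
 * that GET_IOSPACE()/GET_PFN() above can split it back apart here.
 */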
static inline unsigned long __untagged_addr(unsigned long start)
{
	if (adi_capable()) {
		long addr = start;

		/* If userspace has passed a versioned address, kernel
		 * will not find it in the VMAs since it does not store
		 * the version tags in the list of VMAs.  Storing version
		 * tags in the list of VMAs is impractical since they can
		 * be changed any time from userspace without dropping into
		 * the kernel.  Any address search in VMAs will be done with
		 * non-versioned addresses.  Ensure the ADI version bits
		 * are dropped here by sign extending the last bit before
		 * the ADI bits.  The IOMMU does not implement version tags.
		 */
		return (addr << (long)adi_nbits()) >> (long)adi_nbits();
	}

	return start;
}
#define untagged_addr(addr) \
	((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
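/* Example: the ADI version tag occupies the top adi_nbits() bits of a user
 * address.  If adi_nbits() is 4, a pointer 0x0000300000001000 tagged with
 * version 5 arrives as 0x5000300000001000; the arithmetic shift pair above
 * replaces bits 63-60 with copies of bit 59 and hands back the plain
 * 0x0000300000001000 that the VMA lookup expects.
 */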
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	u64 prot;

	if (tlb_type == hypervisor) {
		prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
		if (write)
			prot |= _PAGE_WRITE_4V;
	} else {
		prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
		if (write)
			prot |= _PAGE_WRITE_4U;
	}

	return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
}
#define pte_access_permitted pte_access_permitted
/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try and use
 * the largest alignment possible such that larger PTEs can be used.
 */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
				   unsigned long, unsigned long,
				   unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA
void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
void sun4v_patch_tlb_handlers(void);

extern unsigned long cmdline_memory_size;

asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#define pmd_pgtable(PMD)	((pte_t *)pmd_page_vaddr(PMD))

#ifdef CONFIG_HUGETLB_PAGE

#define pud_leaf_size pud_leaf_size
extern unsigned long pud_leaf_size(pud_t pud);

#define pmd_leaf_size pmd_leaf_size
extern unsigned long pmd_leaf_size(pmd_t pmd);

#define pte_leaf_size pte_leaf_size
extern unsigned long pte_leaf_size(pte_t pte);

#endif /* CONFIG_HUGETLB_PAGE */

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */