/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

16 | #ifndef _ASM_TILE_PGTABLE_64_H | |
17 | #define _ASM_TILE_PGTABLE_64_H | |
18 | ||
19 | /* The level-0 page table breaks the address space into 32-bit chunks. */ | |
20 | #define PGDIR_SHIFT HV_LOG2_L1_SPAN | |
21 | #define PGDIR_SIZE HV_L1_SPAN | |
22 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | |
23 | #define PTRS_PER_PGD HV_L0_ENTRIES | |
d5d14ed6 CM |
24 | #define PGD_INDEX(va) HV_L0_INDEX(va) |
25 | #define SIZEOF_PGD HV_L0_SIZE | |
18aecc2b CM |
26 | |
/*
 * The level-1 index is defined by the huge page size.  A PMD is composed
 * of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
 */
#define PMD_SHIFT	HPAGE_SHIFT
#define PMD_SIZE	HPAGE_SIZE
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	_HV_L1_ENTRIES(HPAGE_SHIFT)
#define PMD_INDEX(va)	_HV_L1_INDEX(va, HPAGE_SHIFT)
#define SIZEOF_PMD	_HV_L1_SIZE(HPAGE_SHIFT)

/*
 * The level-2 index is defined by the difference between the huge
 * page size and the normal page size.  A PTE is composed of
 * PTRS_PER_PTE pte_t's and is the bottom level of the page table.
 * Note that the hypervisor docs use PTE for what we call pte_t, so
 * this nomenclature is somewhat confusing.
 */
#define PTRS_PER_PTE	_HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
#define PTE_INDEX(va)	_HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
#define SIZEOF_PTE	_HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)

/*
 * Align the vmalloc area to an L2 page table.  Omit guard pages at
 * the beginning and end for simplicity (particularly in the per-cpu
 * memory allocation code).  The vmalloc code puts in an internal
 * guard page between each allocation.
 */
#define _VMALLOC_END	MEM_SV_START
#define VMALLOC_END	_VMALLOC_END
#define VMALLOC_START	_VMALLOC_START

18aecc2b CM |
59 | #ifndef __ASSEMBLY__ |
60 | ||
61 | /* We have no pud since we are a three-level page table. */ | |
9849a569 | 62 | #define __ARCH_USE_5LEVEL_HACK |
18aecc2b CM |
63 | #include <asm-generic/pgtable-nopud.h> |
64 | ||
a718e10c CM |
65 | /* |
66 | * pmds are the same as pgds and ptes, so converting is a no-op. | |
67 | */ | |
68 | #define pmd_pte(pmd) (pmd) | |
69 | #define pmdp_ptep(pmdp) (pmdp) | |
70 | #define pte_pmd(pte) (pte) | |
71 | ||
72 | #define pud_pte(pud) ((pud).pgd) | |
73 | ||
18aecc2b CM |
74 | static inline int pud_none(pud_t pud) |
75 | { | |
76 | return pud_val(pud) == 0; | |
77 | } | |
78 | ||
79 | static inline int pud_present(pud_t pud) | |
80 | { | |
81 | return pud_val(pud) & _PAGE_PRESENT; | |
82 | } | |
83 | ||
a718e10c CM |
84 | static inline int pud_huge_page(pud_t pud) |
85 | { | |
86 | return pud_val(pud) & _PAGE_HUGE_PAGE; | |
87 | } | |
88 | ||
/* Report a corrupt pmd entry, identifying the call site. */
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd 0x%016llx\n", __FILE__, __LINE__, pmd_val(e))
18aecc2b CM |
91 | |
92 | static inline void pud_clear(pud_t *pudp) | |
93 | { | |
94 | __pte_clear(&pudp->pgd); | |
95 | } | |
96 | ||
97 | static inline int pud_bad(pud_t pud) | |
98 | { | |
99 | return ((pud_val(pud) & _PAGE_ALL) != _PAGE_TABLE); | |
100 | } | |
101 | ||
/* Return the page-table frame number (ptfn) that a pud_t points at. */
#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)

/* Return the page frame number (pfn) that a pud_t points at. */
#define pud_pfn(pud) pte_pfn(pud_pte(pud))

/*
 * A given kernel pud_t maps to a kernel pmd_t table at a specific
 * virtual address.  Since kernel pmd_t tables can be aligned at
 * sub-page granularity, this macro can return non-page-aligned
 * pointers, despite its name.
 */
#define pud_page_vaddr(pud) \
	(__va((phys_addr_t)pud_ptfn(pud) << HV_LOG2_PAGE_TABLE_ALIGN))

/*
 * A pud_t points to a pmd_t array.  Since we can have multiple per
 * page, we don't have a one-to-one mapping of pud_t's to pages.
 */
#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
18aecc2b CM |
122 | |
123 | static inline unsigned long pud_index(unsigned long address) | |
124 | { | |
125 | return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); | |
126 | } | |
/* Pointer to the pmd entry for @address within the table that @pud maps. */
#define pmd_offset(pud, address) \
	((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))

/* Normalize an address to having the correct high bits set. */
#define pgd_addr_normalize pgd_addr_normalize
static inline unsigned long pgd_addr_normalize(unsigned long addr)
{
	/* Shift left then arithmetic-shift right to sign-extend from
	 * the VA width up to the full machine word. */
	return ((long)addr << (CHIP_WORD_SIZE() - CHIP_VA_WIDTH())) >>
		(CHIP_WORD_SIZE() - CHIP_VA_WIDTH());
}

138 | ||
139 | /* We don't define any pgds for these addresses. */ | |
140 | static inline int pgd_addr_invalid(unsigned long addr) | |
141 | { | |
acbde1db | 142 | return addr >= KERNEL_HIGH_VADDR || addr != pgd_addr_normalize(addr); |
18aecc2b CM |
143 | } |
144 | ||
145 | /* | |
146 | * Use atomic instructions to provide atomicity against the hypervisor. | |
147 | */ | |
148 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | |
149 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, | |
150 | unsigned long addr, pte_t *ptep) | |
151 | { | |
152 | return (__insn_fetchand(&ptep->val, ~HV_PTE_ACCESSED) >> | |
153 | HV_PTE_INDEX_ACCESSED) & 0x1; | |
154 | } | |
155 | ||
156 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | |
157 | static inline void ptep_set_wrprotect(struct mm_struct *mm, | |
158 | unsigned long addr, pte_t *ptep) | |
159 | { | |
160 | __insn_fetchand(&ptep->val, ~HV_PTE_WRITABLE); | |
161 | } | |
162 | ||
163 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | |
164 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | |
165 | unsigned long addr, pte_t *ptep) | |
166 | { | |
167 | return hv_pte(__insn_exch(&ptep->val, 0UL)); | |
168 | } | |
169 | ||
170 | #endif /* __ASSEMBLY__ */ | |
171 | ||
172 | #endif /* _ASM_TILE_PGTABLE_64_H */ |