#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>

extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512];
extern pgd_t init_level4_pgt[];

#define swapper_pg_dir init_level4_pgt

extern void paging_init(void);

#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);


static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	*ptep = native_make_pte(0);
}

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* native_local_ptep_get_and_clear,
	   but duplicated because of cyclic dependency */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}

static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/* native_local_pmdp_get_and_clear,
	   but duplicated because of cyclic dependency */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}
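
/*
 * Example of how the native accessors above compose (illustrative only;
 * "ptep", "pfn" and "prot" are hypothetical values supplied by a caller):
 *
 *	pte_t entry = pfn_pte(pfn, prot);		// build an entry
 *	native_set_pte(ptep, entry);			// install it
 *	...
 *	pte_t old = native_ptep_get_and_clear(ptep);	// xchg() when SMP
 */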

extern void sync_global_pgds(unsigned long start, unsigned long end,
			     int removed);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/*
 * Level 4 access.
 */
static inline int pgd_large(pgd_t pgd) { return 0; }
#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)

/* PUD - Level3 access */

/* PMD - Level 2 access */
#define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |	\
					_PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
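
/*
 * Example (illustrative only; "pgoff" is a hypothetical file offset in
 * pages that fits in PTE_FILE_MAX_BITS): a non-linear file pte round-trips
 * through the two macros above:
 *
 *	pte_t pte = pgoff_to_pte(pgoff);	// carries _PAGE_FILE
 *	pte_to_pgoff(pte) == pgoff;		// offset recovered
 */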

/* PTE - Level 1 access. */

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
#ifdef CONFIG_NUMA_BALANCING
/* Automatic NUMA balancing needs to be distinguishable from swap entries */
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 2)
#else
#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
#endif

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)			(((x).val >> (_PAGE_BIT_PRESENT + 1)) \
					 & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)			((x).val >> SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					 ((type) << (_PAGE_BIT_PRESENT + 1)) \
					 | ((offset) << SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
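
/*
 * Example of the swap encoding above (illustrative only; the type and
 * offset values are made up):
 *
 *	swp_entry_t swp = __swp_entry(1, 0x1234);
 *	__swp_type(swp);			// 1
 *	__swp_offset(swp);			// 0x1234
 *	pte_t pte = __swp_entry_to_pte(swp);	// non-present pte
 *	__pte_to_swp_entry(pte).val == swp.val;	// round trip
 */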

extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pgtable_cache_init()   do { } while (0)
#define check_pgt_cache()      do { } while (0)

#define PAGE_AGP    PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)
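
/*
 * Example (illustrative only): for a canonical kernel (high-half) virtual
 * address "v", the two kcore helpers above are inverses, because the bits
 * stripped by __VIRTUAL_MASK are exactly the sign-extension bits that
 * kc_offset_to_vaddr() sets again:
 *
 *	kc_offset_to_vaddr(kc_vaddr_to_offset(v)) == v
 */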

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)
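
/*
 * Example (illustrative only; "pfn" is a hypothetical page frame number):
 * with the virtually mapped memmap, looking up the struct page for a pfn
 * is plain pointer arithmetic, which is what the sparsemem-vmemmap
 * __pfn_to_page() expands to:
 *
 *	struct page *page = vmemmap + pfn;
 */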

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_64_H */