]>
Commit | Line | Data |
---|---|---|
c577b098 GP |
1 | /* |
2 | * fixmap.h: compile-time virtual memory allocation | |
3 | * | |
4 | * This file is subject to the terms and conditions of the GNU General Public | |
5 | * License. See the file "COPYING" in the main directory of this archive | |
6 | * for more details. | |
7 | * | |
8 | * Copyright (C) 1998 Ingo Molnar | |
9 | * | |
10 | * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 | |
11 | * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009 | |
12 | */ | |
13 | ||
1965aae3 PA |
14 | #ifndef _ASM_X86_FIXMAP_H |
15 | #define _ASM_X86_FIXMAP_H | |
3ec96783 | 16 | |
c577b098 GP |
17 | #ifndef __ASSEMBLY__ |
18 | #include <linux/kernel.h> | |
19 | #include <asm/acpi.h> | |
20 | #include <asm/apicdef.h> | |
21 | #include <asm/page.h> | |
22 | #ifdef CONFIG_X86_32 | |
23 | #include <linux/threads.h> | |
24 | #include <asm/kmap_types.h> | |
25 | #else | |
f40c3300 | 26 | #include <uapi/asm/vsyscall.h> |
c577b098 GP |
27 | #endif |
28 | ||
/*
 * We can't declare FIXADDR_TOP as a variable for x86_64 because vsyscall
 * uses fixmaps that rely on FIXADDR_TOP for proper address calculation.
 * Because of this, FIXADDR_TOP x86 integration was left as later work.
 */
#ifdef CONFIG_X86_32
/* used by vmalloc.c, vsyscall.lds.S.
 *
 * Leave one empty page between vmalloc'ed areas and
 * the start of the fixmap.
 */
extern unsigned long __FIXADDR_TOP;
#define FIXADDR_TOP	((unsigned long)__FIXADDR_TOP)
#else
/*
 * On 64-bit, place the fixmap just below the PMD-aligned boundary above
 * the vsyscall page, leaving one guard page in between.
 */
#define FIXADDR_TOP	(round_up(VSYSCALL_ADDR + PAGE_SIZE, 1<<PMD_SHIFT) - \
			 PAGE_SIZE)
#endif
46 | ||
/*
 * cpu_entry_area is a percpu region in the fixmap that contains things
 * needed by the CPU and early entry/exit code.  Real types aren't used
 * for all fields here to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
	/* One page for the remapped GDT. */
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as
	 * a read-only guard page.
	 */
	struct SYSENTER_stack_page SYSENTER_stack_page;

	/*
	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

	/* Kernel-entry trampoline code, mapped at a fixed virtual address. */
	char entry_trampoline[PAGE_SIZE];

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries.
	 *
	 * In the future, this should have a separate slot for each stack
	 * with guard pages between them.
	 */
	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
#endif
};

/*
 * Number of fixmap pages one cpu_entry_area occupies.  The struct size
 * must be an exact multiple of PAGE_SIZE (checked by BUILD_BUG_ON in
 * __get_cpu_entry_area_page_index()).
 */
#define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)

extern void setup_cpu_entry_areas(void);
/*
 * Here we define all the compile-time 'special' virtual
 * addresses. The point is to have a constant address at
 * compile time, but to set the physical address only
 * in the boot process.
 * for x86_32: We allocate these special addresses
 * from the end of virtual memory (0xfffff000) backwards.
 * Also this lets us do fail-safe vmalloc(), we
 * can guarantee that these special addresses and
 * vmalloc()-ed addresses never overlap.
 *
 * These 'compile-time allocated' memory buffers are
 * fixed-size 4k pages (or larger if used with an increment
 * higher than 1). Use set_fixmap(idx,phys) to associate
 * physical memory with fixmap indices.
 *
 * TLB entries of such buffers will not be flushed across
 * task switches.
 */
enum fixed_addresses {
#ifdef CONFIG_X86_32
	FIX_HOLE,
#else
#ifdef CONFIG_X86_VSYSCALL_EMULATION
	/* Pinned so that __fix_to_virt(VSYSCALL_PAGE) == VSYSCALL_ADDR. */
	VSYSCALL_PAGE = (FIXADDR_TOP - VSYSCALL_ADDR) >> PAGE_SHIFT,
#endif
#endif
	FIX_DBGP_BASE,
	FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	FIX_OHCI1394_BASE,
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	FIX_APIC_BASE,	/* local (CPU) APIC -- required for SMP or not */
#endif
#ifdef CONFIG_X86_IO_APIC
	FIX_IO_APIC_BASE_0,
	FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
#endif
	FIX_RO_IDT,	/* Virtual mapping for read-only IDT */
#ifdef CONFIG_X86_32
	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#ifdef CONFIG_PCI_MMCONFIG
	FIX_PCIE_MCFG,
#endif
#endif
#ifdef CONFIG_PARAVIRT
	FIX_PARAVIRT_BOOTMAP,
#endif
	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
#ifdef CONFIG_X86_INTEL_MID
	FIX_LNW_VRTC,
#endif
	/*
	 * Per-CPU cpu_entry_area slots: CPU_ENTRY_AREA_PAGES fixmap pages
	 * per processor (the area includes the remapped GDT -- see
	 * struct cpu_entry_area).
	 */
	FIX_CPU_ENTRY_AREA_TOP,
	FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1,

#ifdef CONFIG_ACPI_APEI_GHES
	/* Used for GHES mapping from assorted contexts */
	FIX_APEI_GHES_IRQ,
	FIX_APEI_GHES_NMI,
#endif

	__end_of_permanent_fixed_addresses,

	/*
	 * 512 temporary boot-time mappings, used by early_ioremap(),
	 * before ioremap() is functional.
	 *
	 * If necessary we round it up to the next 512 pages boundary so
	 * that we can have a single pgd entry and a single pte table:
	 */
#define NR_FIX_BTMAPS		64
#define FIX_BTMAPS_SLOTS	8
#define TOTAL_FIX_BTMAPS	(NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
	/*
	 * Align the btmap range to a PTE-table boundary when it would
	 * otherwise straddle one; otherwise start it right after the
	 * permanent fixed addresses.
	 */
	FIX_BTMAP_END =
	 (__end_of_permanent_fixed_addresses ^
	  (__end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS - 1)) &
	 -PTRS_PER_PTE
	 ? __end_of_permanent_fixed_addresses + TOTAL_FIX_BTMAPS -
	   (__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
	 : __end_of_permanent_fixed_addresses,
	FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
#ifdef CONFIG_X86_32
	FIX_WP_TEST,
#endif
#ifdef CONFIG_INTEL_TXT
	FIX_TBOOT_BASE,
#endif
	__end_of_fixed_addresses
};
180 | ||
181 | ||
extern void reserve_top_address(unsigned long reserve);

/* Size/extent of the permanent fixmap region; grows downward from FIXADDR_TOP. */
#define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)

extern int fixmaps_set;

/* kmap support (pte used by highmem kmap code). */
extern pte_t *kmap_pte;
#define kmap_prot PAGE_KERNEL
extern pte_t *pkmap_page_table;
192 | ||
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
void native_set_fixmap(enum fixed_addresses idx,
		       phys_addr_t phys, pgprot_t flags);

#ifndef CONFIG_PARAVIRT
/*
 * Without paravirt, __set_fixmap() is simply the native implementation.
 * (With CONFIG_PARAVIRT, the paravirt layer provides __set_fixmap().)
 */
static inline void __set_fixmap(enum fixed_addresses idx,
				phys_addr_t phys, pgprot_t flags)
{
	native_set_fixmap(idx, phys, flags);
}
#endif
204 | ||
/*
 * FIXMAP_PAGE_NOCACHE is used for MMIO. Memory encryption is not
 * supported for MMIO addresses, so make sure that the memory encryption
 * mask is not part of the page attributes.
 */
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_IO_NOCACHE

/*
 * Early memremap routines used for in-place encryption. The mappings created
 * by these routines are intended to be used as temporary mappings.
 */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size);
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size);
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size);
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size);

#include <asm-generic/fixmap.h>

/* After boot, "late" fixmap updates go through the regular __set_fixmap(). */
#define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
#define __late_clear_fixmap(idx) __set_fixmap(idx, 0, __pgprot(0))

void __early_set_fixmap(enum fixed_addresses idx,
			phys_addr_t phys, pgprot_t flags);
232 | ||
ef8813ab AL |
233 | static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page) |
234 | { | |
235 | BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0); | |
236 | ||
237 | return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page; | |
238 | } | |
239 | ||
/*
 * Convert a byte offset within struct cpu_entry_area to a fixmap index
 * for the given CPU.  The offset must be page-aligned (compile-time
 * checked via BUILD_BUG_ON in the statement expression).
 */
#define __get_cpu_entry_area_offset_index(cpu, offset) ({		\
	BUILD_BUG_ON(offset % PAGE_SIZE != 0);				\
	__get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE);	\
	})

/* Fixmap index of a named cpu_entry_area field for the given CPU. */
#define get_cpu_entry_area_index(cpu, field) \
	__get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field))
247 | ||
/* Return the virtual address of the given CPU's cpu_entry_area. */
static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned int idx = __get_cpu_entry_area_page_index(cpu, 0);

	return (struct cpu_entry_area *)__fix_to_virt(idx);
}
252 | ||
0f9a4810 AL |
253 | static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu) |
254 | { | |
c482feef | 255 | return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack; |
0f9a4810 AL |
256 | } |
257 | ||
c577b098 | 258 | #endif /* !__ASSEMBLY__ */ |
1965aae3 | 259 | #endif /* _ASM_X86_FIXMAP_H */ |