/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ld script for the x86 kernel
 *
 * Historic 32-bit version written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 * Modernisation, unification and other changes and fixes:
 * Copyright (C) 2007-2009 Sam Ravnborg <sam@ravnborg.org>
 *
 * Don't define absolute symbols unless you know that the symbol value
 * should remain constant even if the kernel image is relocated at run
 * time. Absolute symbols are not relocated. If a symbol's value should
 * change when the kernel is relocated, make the symbol section-relative
 * and put it inside the section definition.
 */
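
/*
 * A short illustration of the rule above, using symbols that appear
 * later in this script: a symbol assigned inside a section definition
 * is section-relative and moves with the image,
 *
 *      .data : { _sdata = .; ... }
 *
 * while ABSOLUTE() pins a value that must not change when the image is
 * relocated, e.g. the physical entry point:
 *
 *      phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
 */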

#ifdef CONFIG_X86_32
#define LOAD_OFFSET __PAGE_OFFSET
#else
#define LOAD_OFFSET __START_KERNEL_map
#endif

#define RUNTIME_DISCARD_EXIT
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN        16

#include <asm-generic/vmlinux.lds.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page_types.h>
#include <asm/orc_lookup.h>
#include <asm/cache.h>
#include <asm/boot.h>

#undef i386     /* in case the preprocessor is a 32bit one */

OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)

#ifdef CONFIG_X86_32
OUTPUT_ARCH(i386)
ENTRY(phys_startup_32)
#else
OUTPUT_ARCH(i386:x86-64)
ENTRY(phys_startup_64)
#endif

jiffies = jiffies_64;
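
/*
 * x86 is little-endian, so aliasing jiffies to jiffies_64 lets 32-bit
 * readers of jiffies see the low 32 bits of the 64-bit counter at the
 * same address.
 */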

#if defined(CONFIG_X86_64)
/*
 * On 64-bit, align RODATA to 2MB so we retain large page mappings for
 * boundaries spanning kernel text, rodata and data sections.
 *
 * However, kernel identity mappings will have different RWX permissions
 * for the pages mapping text and for the (freed) padding pages around
 * the text section, so the kernel identity mappings are broken up into
 * smaller pages. On 64-bit, kernel text and kernel identity mappings
 * are separate, so we can enable protection checks as well as retain
 * 2MB large page mappings for kernel text.
 */
#define X86_ALIGN_RODATA_BEGIN  . = ALIGN(HPAGE_SIZE);

#define X86_ALIGN_RODATA_END \
        . = ALIGN(HPAGE_SIZE); \
        __end_rodata_hpage_align = .; \
        __end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN  . = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END    . = ALIGN(PMD_SIZE);

/*
 * This section contains data which will be mapped as decrypted. Memory
 * encryption operates on a page basis. Make this section PMD-aligned
 * to avoid splitting the pages while mapping the section early.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted, to avoid exposing more than we wish.
 */
#define BSS_DECRYPTED \
        . = ALIGN(PMD_SIZE); \
        __start_bss_decrypted = .; \
        *(.bss..decrypted); \
        . = ALIGN(PAGE_SIZE); \
        __start_bss_decrypted_unused = .; \
        . = ALIGN(PMD_SIZE); \
        __end_bss_decrypted = .;
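
/*
 * For reference, C code opts into this section via an attribute along
 * the lines of (sketch; see arch/x86/include/asm/mem_encrypt.h for the
 * real definition):
 *
 *      #define __bss_decrypted __section(".bss..decrypted")
 *
 * so data shared with a hypervisor can be declared e.g.
 * "static u64 some_shared_data __bss_decrypted;".
 */
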
#else

#define X86_ALIGN_RODATA_BEGIN
#define X86_ALIGN_RODATA_END \
        . = ALIGN(PAGE_SIZE); \
        __end_rodata_aligned = .;

#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
#define BSS_DECRYPTED

#endif

PHDRS {
        text PT_LOAD FLAGS(5);          /* R_E */
        data PT_LOAD FLAGS(6);          /* RW_ */
#ifdef CONFIG_X86_64
#ifdef CONFIG_SMP
        percpu PT_LOAD FLAGS(6);        /* RW_ */
#endif
        init PT_LOAD FLAGS(7);          /* RWE */
#endif
        note PT_NOTE FLAGS(0);          /* ___ */
}
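
/*
 * The FLAGS() values above are ELF program header p_flags bit masks:
 * PF_X = 1, PF_W = 2, PF_R = 4. So FLAGS(5) is PF_R|PF_X ("R_E"),
 * FLAGS(6) is PF_R|PF_W ("RW_") and FLAGS(7) is PF_R|PF_W|PF_X ("RWE").
 */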

SECTIONS
{
#ifdef CONFIG_X86_32
        . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
        phys_startup_32 = ABSOLUTE(startup_32 - LOAD_OFFSET);
#else
        . = __START_KERNEL;
        phys_startup_64 = ABSOLUTE(startup_64 - LOAD_OFFSET);
#endif

        /* Text and read-only data */
        .text : AT(ADDR(.text) - LOAD_OFFSET) {
                _text = .;
                _stext = .;
                /* bootstrapping code */
                HEAD_TEXT
                TEXT_TEXT
                SCHED_TEXT
                LOCK_TEXT
                KPROBES_TEXT
                SOFTIRQENTRY_TEXT
#ifdef CONFIG_RETPOLINE
                *(.text..__x86.indirect_thunk)
                *(.text..__x86.return_thunk)
#endif
                STATIC_CALL_TEXT

                ALIGN_ENTRY_TEXT_BEGIN
                *(.text..__x86.rethunk_untrain)
                ENTRY_TEXT

#ifdef CONFIG_CPU_SRSO
                /*
                 * See the comment above srso_alias_untrain_ret()'s
                 * definition.
                 */
                . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
                *(.text..__x86.rethunk_safe)
#endif
                ALIGN_ENTRY_TEXT_END
                *(.gnu.warning)

        } :text = 0xcccccccc
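
        /*
         * The "= 0xcccccccc" output section fill means any gaps in .text
         * are padded with 0xcc, the x86 INT3 opcode, so that straying
         * into padding traps instead of executing arbitrary bytes.
         */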

        /* End of text section, which should occupy whole number of pages */
        _etext = .;
        . = ALIGN(PAGE_SIZE);

        X86_ALIGN_RODATA_BEGIN
        RO_DATA(PAGE_SIZE)
        X86_ALIGN_RODATA_END

        /* Data */
        .data : AT(ADDR(.data) - LOAD_OFFSET) {
                /* Start of data section */
                _sdata = .;

                /* init_task */
                INIT_TASK_DATA(THREAD_SIZE)

#ifdef CONFIG_X86_32
                /* 32 bit has nosave before _edata */
                NOSAVE_DATA
#endif

                PAGE_ALIGNED_DATA(PAGE_SIZE)

                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)

                DATA_DATA
                CONSTRUCTORS

                /* rarely changed data like cpu maps */
                READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)

                /* End of data section */
                _edata = .;
        } :data

        BUG_TABLE

        ORC_UNWIND_TABLE

        . = ALIGN(PAGE_SIZE);
        __vvar_page = .;

        .vvar : AT(ADDR(.vvar) - LOAD_OFFSET) {
                /* work around gold bug 13023 */
                __vvar_beginning_hack = .;

                /* Place all vvars at the offsets in asm/vvar.h. */
#define EMIT_VVAR(name, offset) \
                . = __vvar_beginning_hack + offset; \
                *(.vvar_ ## name)
#include <asm/vvar.h>
#undef EMIT_VVAR
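
                /*
                 * Illustration with hypothetical values (asm/vvar.h has
                 * the authoritative list): an entry like
                 *
                 *      EMIT_VVAR(vdso_data, 128)
                 *
                 * expands to ". = __vvar_beginning_hack + 128;" followed
                 * by "*(.vvar_vdso_data)", pinning that object at a
                 * fixed, userspace-visible offset within the vvar page.
                 */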

                /*
                 * Pad the rest of the page with zeros. Otherwise the loader
                 * can leave garbage here.
                 */
                . = __vvar_beginning_hack + PAGE_SIZE;
        } :data

        . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE);

        /* Init code and data - will be freed after init */
        . = ALIGN(PAGE_SIZE);
        .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
                __init_begin = .; /* paired with __init_end */
        }

#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
        /*
         * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
         * output PHDR, so the next output section - .init.text - should
         * start another segment - init.
         */
        PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
        ASSERT(SIZEOF(.data..percpu) < CONFIG_PHYSICAL_START,
               "per-CPU data too large - increase CONFIG_PHYSICAL_START")
#endif

        INIT_TEXT_SECTION(PAGE_SIZE)
#ifdef CONFIG_X86_64
        :init
#endif

        /*
         * Section for code used exclusively before alternatives are run. All
         * references to such code must be patched out by alternatives, normally
         * by using the X86_FEATURE_ALWAYS CPU feature bit.
         *
         * See static_cpu_has() for an example.
         */
        .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
                *(.altinstr_aux)
        }
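
        /*
         * Rough sketch of how this is used (see static_cpu_has() in
         * <asm/cpufeature.h> for the real code): an asm goto tests the
         * feature bit from code placed in .altinstr_aux, and alternatives
         * later patch the call site into a static jump to the yes/no
         * label, after which nothing references .altinstr_aux any more.
         */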

        INIT_DATA_SECTION(16)

        .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
                __x86_cpu_dev_start = .;
                *(.x86_cpu_dev.init)
                __x86_cpu_dev_end = .;
        }

#ifdef CONFIG_X86_INTEL_MID
        .x86_intel_mid_dev.init : AT(ADDR(.x86_intel_mid_dev.init) - \
                                     LOAD_OFFSET) {
                __x86_intel_mid_dev_start = .;
                *(.x86_intel_mid_dev.init)
                __x86_intel_mid_dev_end = .;
        }
#endif

#ifdef CONFIG_RETPOLINE
        /*
         * List of instructions that call/jmp/jcc to retpoline thunks
         * __x86_indirect_thunk_*(). These instructions can be patched along
         * with alternatives, after which the section can be freed.
         */
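        /*
         * For example, with retpolines an indirect "call *%rax" is
         * emitted as "call __x86_indirect_thunk_rax"; that call site is
         * recorded here so apply_retpolines() can later rewrite it in
         * place once the CPU is known not to need the thunk.
         */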
        . = ALIGN(8);
        .retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
                __retpoline_sites = .;
                *(.retpoline_sites)
                __retpoline_sites_end = .;
        }

        . = ALIGN(8);
        .return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
                __return_sites = .;
                *(.return_sites)
                __return_sites_end = .;
        }

        . = ALIGN(8);
        .call_sites : AT(ADDR(.call_sites) - LOAD_OFFSET) {
                __call_sites = .;
                *(.call_sites)
                __call_sites_end = .;
        }
#endif

#ifdef CONFIG_X86_KERNEL_IBT
        . = ALIGN(8);
        .ibt_endbr_seal : AT(ADDR(.ibt_endbr_seal) - LOAD_OFFSET) {
                __ibt_endbr_seal = .;
                *(.ibt_endbr_seal)
                __ibt_endbr_seal_end = .;
        }
#endif

#ifdef CONFIG_FINEIBT
        . = ALIGN(8);
        .cfi_sites : AT(ADDR(.cfi_sites) - LOAD_OFFSET) {
                __cfi_sites = .;
                *(.cfi_sites)
                __cfi_sites_end = .;
        }
#endif

        /*
         * struct alt_instr entries. From the header (alternative.h):
         * "Alternative instructions for different CPU types or capabilities"
         * Think locking instructions on spinlocks.
         */
        . = ALIGN(8);
        .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
                __alt_instructions = .;
                *(.altinstructions)
                __alt_instructions_end = .;
        }
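
        /*
         * Each entry is roughly (sketch; <asm/alternative.h> is
         * authoritative): a relative offset to the original instruction,
         * a relative offset to the replacement, the CPU feature flags
         * that select it, and the two instruction lengths - enough for
         * the patching code to locate and size both sides safely.
         */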

        /*
         * And here are the replacement instructions. The linker sticks
         * them in as binary blobs. The .altinstructions section carries
         * enough data to locate and size them so the kernel can be
         * patched safely.
         */
        .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
                *(.altinstr_replacement)
        }

        . = ALIGN(8);
        .apicdrivers : AT(ADDR(.apicdrivers) - LOAD_OFFSET) {
                __apicdrivers = .;
                *(.apicdrivers);
                __apicdrivers_end = .;
        }

        . = ALIGN(8);
        /*
         * .exit.text is discarded at runtime, not link time, to deal with
         * references from .altinstructions
         */
        .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
                EXIT_TEXT
        }

        .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
                EXIT_DATA
        }

#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
        PERCPU_SECTION(INTERNODE_CACHE_BYTES)
#endif

        . = ALIGN(PAGE_SIZE);

        /* freed after init ends here */
        .init.end : AT(ADDR(.init.end) - LOAD_OFFSET) {
                __init_end = .;
        }

        /*
         * smp_locks might be freed after init
         * start/end must be page aligned
         */
        . = ALIGN(PAGE_SIZE);
        .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
                __smp_locks = .;
                *(.smp_locks)
                . = ALIGN(PAGE_SIZE);
                __smp_locks_end = .;
        }

#ifdef CONFIG_X86_64
        .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
                NOSAVE_DATA
        }
#endif

        /* BSS */
        . = ALIGN(PAGE_SIZE);
        .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
                __bss_start = .;
                *(.bss..page_aligned)
                . = ALIGN(PAGE_SIZE);
                *(BSS_MAIN)
                BSS_DECRYPTED
                . = ALIGN(PAGE_SIZE);
                __bss_stop = .;
        }

        /*
         * The memory occupied from _text to here, __end_of_kernel_reserve, is
         * automatically reserved in setup_arch(). Anything after here must be
         * explicitly reserved using memblock_reserve() or it will be discarded
         * and treated as available memory.
         */
        __end_of_kernel_reserve = .;

        . = ALIGN(PAGE_SIZE);
        .brk : AT(ADDR(.brk) - LOAD_OFFSET) {
                __brk_base = .;
                . += 64 * 1024;         /* 64k alignment slop space */
                *(.bss..brk)            /* areas brk users have reserved */
                __brk_limit = .;
        }
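
        /*
         * Sketch of how .bss..brk is populated (see RESERVE_BRK() in
         * <asm/setup.h> for the real macro): boot code declares a chunk
         * with something like RESERVE_BRK(name, size), and extend_brk()
         * hands out that space during early boot, before the normal
         * allocators are up.
         */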

        . = ALIGN(PAGE_SIZE);           /* keep VO_INIT_SIZE page aligned */
        _end = .;

#ifdef CONFIG_AMD_MEM_ENCRYPT
        /*
         * Early scratch/workarea section: Lives outside of the kernel proper
         * (_text - _end).
         *
         * Resides after _end because even though the .brk section is after
         * __end_of_kernel_reserve, the .brk section is later reserved as a
         * part of the kernel. Since this scratch area is located after
         * __end_of_kernel_reserve, it will be discarded and become part of
         * the available memory. As such, it can only be used by very early
         * boot code and must not be needed afterwards.
         *
         * Currently used by SME for performing in-place encryption of the
         * kernel during boot. Resides on a 2MB boundary to simplify the
         * pagetable setup used for SME in-place encryption.
         */
        . = ALIGN(HPAGE_SIZE);
        .init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
                __init_scratch_begin = .;
                *(.init.scratch)
                . = ALIGN(HPAGE_SIZE);
                __init_scratch_end = .;
        }
#endif

        STABS_DEBUG
        DWARF_DEBUG
        ELF_DETAILS

        DISCARDS

        /*
         * Make sure that the .got.plt is either completely empty or it
         * contains only the lazy dispatch entries.
         */
        .got.plt (INFO) : { *(.got.plt) }
        ASSERT(SIZEOF(.got.plt) == 0 ||
#ifdef CONFIG_X86_64
               SIZEOF(.got.plt) == 0x18,
#else
               SIZEOF(.got.plt) == 0xc,
#endif
               "Unexpected GOT/PLT entries detected!")

        /*
         * Sections that should stay zero sized, which is safer to
         * explicitly check instead of blindly discarding.
         */
        .got : {
                *(.got) *(.igot.*)
        }
        ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")

        .plt : {
                *(.plt) *(.plt.*) *(.iplt)
        }
        ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")

        .rel.dyn : {
                *(.rel.*) *(.rel_*)
        }
        ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")

        .rela.dyn : {
                *(.rela.*) *(.rela_*)
        }
        ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
}

/*
 * The ASSERT() sink to . is intentional, for binutils 2.14 compatibility:
 */
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
           "kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_X86_64
/*
 * Per-cpu symbols which need to be offset from __per_cpu_load
 * for the boot processor.
 */
#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(fixed_percpu_data);
INIT_PER_CPU(irq_stack_backing_store);
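
/*
 * E.g. "INIT_PER_CPU(gdt_page);" expands to
 * "init_per_cpu__gdt_page = ABSOLUTE(gdt_page) + __per_cpu_load;",
 * giving the boot CPU's copy of the zero-based percpu symbol inside
 * the initial __per_cpu_load area.
 */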

#ifdef CONFIG_SMP
. = ASSERT((fixed_percpu_data == 0),
           "fixed_percpu_data is not at start of per-cpu area");
#endif

#ifdef CONFIG_CPU_UNRET_ENTRY
. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
#endif

#ifdef CONFIG_CPU_SRSO
. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
/*
 * GNU ld cannot do XOR until 2.41.
 * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
 *
 * LLVM lld cannot do XOR until lld-17.
 * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
 *
 * Instead do: (A | B) - (A & B) in order to compute the XOR
 * of the two function addresses:
 */
. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
            (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
           "SRSO function pair won't alias");
#endif

#endif /* CONFIG_X86_64 */