/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/boot/head.S
 *
 *  Copyright (C) 1991, 1992, 1993  Linus Torvalds
 */

/*
 *  head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there.  This is also
 * useful for future device drivers that access the BIOS via VM86 mode.
 */

/*
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
	.code32
	.text

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/boot.h>
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>
#include "pgtable.h"

/*
 * Locally defined symbols should be marked hidden:
 */
.hidden _bss
.hidden _ebss
.hidden _got
.hidden _egot

	__HEAD
	.code32
SYM_FUNC_START(startup_32)
	/*
	 * 32bit entry is 0 and it is ABI so immutable!
	 * If we come here directly from a bootloader,
	 * kernel(text+data+bss+brk), ramdisk, zero_page, command line
	 * all need to be under the 4G limit.
	 */
	cld
	cli

	/*
	 * Calculate the delta between where we were compiled to run
	 * at and where we were actually loaded at.  This can only be done
	 * with a short local call on x86.  Nothing else will tell us what
	 * address we are running at.  The reserved chunk of the real-mode
	 * data at 0x1e4 (defined as a scratch field) is used as the stack
	 * for this calculation.  Only 4 bytes are needed.
	 */
	leal	(BP_scratch+4)(%esi), %esp
	call	1f
1:	popl	%ebp
	subl	$1b, %ebp
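	/*
	 * %ebp now holds the delta between the runtime and link-time
	 * address of label 1 above, i.e. the offset the image was
	 * loaded at; it is used below as the base for absolute
	 * references such as gdt and boot_stack_end.
	 */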

	/* Load new GDT with the 64bit segments using 32bit descriptor */
	leal	gdt(%ebp), %eax
	movl	%eax, 2(%eax)
	lgdt	(%eax)
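	/*
	 * gdt is linked with a zero base field; the movl above stored
	 * the runtime address of the table into that field (offset 2,
	 * right after the 16-bit limit), so the lgdt pointer is valid
	 * wherever we were loaded.
	 */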

	/* Load segment registers with our descriptors */
	movl	$__BOOT_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %fs
	movl	%eax, %gs
	movl	%eax, %ss

	/* Set up a stack and make sure the CPU supports long mode. */
	leal	boot_stack_end(%ebp), %esp

	call	verify_cpu
	testl	%eax, %eax
	jnz	.Lno_longmode

	/*
	 * Compute the delta between where we were compiled to run at
	 * and where the code will actually run at.
	 *
	 * %ebp contains the address we are loaded at by the boot loader and %ebx
	 * contains the address where we should move the kernel image temporarily
	 * for safe in-place decompression.
	 */

#ifdef CONFIG_RELOCATABLE
	movl	%ebp, %ebx

#ifdef CONFIG_EFI_STUB
/*
 * If we were loaded via the EFI LoadImage service, startup_32 will be at an
 * offset to the start of the space allocated for the image. efi_pe_entry will
 * set up image_offset to tell us where the image actually starts, so that we
 * can use the full available buffer.
 *	image_offset = startup_32 - image_base
 * Otherwise image_offset will be zero and has no effect on the calculations.
 */
	subl	image_offset(%ebp), %ebx
#endif

	movl	BP_kernel_alignment(%esi), %eax
	decl	%eax
	addl	%eax, %ebx
	notl	%eax
	andl	%eax, %ebx
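	/*
	 * The four instructions above are the usual round-up idiom; in C:
	 *	ebx = (ebx + kernel_alignment - 1) & ~(kernel_alignment - 1);
	 */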
	cmpl	$LOAD_PHYSICAL_ADDR, %ebx
	jae	1f
#endif
	movl	$LOAD_PHYSICAL_ADDR, %ebx
1:

	/* Target address to relocate to for decompression */
	addl	BP_init_size(%esi), %ebx
	subl	$_end, %ebx
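	/*
	 * %ebx = aligned load address + init_size - _end: the image is
	 * placed at the very end of the decompression buffer, so that
	 * decompressing in place cannot overwrite compressed data that
	 * has not been consumed yet.
	 */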

/*
 * Prepare for entering 64 bit mode
 */

	/* Enable PAE mode */
	movl	%cr4, %eax
	orl	$X86_CR4_PAE, %eax
	movl	%eax, %cr4

/*
 * Build early 4G boot pagetable
 */
	/*
	 * If SEV is active then set the encryption mask in the page tables.
	 * This will ensure that when the kernel is copied and decompressed
	 * it will be done so encrypted.
	 */
	call	get_sev_encryption_bit
	xorl	%edx, %edx
	testl	%eax, %eax
	jz	1f
	subl	$32, %eax	/* Encryption bit is always above bit 31 */
	bts	%eax, %edx	/* Set encryption mask for page tables */
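	/*
	 * %edx now holds the encryption mask positioned for the upper
	 * 32 bits of a page table entry; the entries below are written
	 * as two 32-bit halves, with %edx added to the high half.
	 */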
1:

	/* Initialize Page tables to 0 */
	leal	pgtable(%ebx), %edi
	xorl	%eax, %eax
	movl	$(BOOT_INIT_PGT_SIZE/4), %ecx
	rep	stosl

	/* Build Level 4 */
	leal	pgtable + 0(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)

	/* Build Level 3 */
	leal	pgtable + 0x1000(%ebx), %edi
	leal	0x1007(%edi), %eax
	movl	$4, %ecx
1:	movl	%eax, 0x00(%edi)
	addl	%edx, 0x04(%edi)
	addl	$0x00001000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b

	/* Build Level 2 */
	leal	pgtable + 0x2000(%ebx), %edi
	movl	$0x00000183, %eax
	movl	$2048, %ecx
1:	movl	%eax, 0(%edi)
	addl	%edx, 4(%edi)
	addl	$0x00200000, %eax
	addl	$8, %edi
	decl	%ecx
	jnz	1b
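	/*
	 * The 0x183 attributes used above are Present + Writable + PS
	 * (2M page) + Global; 2048 such entries identity-map the full
	 * first 4G of address space.
	 */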

	/* Enable the boot page tables */
	leal	pgtable(%ebx), %eax
	movl	%eax, %cr3

	/* Enable Long mode in EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr

	/* After gdt is loaded: set up a null LDT and the boot TSS */
	xorl	%eax, %eax
	lldt	%ax
	movl	$__BOOT_TSS, %eax
	ltr	%ax

	/*
	 * Setup for the jump to 64bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can
	 * be used to perform that far jump.
	 */
	pushl	$__KERNEL_CS
	leal	startup_64(%ebp), %eax
#ifdef CONFIG_EFI_MIXED
	movl	efi32_boot_args(%ebp), %edi
	cmp	$0, %edi
	jz	1f
	leal	efi64_stub_entry(%ebp), %eax
	movl	efi32_boot_args+4(%ebp), %esi
	movl	efi32_boot_args+8(%ebp), %edx	// saved bootparams pointer
	cmpl	$0, %edx
	jnz	1f
	leal	efi_pe_entry(%ebp), %eax
	movl	%edi, %ecx			// MS calling convention
	movl	%esi, %edx
1:
#endif
	pushl	%eax

	/* Enter paged protected Mode, activating Long Mode */
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax /* Enable Paging and Protected mode */
	movl	%eax, %cr0

	/* Jump from 32bit compatibility mode into 64bit mode. */
	lret
SYM_FUNC_END(startup_32)

#ifdef CONFIG_EFI_MIXED
	.org 0x190
SYM_FUNC_START(efi32_stub_entry)
	add	$0x4, %esp		/* Discard return address */
	popl	%ecx
	popl	%edx
	popl	%esi

	call	1f
1:	pop	%ebp
	subl	$1b, %ebp

	movl	%esi, efi32_boot_args+8(%ebp)
SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
	movl	%ecx, efi32_boot_args(%ebp)
	movl	%edx, efi32_boot_args+4(%ebp)
	movb	$0, efi_is64(%ebp)

	/* Save firmware GDTR and code/data selectors */
	sgdtl	efi32_boot_gdt(%ebp)
	movw	%cs, efi32_boot_cs(%ebp)
	movw	%ds, efi32_boot_ds(%ebp)

	/* Disable paging */
	movl	%cr0, %eax
	btrl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0

	jmp	startup_32
SYM_FUNC_END(efi32_stub_entry)
#endif

	.code64
	.org 0x200
SYM_CODE_START(startup_64)
	/*
	 * 64bit entry is 0x200 and it is ABI so immutable!
	 * We come here either from startup_32 or directly from a
	 * 64bit bootloader.
	 * If we come here from a bootloader, kernel(text+data+bss+brk),
	 * ramdisk, zero_page, command line could be above 4G.
	 * We depend on an identity mapped page table being provided
	 * that maps our entire kernel(text+data+bss+brk), zero page
	 * and command line.
	 */

	cld
	cli

	/* Setup data segments. */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %es
	movl	%eax, %ss
	movl	%eax, %fs
	movl	%eax, %gs

	/*
	 * Compute the decompressed kernel start address.  It is where
	 * we were loaded at, aligned to a 2M boundary.  %rbp contains the
	 * decompressed kernel start address.
	 *
	 * If it is a relocatable kernel then decompress and run the kernel
	 * from the load address aligned to a 2MB boundary, otherwise
	 * decompress and run the kernel from LOAD_PHYSICAL_ADDR
	 *
	 * We cannot rely on the calculation done in 32-bit mode, since we
	 * may have been invoked via the 64-bit entry point.
	 */

	/* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
	leaq	startup_32(%rip) /* - $startup_32 */, %rbp

#ifdef CONFIG_EFI_STUB
/*
 * If we were loaded via the EFI LoadImage service, startup_32 will be at an
 * offset to the start of the space allocated for the image. efi_pe_entry will
 * set up image_offset to tell us where the image actually starts, so that we
 * can use the full available buffer.
 *	image_offset = startup_32 - image_base
 * Otherwise image_offset will be zero and has no effect on the calculations.
 */
	movl	image_offset(%rip), %eax
	subq	%rax, %rbp
#endif

	movl	BP_kernel_alignment(%rsi), %eax
	decl	%eax
	addq	%rax, %rbp
	notq	%rax
	andq	%rax, %rbp
	cmpq	$LOAD_PHYSICAL_ADDR, %rbp
	jae	1f
#endif
	movq	$LOAD_PHYSICAL_ADDR, %rbp
1:

	/* Target address to relocate to for decompression */
	movl	BP_init_size(%rsi), %ebx
	subl	$_end, %ebx
	addq	%rbp, %rbx
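	/*
	 * As in startup_32: %rbx = aligned load address + init_size -
	 * _end, the end of the buffer where in-place decompression is
	 * safe.
	 */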

	/* Set up the stack */
	leaq	boot_stack_end(%rbx), %rsp

	/*
	 * paging_prepare() and cleanup_trampoline() below can have GOT
	 * references. Adjust the table with address we are running at.
	 *
	 * Zero RAX for adjust_got: the GOT was not adjusted before;
	 * there's no adjustment to undo.
	 */
	xorq	%rax, %rax

	/*
	 * Calculate the address the binary is loaded at and use it as
	 * a GOT adjustment.
	 */
	call	1f
1:	popq	%rdi
	subq	$1b, %rdi

	call	.Ladjust_got

	/*
	 * At this point we are in long mode with 4-level paging enabled,
	 * but we might want to enable 5-level paging or vice versa.
	 *
	 * The problem is that we cannot do it directly. Setting or clearing
	 * CR4.LA57 in long mode would trigger #GP. So we need to switch off
	 * long mode and paging first.
	 *
	 * We also need a trampoline in lower memory to switch over from
	 * 4- to 5-level paging for cases when the bootloader puts the kernel
	 * above 4G, but didn't enable 5-level paging for us.
	 *
	 * The same trampoline can be used to switch from 5- to 4-level paging
	 * mode, like when starting a 4-level paging kernel via kexec() while
	 * the original kernel worked in 5-level paging mode.
	 *
	 * For the trampoline, we need the top page table to reside in lower
	 * memory as we don't have a way to load 64-bit values into CR3 in
	 * 32-bit mode.
	 *
	 * We go through the trampoline even if we don't have to: if we're
	 * already in the desired paging mode. This way the trampoline code
	 * gets tested on every boot.
	 */

	/* Make sure we have GDT with 32-bit code segment */
	leaq	gdt64(%rip), %rax
	addq	%rax, 2(%rax)
	lgdt	(%rax)

	/*
	 * paging_prepare() sets up the trampoline and checks if we need to
	 * enable 5-level paging.
	 *
	 * paging_prepare() returns a two-quadword structure which lands
	 * into RDX:RAX:
	 *   - Address of the trampoline is returned in RAX.
	 *   - Non zero RDX means trampoline needs to enable 5-level
	 *     paging.
	 *
	 * RSI holds real mode data and needs to be preserved across
	 * this function call.
	 */
	pushq	%rsi
	movq	%rsi, %rdi		/* real mode address */
	call	paging_prepare
	popq	%rsi

	/* Save the trampoline address in RCX */
	movq	%rax, %rcx

	/*
	 * Load the address of trampoline_return() into RDI.
	 * It will be used by the trampoline to return to the main code.
	 */
	leaq	trampoline_return(%rip), %rdi

	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
	pushq	$__KERNEL32_CS
	leaq	TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax
	pushq	%rax
	lretq
trampoline_return:
	/* Restore the stack, the 32-bit trampoline uses its own stack */
	leaq	boot_stack_end(%rbx), %rsp

	/*
	 * cleanup_trampoline() restores trampoline memory.
	 *
	 * RDI is address of the page table to use instead of page table
	 * in trampoline memory (if required).
	 *
	 * RSI holds real mode data and needs to be preserved across
	 * this function call.
	 */
	pushq	%rsi
	leaq	top_pgtable(%rbx), %rdi
	call	cleanup_trampoline
	popq	%rsi

	/* Zero EFLAGS */
	pushq	$0
	popfq

	/*
	 * Previously we've adjusted the GOT with address the binary was
	 * loaded at. Now we need to re-adjust for relocation address.
	 *
	 * Calculate the address the binary is loaded at, so that we can
	 * undo the previous GOT adjustment.
	 */
	call	1f
1:	popq	%rax
	subq	$1b, %rax

	/* The new adjustment is the relocation address */
	movq	%rbx, %rdi
	call	.Ladjust_got

	/*
	 * Copy the compressed kernel to the end of our buffer
	 * where decompression in place becomes safe.
	 */
	pushq	%rsi
	leaq	(_bss-8)(%rip), %rsi
	leaq	(_bss-8)(%rbx), %rdi
	movq	$_bss /* - $startup_32 */, %rcx
	shrq	$3, %rcx
	std
	rep	movsq
	cld
	popq	%rsi
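	/*
	 * The copy above runs backwards (std before rep movsq) because
	 * the source and destination buffers may overlap; starting from
	 * the last quadword (_bss - 8) keeps the copy safe in that case.
	 */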

	/*
	 * The GDT may get overwritten either during the copy we just did or
	 * during extract_kernel below. To avoid any issues, repoint the GDTR
	 * to the new copy of the GDT.
	 */
	leaq	gdt64(%rbx), %rax
	leaq	gdt(%rbx), %rdx
	movq	%rdx, 2(%rax)
	lgdt	(%rax)

	/*
	 * Jump to the relocated address.
	 */
	leaq	.Lrelocated(%rbx), %rax
	jmp	*%rax
SYM_CODE_END(startup_64)

#ifdef CONFIG_EFI_STUB
	.org 0x390
SYM_FUNC_START(efi64_stub_entry)
SYM_FUNC_START_ALIAS(efi_stub_entry)
	and	$~0xf, %rsp			/* realign the stack */
	movq	%rdx, %rbx			/* save boot_params pointer */
	call	efi_main
	movq	%rbx, %rsi
	leaq	startup_64(%rax), %rax
	jmp	*%rax
SYM_FUNC_END(efi64_stub_entry)
SYM_FUNC_END_ALIAS(efi_stub_entry)
#endif

	.text
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)

/*
 * Clear BSS (stack is currently empty)
 */
	xorl	%eax, %eax
	leaq	_bss(%rip), %rdi
	leaq	_ebss(%rip), %rcx
	subq	%rdi, %rcx
	shrq	$3, %rcx
	rep	stosq

/*
 * Do the extraction, and jump to the new kernel.
 */
	pushq	%rsi			/* Save the real mode argument */
	movq	%rsi, %rdi		/* real mode address */
	leaq	boot_heap(%rip), %rsi	/* malloc area for uncompression */
	leaq	input_data(%rip), %rdx	/* input_data */
	movl	$z_input_len, %ecx	/* input_len */
	movq	%rbp, %r8		/* output target address */
	movl	$z_output_len, %r9d	/* decompressed length, end of relocs */
	call	extract_kernel		/* returns kernel location in %rax */
	popq	%rsi

/*
 * Jump to the decompressed kernel.
 */
	jmp	*%rax
SYM_FUNC_END(.Lrelocated)

/*
 * Adjust the global offset table
 *
 * RAX is the previous adjustment of the table to undo (use 0 if it's the
 * first time we touch GOT).
 * RDI is the new adjustment to apply.
 */
.Ladjust_got:
	/* Walk through the GOT adding the address to the entries */
	leaq	_got(%rip), %rdx
	leaq	_egot(%rip), %rcx
1:
	cmpq	%rcx, %rdx
	jae	2f
	subq	%rax, (%rdx)	/* Undo previous adjustment */
	addq	%rdi, (%rdx)	/* Apply the new adjustment */
	addq	$8, %rdx
	jmp	1b
2:
	ret
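/*
 * The loop above, roughly in C:
 *
 *	for (u64 *entry = _got; entry < _egot; entry++)
 *		*entry = *entry - previous_adjustment + new_adjustment;
 */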

	.code32
/*
 * This is the 32-bit trampoline that will be copied over to low memory.
 *
 * RDI contains the return address (might be above 4G).
 * ECX contains the base address of the trampoline memory.
 * Non zero RDX means trampoline needs to enable 5-level paging.
 */
SYM_CODE_START(trampoline_32bit_src)
	/* Set up data and stack segments */
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %ss

	/* Set up new stack */
	leal	TRAMPOLINE_32BIT_STACK_END(%ecx), %esp

	/* Disable paging */
	movl	%cr0, %eax
	btrl	$X86_CR0_PG_BIT, %eax
	movl	%eax, %cr0

	/* Check what paging mode we want to be in after the trampoline */
	cmpl	$0, %edx
	jz	1f

	/* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */
	movl	%cr4, %eax
	testl	$X86_CR4_LA57, %eax
	jnz	3f
	jmp	2f
1:
	/* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */
	movl	%cr4, %eax
	testl	$X86_CR4_LA57, %eax
	jz	3f
2:
	/* Point CR3 to the trampoline's new top level page table */
	leal	TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
	movl	%eax, %cr3
3:
	/* Set EFER.LME=1 as a precaution in case the hypervisor pulls the rug */
	pushl	%ecx
	pushl	%edx
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_LME, %eax
	wrmsr
	popl	%edx
	popl	%ecx

	/* Enable PAE and LA57 (if required) paging modes */
	movl	$X86_CR4_PAE, %eax
	cmpl	$0, %edx
	jz	1f
	orl	$X86_CR4_LA57, %eax
1:
	movl	%eax, %cr4

	/* Calculate address of paging_enabled() once we are executing in the trampoline */
	leal	.Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
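	/*
	 * This is the runtime address of .Lpaging_enabled inside the
	 * low-memory copy: trampoline base (%ecx) + code offset + the
	 * label's offset within trampoline_32bit_src.
	 */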

	/* Prepare the stack for far return to Long Mode */
	pushl	$__KERNEL_CS
	pushl	%eax

	/* Enable paging again */
	movl	$(X86_CR0_PG | X86_CR0_PE), %eax
	movl	%eax, %cr0

	lret
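	/*
	 * With EFER.LME and CR0.PG both set again, the CPU is back in
	 * long mode; the far return loads __KERNEL_CS (CS.L = 1) and
	 * lands in the 64-bit .Lpaging_enabled stub below.
	 */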
SYM_CODE_END(trampoline_32bit_src)

	.code64
SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
	/* Return from the trampoline */
	jmp	*%rdi
SYM_FUNC_END(.Lpaging_enabled)

/*
 * The trampoline code has a size limit.
 * Make sure we fail to compile if the trampoline code grows
 * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
 */
	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE

	.code32
SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
	/* This isn't an x86-64 CPU, so hang intentionally; we cannot continue */
1:
	hlt
	jmp	1b
SYM_FUNC_END(.Lno_longmode)

#include "../../kernel/verify_cpu.S"

	.data
SYM_DATA_START_LOCAL(gdt64)
	.word	gdt_end - gdt - 1
	.quad	gdt - gdt64
SYM_DATA_END(gdt64)
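/*
 * The base field of gdt64 initially holds gdt's link-time offset;
 * startup_64 adds the runtime address of gdt64 to it (addq %rax,
 * 2(%rax)) to turn the descriptor into an absolute pointer before
 * lgdt.
 */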
	.balign	8
SYM_DATA_START_LOCAL(gdt)
	.word	gdt_end - gdt - 1
	.long	0
	.word	0
	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TS descriptor */
	.quad	0x0000000000000000	/* TS continued */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)

#ifdef CONFIG_EFI_STUB
SYM_DATA(image_offset, .long 0)
#endif

#ifdef CONFIG_EFI_MIXED
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)

#define ST32_boottime		60 // offsetof(efi_system_table_32_t, boottime)
#define BS32_handle_protocol	88 // offsetof(efi_boot_services_32_t, handle_protocol)
#define LI32_image_base		32 // offsetof(efi_loaded_image_32_t, image_base)

	.text
	.code32
SYM_FUNC_START(efi32_pe_entry)
/*
 * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
 *			       efi_system_table_32_t *sys_table)
 */

	pushl	%ebp
	movl	%esp, %ebp
	pushl	%eax				// dummy push to allocate loaded_image

	pushl	%ebx				// save callee-save registers
	pushl	%edi

	call	verify_cpu			// check for long mode support
	testl	%eax, %eax
	movl	$0x80000003, %eax		// EFI_UNSUPPORTED
	jnz	2f

	call	1f
1:	pop	%ebx
	subl	$1b, %ebx

	/* Get the loaded image protocol pointer from the image handle */
	leal	-4(%ebp), %eax
	pushl	%eax				// &loaded_image
	leal	loaded_image_proto(%ebx), %eax
	pushl	%eax				// pass the GUID address
	pushl	8(%ebp)				// pass the image handle

	/*
	 * Note the alignment of the stack frame.
	 *   sys_table
	 *   handle			<-- 16-byte aligned on entry by ABI
	 *   return address
	 *   frame pointer
	 *   loaded_image		<-- local variable
	 *   saved %ebx			<-- 16-byte aligned here
	 *   saved %edi
	 *   &loaded_image
	 *   &loaded_image_proto
	 *   handle			<-- 16-byte aligned for call to handle_protocol
	 */

	movl	12(%ebp), %eax			// sys_table
	movl	ST32_boottime(%eax), %eax	// sys_table->boottime
	call	*BS32_handle_protocol(%eax)	// sys_table->boottime->handle_protocol
	addl	$12, %esp			// restore argument space
	testl	%eax, %eax
	jnz	2f

	movl	8(%ebp), %ecx			// image_handle
	movl	12(%ebp), %edx			// sys_table
	movl	-4(%ebp), %esi			// loaded_image
	movl	LI32_image_base(%esi), %esi	// loaded_image->image_base
	movl	%ebx, %ebp			// startup_32 for efi32_pe_stub_entry
	/*
	 * We need to set the image_offset variable here since startup_32() will
	 * use it before we get to the 64-bit efi_pe_entry() in C code.
	 */
	subl	%esi, %ebx
	movl	%ebx, image_offset(%ebp)	// save image_offset
	jmp	efi32_pe_stub_entry

2:	popl	%edi				// restore callee-save registers
	popl	%ebx
	leave
	ret
SYM_FUNC_END(efi32_pe_entry)

	.section ".rodata"
	/* EFI loaded image protocol GUID */
	.balign	4
SYM_DATA_START_LOCAL(loaded_image_proto)
	.long	0x5b1b31a1
	.word	0x9562, 0x11d2
	.byte	0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
SYM_DATA_END(loaded_image_proto)
#endif

/*
 * Stack and heap for uncompression
 */
	.bss
	.balign	4
SYM_DATA_LOCAL(boot_heap,	.fill BOOT_HEAP_SIZE, 1, 0)

SYM_DATA_START_LOCAL(boot_stack)
	.fill	BOOT_STACK_SIZE, 1, 0
SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)

/*
 * Space for page tables (not in .bss so not zeroed)
 */
	.section ".pgtable","aw",@nobits
	.balign	4096
SYM_DATA_LOCAL(pgtable,		.fill BOOT_PGT_SIZE, 1, 0)

/*
 * This page table is used instead of the page table in trampoline
 * memory.
 */
SYM_DATA_LOCAL(top_pgtable,	.fill PAGE_SIZE, 1, 0)