# arch/x86_64/kernel/acpi/wakeup.S (Linux 2.6.12-rc2)
.text
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>

# Copyright 2003 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
#
# wakeup_code runs in real mode, at an unknown address that is determined at run time.
# Therefore it must only use relative jumps/calls.
#
# Do we need to deal with A20? It is okay: the ACPI spec says A20 must be enabled.
#
# If the physical address of wakeup_code is 0x12345, the BIOS should call us with
# cs = 0x1234, eip = 0x05
#


ALIGN
        .align  16
ENTRY(wakeup_start)
wakeup_code:
        wakeup_code_start = .
        .code16

# Running in a *copy* of this code, somewhere in the low 1MB.

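# Debug progress codes are written to I/O port 0x80 (the POST diagnostic port)
# at various points in this file.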
        movb    $0xa1, %al      ;  outb %al, $0x80
        cli
        cld
        # setup data segment
        movw    %cs, %ax
        movw    %ax, %ds                # Make ds:0 point to wakeup_start
        movw    %ax, %ss
        mov     $(wakeup_stack - wakeup_code), %sp      # Private stack is needed for ASUS board

        pushl   $0                      # Kill any dangerous flags
        popfl

        movl    real_magic - wakeup_code, %eax
        cmpl    $0x12345678, %eax
        jne     bogus_real_magic

        testl   $1, video_flags - wakeup_code
        jz      1f
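        # Re-run the video BIOS POST: offset 3 into the option ROM at segment
        # 0xc000 is the standard ROM initialization entry point.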
        lcall   $0xc000,$3
        movw    %cs, %ax
        movw    %ax, %ds                # The BIOS may have clobbered these
        movw    %ax, %ss
1:

        testl   $2, video_flags - wakeup_code
        jz      1f
        mov     video_mode - wakeup_code, %ax
        call    mode_seta
1:

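        # Drop a progress marker: write 'L' (attribute 0x0e) into VGA text
        # memory at b800:0010.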
        movw    $0xb800, %ax
        movw    %ax,%fs
        movw    $0x0e00 + 'L', %fs:(0x10)

        movb    $0xa2, %al      ;  outb %al, $0x80

        lidt    %ds:idt_48a - wakeup_code
        xorl    %eax, %eax
        movw    %ds, %ax                        # (Convert %ds:gdt to a linear ptr)
        shll    $4, %eax
        addl    $(gdta - wakeup_code), %eax
        movl    %eax, gdt_48a +2 - wakeup_code
        lgdt    %ds:gdt_48a - wakeup_code       # load gdt with whatever is
                                                # appropriate

        movl    $1, %eax                        # protected mode (PE) bit
        lmsw    %ax                             # This is it!
        jmp     1f
1:

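        # Hand-assembled 32-bit far jump (operand-size prefix + far-jmp opcode):
        # loads __KERNEL_CS and continues in the 32-bit code below.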
        .byte 0x66, 0xea                        # prefix + jmpi-opcode
        .long   wakeup_32 - __START_KERNEL_map
        .word   __KERNEL_CS

        .code32
wakeup_32:
# Running in this code, but at a low address; paging is not yet turned on.
        movb    $0xa5, %al      ;  outb %al, $0x80

        /* Check if extended functions are implemented */
        movl    $0x80000000, %eax
        cpuid
        cmpl    $0x80000000, %eax
        jbe     bogus_cpu
        wbinvd
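        /* CPUID leaf 0x80000001: EDX bit 29 is the long mode (LM) feature flag */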
        mov     $0x80000001, %eax
        cpuid
        btl     $29, %edx
        jnc     bogus_cpu
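        /* Stash the extended feature flags; the NX bit is tested from %edi below */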
        movl    %edx,%edi

        movw    $__KERNEL_DS, %ax
        movw    %ax, %ds
        movw    %ax, %es
        movw    %ax, %fs
        movw    %ax, %gs

        movw    $__KERNEL_DS, %ax
        movw    %ax, %ss

        mov     $(wakeup_stack - __START_KERNEL_map), %esp
        movl    saved_magic - __START_KERNEL_map, %eax
        cmpl    $0x9abcdef0, %eax
        jne     bogus_32_magic

        /*
         * Prepare for entering 64-bit mode
         */

        /* Enable PAE mode and PGE */
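        /* (CR4 bit 5 = PAE, bit 7 = PGE) */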
        xorl    %eax, %eax
        btsl    $5, %eax
        btsl    $7, %eax
        movl    %eax, %cr4

        /* Set up the early boot 4-level page tables */
        movl    $(wakeup_level4_pgt - __START_KERNEL_map), %eax
        movl    %eax, %cr3

        /* Setup EFER (Extended Feature Enable Register) */
        movl    $MSR_EFER, %ecx
        rdmsr
        /* Fool rdmsr and reset %eax to avoid dependencies */
        xorl    %eax, %eax
        /* Enable Long Mode */
        btsl    $_EFER_LME, %eax
        /* Enable System Call */
        btsl    $_EFER_SCE, %eax

        /* No Execute supported? */
        btl     $20,%edi
        jnc     1f
        btsl    $_EFER_NX, %eax
1:

        /* Make changes effective */
        wrmsr
        wbinvd

        xorl    %eax, %eax
        btsl    $31, %eax                       /* Enable paging and in turn activate Long Mode */
        btsl    $0, %eax                        /* Enable protected mode */
        btsl    $1, %eax                        /* Enable MP */
        btsl    $4, %eax                        /* Enable ET */
        btsl    $5, %eax                        /* Enable NE */
        btsl    $16, %eax                       /* Enable WP */
        btsl    $18, %eax                       /* Enable AM */

        /* Make changes effective */
        movl    %eax, %cr0
        /* At this point:
                CR4.PAE must be 1
                CS.L must be 0
                CR3 must point to PML4
                Next instruction must be a branch
                This must be on identity-mapped page
        */
        jmp     reach_compatibility_mode
reach_compatibility_mode:
        movw    $0x0e00 + 'i', %ds:(0xb8012)
        movb    $0xa8, %al      ;  outb %al, $0x80;

        /*
         * At this point we're in long mode but in 32-bit compatibility mode
         * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
         * EFER.LMA = 1). Now we want to jump into 64-bit mode; to do that we
         * load the new gdt/idt that has __KERNEL_CS with CS.L = 1.
         */

        movw    $0x0e00 + 'n', %ds:(0xb8014)
        movb    $0xa9, %al      ;  outb %al, $0x80

        /* Load the new GDT, which contains the 64-bit segment, using a 32-bit descriptor */
        movl    $(pGDT32 - __START_KERNEL_map), %eax
        lgdt    (%eax)

        movl    $(wakeup_jumpvector - __START_KERNEL_map), %eax
        /* Finally jump into 64-bit mode */
        ljmp    *(%eax)

wakeup_jumpvector:
        .long   wakeup_long64 - __START_KERNEL_map
        .word   __KERNEL_CS

        .code64

        /* Hooray, we are in Long 64-bit mode (but still running in low memory) */
wakeup_long64:
        /*
         * We must switch to a new descriptor in kernel space for the GDT
         * because soon the kernel won't have access anymore to the userspace
         * addresses at which we're currently running. We have to do that here
         * because in 32-bit mode we couldn't load a 64-bit linear address.
         */
        lgdt    cpu_gdt_descr - __START_KERNEL_map

        movw    $0x0e00 + 'u', %ds:(0xb8016)

        nop
        nop
        movw    $__KERNEL_DS, %ax
        movw    %ax, %ss
        movw    %ax, %ds
        movw    %ax, %es
        movw    %ax, %fs
        movw    %ax, %gs
        movq    saved_esp, %rsp

        movw    $0x0e00 + 'x', %ds:(0xb8018)
        movq    saved_ebx, %rbx
        movq    saved_edi, %rdi
        movq    saved_esi, %rsi
        movq    saved_ebp, %rbp

        movw    $0x0e00 + '!', %ds:(0xb801a)
        movq    saved_eip, %rax
        jmp     *%rax

        .code32

        .align  64
gdta:
        .word   0, 0, 0, 0                      # dummy

        .word   0, 0, 0, 0                      # unused

        .word   0xFFFF                          # 4Gb - (0x100000*0x1000 = 4Gb)
        .word   0                               # base address = 0
        .word   0x9B00                          # code read/exec. ??? Why do I need 0x9B00 (as opposed to 0x9A00) for this to work?
        .word   0x00CF                          # granularity = 4096, 386
                                                #  (+5th nibble of limit)

        .word   0xFFFF                          # 4Gb - (0x100000*0x1000 = 4Gb)
        .word   0                               # base address = 0
        .word   0x9200                          # data read/write
        .word   0x00CF                          # granularity = 4096, 386
                                                #  (+5th nibble of limit)
# this is the 64-bit descriptor for code
        .word   0xFFFF
        .word   0
        .word   0x9A00                          # code read/exec
        .word   0x00AF                          # as above, but it is long mode and with D=0

idt_48a:
        .word   0                               # idt limit = 0
        .word   0, 0                            # idt base = 0L

gdt_48a:
        .word   0x8000                          # gdt limit = 0x8000,
                                                #  i.e. room for 4096 GDT entries
        .word   0, 0                            # gdt base (filled in later)


real_save_gdt:  .word 0
                .quad 0
real_magic:     .quad 0
video_mode:     .quad 0
video_flags:    .quad 0

bogus_real_magic:
        movb    $0xba,%al       ;  outb %al,$0x80
        jmp bogus_real_magic

bogus_32_magic:
        movb    $0xb3,%al       ;  outb %al,$0x80
        jmp bogus_32_magic

bogus_31_magic:
        movb    $0xb1,%al       ;  outb %al,$0x80
        jmp bogus_31_magic

bogus_cpu:
        movb    $0xbc,%al       ;  outb %al,$0x80
        jmp bogus_cpu


/* This code uses an extended set of video mode numbers. These include:
 * Aliases for standard modes
 *      NORMAL_VGA (-1)
 *      EXTENDED_VGA (-2)
 *      ASK_VGA (-3)
 * Video modes numbered by menu position -- NOT RECOMMENDED because of lack
 * of compatibility when extending the table. These are between 0x00 and 0xff.
 */
#define VIDEO_FIRST_MENU 0x0000

/* Standard BIOS video modes (BIOS number + 0x0100) */
#define VIDEO_FIRST_BIOS 0x0100

/* VESA BIOS video modes (VESA number + 0x0200) */
#define VIDEO_FIRST_VESA 0x0200

/* Video7 special modes (BIOS number + 0x0900) */
#define VIDEO_FIRST_V7 0x0900

# Set the user-requested video mode (AX = mode ID) => CF set on success
mode_seta:
        movw    %ax, %bx
#if 0
        cmpb    $0xff, %ah
        jz      setalias

        testb   $VIDEO_RECALC>>8, %ah
        jnz     _setrec

        cmpb    $VIDEO_FIRST_RESOLUTION>>8, %ah
        jnc     setres

        cmpb    $VIDEO_FIRST_SPECIAL>>8, %ah
        jz      setspc

        cmpb    $VIDEO_FIRST_V7>>8, %ah
        jz      setv7
#endif

        cmpb    $VIDEO_FIRST_VESA>>8, %ah
        jnc     check_vesaa
#if 0
        orb     %ah, %ah
        jz      setmenu
#endif

        decb    %ah
#       jz      setbios                         Add bios modes later

setbada: clc
        ret

check_vesaa:
        subb    $VIDEO_FIRST_VESA>>8, %bh
        orw     $0x4000, %bx                    # Use linear frame buffer
        movw    $0x4f02, %ax                    # VESA BIOS mode set call
        int     $0x10
        cmpw    $0x004f, %ax                    # AL=4f if implemented
        jnz     _setbada                        # AH=0 if OK

        stc
        ret

_setbada: jmp setbada

        .code64
bogus_magic:
        movw    $0x0e00 + 'B', %ds:(0xb8018)
        jmp bogus_magic

bogus_magic2:
        movw    $0x0e00 + '2', %ds:(0xb8018)
        jmp bogus_magic2


wakeup_stack_begin:     # Stack grows down

.org    0xff0
wakeup_stack:           # Just below end of page

ENTRY(wakeup_end)

##
# acpi_copy_wakeup_routine
#
# Copy the above routine to low memory.
#
# Parameters:
# %rdi: place to copy the wakeup routine to
#
# The returned address is the location of the code in low memory (past data and stack)
#
ENTRY(acpi_copy_wakeup_routine)
        pushq   %rax
        pushq   %rcx
        pushq   %rdx

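        # Snapshot the descriptor table registers and the task register before suspend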
        sgdt    saved_gdt
        sidt    saved_idt
        sldt    saved_ldt
        str     saved_tss

        movq    %cr3, %rdx
        movq    %rdx, saved_cr3
        movq    %cr4, %rdx
        movq    %rdx, saved_cr4
        movq    %cr0, %rdx
        movq    %rdx, saved_cr0
        sgdt    real_save_gdt - wakeup_start (,%rdi)
        movl    $MSR_EFER, %ecx
        rdmsr
        movl    %eax, saved_efer
        movl    %edx, saved_efer2

        movl    saved_video_mode, %edx
        movl    %edx, video_mode - wakeup_start (,%rdi)
        movl    acpi_video_flags, %edx
        movl    %edx, video_flags - wakeup_start (,%rdi)
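        # Plant the magic cookies checked on wakeup: real mode compares real_magic
        # with 0x12345678 and the 32-bit stage compares the low half of saved_magic
        # with 0x9abcdef0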
        movq    $0x12345678, real_magic - wakeup_start (,%rdi)
        movq    $0x123456789abcdef0, %rdx
        movq    %rdx, saved_magic

        movl    saved_magic - __START_KERNEL_map, %eax
        cmpl    $0x9abcdef0, %eax
        jne     bogus_32_magic

        # make sure %cr4 is set correctly (features, etc)
        movl    saved_cr4 - __START_KERNEL_map, %eax
        movq    %rax, %cr4

        movl    saved_cr0 - __START_KERNEL_map, %eax
        movq    %rax, %cr0
        jmp     1f              # Flush pipelines
1:
        # restore the regs we used
        popq    %rdx
        popq    %rcx
        popq    %rax
ENTRY(do_suspend_lowlevel_s4bios)
        ret

.align 2
        .p2align 4,,15
.globl do_suspend_lowlevel
        .type   do_suspend_lowlevel,@function
do_suspend_lowlevel:
.LFB5:
        subq    $8, %rsp
        xorl    %eax, %eax
        call    save_processor_state

        movq %rsp, saved_context_esp(%rip)
        movq %rax, saved_context_eax(%rip)
        movq %rbx, saved_context_ebx(%rip)
        movq %rcx, saved_context_ecx(%rip)
        movq %rdx, saved_context_edx(%rip)
        movq %rbp, saved_context_ebp(%rip)
        movq %rsi, saved_context_esi(%rip)
        movq %rdi, saved_context_edi(%rip)
        movq %r8, saved_context_r08(%rip)
        movq %r9, saved_context_r09(%rip)
        movq %r10, saved_context_r10(%rip)
        movq %r11, saved_context_r11(%rip)
        movq %r12, saved_context_r12(%rip)
        movq %r13, saved_context_r13(%rip)
        movq %r14, saved_context_r14(%rip)
        movq %r15, saved_context_r15(%rip)
        pushfq ; popq saved_context_eflags(%rip)

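        # Record .L97 as the address the wakeup code jumps back to on resume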
        movq    $.L97, saved_eip(%rip)

        movq %rsp, saved_esp
        movq %rbp, saved_ebp
        movq %rbx, saved_ebx
        movq %rdi, saved_edi
        movq %rsi, saved_esi

        addq    $8, %rsp
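        # Call acpi_enter_sleep_state(3), i.e. enter ACPI sleep state S3
        # (the first argument is passed in %edi)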
        movl    $3, %edi
        xorl    %eax, %eax
        jmp     acpi_enter_sleep_state
.L97:
        .p2align 4,,7
.L99:
        .align 4
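        # Reload %ds with selector 24 (0x18), which is __KERNEL_DS in this kernel's GDT layout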
        movl    $24, %eax
        movw    %ax, %ds
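        # Restore the control registers saved in struct saved_context; the
        # +58/+50/+42/+34 displacements are presumably the offsets of its
        # cr4/cr3/cr2/cr0 fields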
        movq    saved_context+58(%rip), %rax
        movq    %rax, %cr4
        movq    saved_context+50(%rip), %rax
        movq    %rax, %cr3
        movq    saved_context+42(%rip), %rax
        movq    %rax, %cr2
        movq    saved_context+34(%rip), %rax
        movq    %rax, %cr0
        pushq   saved_context_eflags(%rip) ; popfq
        movq    saved_context_esp(%rip), %rsp
        movq    saved_context_ebp(%rip), %rbp
        movq    saved_context_eax(%rip), %rax
        movq    saved_context_ebx(%rip), %rbx
        movq    saved_context_ecx(%rip), %rcx
        movq    saved_context_edx(%rip), %rdx
        movq    saved_context_esi(%rip), %rsi
        movq    saved_context_edi(%rip), %rdi
        movq    saved_context_r08(%rip), %r8
        movq    saved_context_r09(%rip), %r9
        movq    saved_context_r10(%rip), %r10
        movq    saved_context_r11(%rip), %r11
        movq    saved_context_r12(%rip), %r12
        movq    saved_context_r13(%rip), %r13
        movq    saved_context_r14(%rip), %r14
        movq    saved_context_r15(%rip), %r15

        xorl    %eax, %eax
        addq    $8, %rsp
        jmp     restore_processor_state
.LFE5:
.Lfe5:
        .size   do_suspend_lowlevel,.Lfe5-do_suspend_lowlevel

.data
ALIGN
ENTRY(saved_ebp)        .quad   0
ENTRY(saved_esi)        .quad   0
ENTRY(saved_edi)        .quad   0
ENTRY(saved_ebx)        .quad   0

ENTRY(saved_eip)        .quad   0
ENTRY(saved_esp)        .quad   0

ENTRY(saved_magic)      .quad   0

ALIGN
# saved registers
saved_gdt:      .quad   0,0
saved_idt:      .quad   0,0
saved_ldt:      .quad   0
saved_tss:      .quad   0

saved_cr0:      .quad 0
saved_cr3:      .quad 0
saved_cr4:      .quad 0
saved_efer:     .quad 0
saved_efer2:    .quad 0