Subject: kexec: Move asm segment handling code to the assembly file (x86_64)
From: http://xenbits.xensource.com/xen-unstable.hg (tip 13816)
Patch-mainline: obsolete

This patch moves the idt, gdt, and segment handling code from machine_kexec.c
to relocate_kernel.S. The main reason behind this move is to avoid code
duplication in the Xen hypervisor. With this patch all code required to kexec
is put on the control page.

On top of that, this patch also counts as a cleanup - I think it is much
nicer to write assembly directly in assembly files than to wrap inline
assembly in C functions for no apparent reason.

Signed-off-by: Magnus Damm <magnus@valinux.co.jp>
Acked-by: jbeulich@novell.com
---

Applies to 2.6.19-rc1.

 machine_kexec.c   |   58 -----------------------------------------------------
 relocate_kernel.S |   50 +++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 45 insertions(+), 63 deletions(-)

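Not part of the patch, just background for review: lgdtq/lidtq take a 10-byte
pseudo-descriptor, a 16-bit limit followed immediately by the 64-bit base -
the same layout the removed set_gdt()/set_idt() helpers build as struct
desc_ptr. That is why the assembly below writes the relocated GDT address at
gdt_80 + 2: it patches the base field and leaves the limit word untouched.
A minimal C sketch of that layout (the struct name here is made up; the field
order follows asm/desc_defs.h):

#include <stddef.h>
#include <stdint.h>

struct kexec_desc_ptr {			/* hypothetical name, layout only */
	uint16_t size;			/* limit: size of the table in bytes - 1 */
	uint64_t address;		/* linear base address of the table */
} __attribute__((packed));

/* offset 2 matches: addq $((gdt_80 - relocate_kernel) + 2), %r9 */
_Static_assert(offsetof(struct kexec_desc_ptr, address) == 2,
	       "base field sits right after the 16-bit limit");

With this layout, gdt_80's .word is the limit (gdt_end - gdt - 1 = 15 for the
two-entry table) and the .quad at offset 2 is the base that relocate_new_kernel
fills in at run time.
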
Index: head-2008-08-18/arch/x86/kernel/machine_kexec_64.c
===================================================================
--- head-2008-08-18.orig/arch/x86/kernel/machine_kexec_64.c	2008-08-18 09:05:04.000000000 +0200
+++ head-2008-08-18/arch/x86/kernel/machine_kexec_64.c	2008-08-18 10:13:08.000000000 +0200
@@ -115,47 +115,6 @@ static int init_pgtable(struct kimage *i
 	return init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
 }
 
-static void set_idt(void *newidt, u16 limit)
-{
-	struct desc_ptr curidt;
-
-	/* x86-64 supports unaliged loads & stores */
-	curidt.size    = limit;
-	curidt.address = (unsigned long)newidt;
-
-	__asm__ __volatile__ (
-		"lidtq %0\n"
-		: : "m" (curidt)
-		);
-};
-
-
-static void set_gdt(void *newgdt, u16 limit)
-{
-	struct desc_ptr curgdt;
-
-	/* x86-64 supports unaligned loads & stores */
-	curgdt.size    = limit;
-	curgdt.address = (unsigned long)newgdt;
-
-	__asm__ __volatile__ (
-		"lgdtq %0\n"
-		: : "m" (curgdt)
-		);
-};
-
-static void load_segments(void)
-{
-	__asm__ __volatile__ (
-		"\tmovl %0,%%ds\n"
-		"\tmovl %0,%%es\n"
-		"\tmovl %0,%%ss\n"
-		"\tmovl %0,%%fs\n"
-		"\tmovl %0,%%gs\n"
-		: : "a" (__KERNEL_DS) : "memory"
-		);
-}
-
 int machine_kexec_prepare(struct kimage *image)
 {
 	unsigned long start_pgtable;
@@ -214,23 +173,6 @@ void machine_kexec(struct kimage *image)
 	page_list[PA_TABLE_PAGE] =
 		(unsigned long)__pa(page_address(image->control_code_page));
 
-	/* The segment registers are funny things, they have both a
-	 * visible and an invisible part.  Whenever the visible part is
-	 * set to a specific selector, the invisible part is loaded
-	 * with from a table in memory.  At no other time is the
-	 * descriptor table in memory accessed.
-	 *
-	 * I take advantage of this here by force loading the
-	 * segments, before I zap the gdt with an invalid value.
-	 */
-	load_segments();
-	/* The gdt & idt are now invalid.
-	 * If you want to load them you must set up your own idt & gdt.
-	 */
-	set_gdt(phys_to_virt(0),0);
-	set_idt(phys_to_virt(0),0);
-
-	/* now call it */
 	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
 			image->start);
 }
Index: head-2008-08-18/arch/x86/kernel/relocate_kernel_64.S
===================================================================
--- head-2008-08-18.orig/arch/x86/kernel/relocate_kernel_64.S	2008-07-13 23:51:29.000000000 +0200
+++ head-2008-08-18/arch/x86/kernel/relocate_kernel_64.S	2008-08-18 10:13:08.000000000 +0200
@@ -160,13 +160,39 @@ relocate_new_kernel:
 	movq	PTR(PA_PGD)(%rsi), %r9
 	movq	%r9, %cr3
 
+	/* setup idt */
+	movq	%r8, %rax
+	addq	$(idt_80 - relocate_kernel), %rax
+	lidtq	(%rax)
+
+	/* setup gdt */
+	movq	%r8, %rax
+	addq	$(gdt - relocate_kernel), %rax
+	movq	%r8, %r9
+	addq	$((gdt_80 - relocate_kernel) + 2), %r9
+	movq	%rax, (%r9)
+
+	movq	%r8, %rax
+	addq	$(gdt_80 - relocate_kernel), %rax
+	lgdtq	(%rax)
+
+	/* setup data segment registers */
+	xorl	%eax, %eax
+	movl	%eax, %ds
+	movl	%eax, %es
+	movl	%eax, %fs
+	movl	%eax, %gs
+	movl	%eax, %ss
+
 	/* setup a new stack at the end of the physical control page */
 	lea	PAGE_SIZE(%r8), %rsp
 
-	/* jump to identity mapped page */
-	addq	$(identity_mapped - relocate_kernel), %r8
-	pushq	%r8
-	ret
+	/* load new code segment and jump to identity mapped page */
+	movq	%r8, %rax
+	addq	$(identity_mapped - relocate_kernel), %rax
+	pushq	$(gdt_cs - gdt)
+	pushq	%rax
+	lretq
 
 identity_mapped:
 	/* store the start address on the stack */
@@ -262,5 +288,19 @@ identity_mapped:
 	xorq	%r13, %r13
 	xorq	%r14, %r14
 	xorq	%r15, %r15
-
 	ret
+
+	.align	16
+gdt:
+	.quad	0x0000000000000000	/* NULL descriptor */
+gdt_cs:
+	.quad	0x00af9a000000ffff
+gdt_end:
+
+gdt_80:
+	.word	gdt_end - gdt - 1	/* limit */
+	.quad	0			/* base - filled in by code above */
+
+idt_80:
+	.word	0			/* limit */
+	.quad	0			/* base */
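
Also background rather than part of the patch: the single code descriptor
above, 0x00af9a000000ffff, describes a flat ring-0 64-bit code segment (in
long mode base and limit are ignored; the bits that matter are P=1, DPL=0,
a code type, and L=1), and the value pushed before lretq is its byte offset
in the table, gdt_cs - gdt = 0x08, i.e. selector 8 with RPL 0. The data
segment registers are loaded with the null selector beforehand, which is
legal in 64-bit mode at CPL 0 and is why the table needs no data descriptor.
A small stand-alone decoder, assuming only the descriptor layout from the
Intel SDM:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t gdt_cs = 0x00af9a000000ffffULL;	/* descriptor from the GDT above */

	unsigned base   = (unsigned)(((gdt_cs >> 16) & 0xffffffu) | ((gdt_cs >> 32) & 0xff000000u));
	unsigned limit  = (unsigned)((gdt_cs & 0xffffu) | ((gdt_cs >> 32) & 0xf0000u));
	unsigned access = (unsigned)((gdt_cs >> 40) & 0xffu);	/* 0x9a: present, DPL 0, code, readable */
	unsigned flags  = (unsigned)((gdt_cs >> 52) & 0xfu);	/* 0xa: G=1, L=1 -> 64-bit code segment */

	printf("base=%#x limit=%#x access=%#x flags=%#x\n", base, limit, access, flags);
	printf("selector pushed for lretq: %#x\n", 0x08);	/* gdt_cs - gdt */
	return 0;
}

Expected output: base=0 limit=0xfffff access=0x9a flags=0xa, i.e. the flat
64-bit code segment described above.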