From: www.kernel.org
Subject: Update to 2.6.24
Patch-mainline: 2.6.24

Automatically created from "patches.kernel.org/patch-2.6.24" by xen-port-patches.py

Acked-by: jbeulich@novell.com

--- sle11-2009-10-16.orig/arch/x86/Kconfig 2009-02-05 10:22:38.000000000 +0100
+++ sle11-2009-10-16/arch/x86/Kconfig 2009-02-16 16:18:36.000000000 +0100
@@ -50,15 +50,16 @@ config GENERIC_CMOS_UPDATE

 config CLOCKSOURCE_WATCHDOG
 def_bool y
- depends on !X86_XEN
+ depends on !XEN

 config GENERIC_CLOCKEVENTS
 def_bool y
- depends on !X86_XEN
+ depends on !XEN

 config GENERIC_CLOCKEVENTS_BROADCAST
 def_bool y
- depends on X86_64 || (X86_32 && X86_LOCAL_APIC && !X86_XEN)
+ depends on X86_64 || (X86_32 && X86_LOCAL_APIC)
+ depends on !XEN

 config LOCKDEP_SUPPORT
 def_bool y
@@ -211,12 +212,12 @@ config X86_TRAMPOLINE

 config X86_NO_TSS
 bool
- depends on X86_XEN || X86_64_XEN
+ depends on XEN
 default y

 config X86_NO_IDT
 bool
- depends on X86_XEN || X86_64_XEN
+ depends on XEN
 default y

 config KTIME_SCALAR
@@ -287,6 +288,7 @@ config X86_PC

 config X86_XEN
 bool "Xen-compatible"
+ depends on X86_32
 select XEN
 select X86_PAE
 select X86_UP_APIC if !SMP && XEN_PRIVILEGED_GUEST
@@ -365,6 +367,7 @@ endif

 config X86_64_XEN
 bool "Enable Xen compatible kernel"
+ depends on X86_64
 select XEN
 select SWIOTLB
 help
@@ -417,7 +420,7 @@ config SCHED_NO_NO_OMIT_FRAME_POINTER

 menuconfig PARAVIRT_GUEST
 bool "Paravirtualized guest support"
- depends on !X86_XEN && !X86_64_XEN
+ depends on !XEN
 help
 Say Y here to get to see options related to running Linux under
 various hypervisors. This option alone does not add any kernel code.
@@ -511,7 +514,7 @@ source "arch/x86/Kconfig.cpu"
 config HPET_TIMER
 def_bool X86_64
 prompt "HPET Timer Support" if X86_32
- depends on !X86_XEN && !X86_64_XEN
+ depends on !XEN
 help
 Use the IA-PC HPET (High Precision Event Timer) to manage
 time in preference to the PIT and RTC, if a HPET is
@@ -831,7 +834,7 @@ config I8K
 config X86_REBOOTFIXUPS
 def_bool n
 prompt "Enable X86 board specific fixups for reboot"
- depends on X86_32 && !X86_XEN
+ depends on X86_32 && !XEN
 ---help---
 This enables chipset and/or board specific fixups to be done
 in order to get reboot to work correctly. This is only needed on
@@ -1164,7 +1167,7 @@ config X86_RESERVE_LOW_64K
 config MATH_EMULATION
 bool
 prompt "Math emulation" if X86_32
- depends on !X86_XEN
+ depends on !XEN
 ---help---
 Linux can emulate a math coprocessor (used for floating point
 operations) if you don't have one. 486DX and Pentium processors have
@@ -1272,7 +1275,7 @@ config X86_PAT
 config EFI
 def_bool n
 prompt "EFI runtime service support"
- depends on ACPI && !X86_XEN && !X86_64_XEN
+ depends on ACPI && !XEN
 ---help---
 This enables the kernel to use EFI runtime services that are
 available (such as the EFI variable services).
@@ -1287,7 +1290,7 @@ config EFI
 config IRQBALANCE
 def_bool y
 prompt "Enable kernel irq balancing"
- depends on X86_32 && SMP && X86_IO_APIC && !X86_XEN
+ depends on X86_32 && SMP && X86_IO_APIC && !XEN
 help
 The default yes will allow the kernel to do irq load balancing.
 Saying no will keep the kernel from doing irq load balancing.
@@ -1433,7 +1436,7 @@ config PHYSICAL_START

 config RELOCATABLE
 bool "Build a relocatable kernel (EXPERIMENTAL)"
- depends on EXPERIMENTAL && !X86_XEN && !X86_64_XEN
+ depends on EXPERIMENTAL && !XEN
 help
 This builds a kernel image that retains relocation information
 so it can be loaded someplace besides the default 1MB.
@@ -1503,6 +1506,7 @@ endmenu
 config ARCH_ENABLE_MEMORY_HOTPLUG
 def_bool y
 depends on X86_64 || (X86_32 && HIGHMEM)
+ depends on !XEN

 config HAVE_ARCH_EARLY_PFN_TO_NID
 def_bool X86_64
@@ -1693,7 +1697,7 @@ choice

 config PCI_GOBIOS
 bool "BIOS"
- depends on !X86_XEN
+ depends on !XEN

 config PCI_GOMMCONFIG
 bool "MMConfig"
@@ -1744,7 +1748,7 @@ config PCI_MMCONFIG

 config XEN_PCIDEV_FRONTEND
 bool "Xen PCI Frontend" if X86_64
- depends on PCI && ((X86_XEN && (PCI_GOXEN_FE || PCI_GOANY)) || X86_64_XEN)
+ depends on PCI && XEN && (PCI_GOXEN_FE || PCI_GOANY || X86_64)
 select HOTPLUG
 default y
 help
@@ -1761,6 +1765,7 @@ config XEN_PCIDEV_FE_DEBUG
 config DMAR
 bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
 depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
+ depends on !XEN
 help
 DMA remapping (DMAR) devices support enables independent address
 translations for Direct Memory Access (DMA) from devices.
--- sle11-2009-10-16.orig/arch/x86/Makefile 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/Makefile 2009-02-16 16:18:36.000000000 +0100
@@ -191,8 +191,8 @@ PHONY += zImage bzImage vmlinuz compress
 zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install

 ifdef CONFIG_XEN
-CPPFLAGS := -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION) \
- -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
+KBUILD_CPPFLAGS := -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION) \
+ -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(KBUILD_CPPFLAGS)

 ifdef CONFIG_X86_64
 LDFLAGS_vmlinux := -e startup_64
@@ -206,6 +206,8 @@ KBUILD_IMAGE := $(boot)/vmlinuz

 vmlinuz: vmlinux
 $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
+ $(Q)mkdir -p $(objtree)/arch/$(UTS_MACHINE)/boot
+ $(Q)ln -fsn ../../x86/boot/$@ $(objtree)/arch/$(UTS_MACHINE)/boot/$@
 else
 # Default kernel to build
 all: bzImage
--- sle11-2009-10-16.orig/arch/x86/ia32/ia32entry-xen.S 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/ia32/ia32entry-xen.S 2009-02-16 16:18:36.000000000 +0100
@@ -125,20 +125,16 @@ sysenter_do_call:
 jmp int_ret_from_sys_call

 sysenter_tracesys:
+ xchgl %r9d,%ebp
 SAVE_REST
 CLEAR_RREGS
+ movq %r9,R9(%rsp)
 movq $-ENOSYS,RAX(%rsp) /* really needed? */
 movq %rsp,%rdi /* &pt_regs -> arg1 */
 call syscall_trace_enter
 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
 RESTORE_REST
- movl %ebp, %ebp
- /* no need to do an access_ok check here because rbp has been
- 32bit zero extended */
-1: movl (%rbp),%r9d
- .section __ex_table,"a"
- .quad 1b,ia32_badarg
- .previous
+ xchgl %ebp,%r9d
 jmp sysenter_do_call
 CFI_ENDPROC
 ENDPROC(ia32_sysenter_target)
@@ -200,20 +196,17 @@ cstar_do_call:
 jmp int_ret_from_sys_call

 cstar_tracesys:
+ xchgl %r9d,%ebp
 SAVE_REST
 CLEAR_RREGS
+ movq %r9,R9(%rsp)
 movq $-ENOSYS,RAX(%rsp) /* really needed? */
 movq %rsp,%rdi /* &pt_regs -> arg1 */
 call syscall_trace_enter
 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
 RESTORE_REST
+ xchgl %ebp,%r9d
 movl RSP-ARGOFFSET(%rsp), %r8d
- /* no need to do an access_ok check here because r8 has been
- 32bit zero extended */
-1: movl (%r8),%r9d
- .section __ex_table,"a"
- .quad 1b,ia32_badarg
- .previous
 jmp cstar_do_call
 END(ia32_cstar_target)

--- sle11-2009-10-16.orig/arch/x86/kernel/Makefile 2008-12-15 11:27:22.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/Makefile 2009-02-16 16:18:36.000000000 +0100
@@ -127,4 +127,4 @@ endif
 disabled-obj-$(CONFIG_XEN) := early-quirks.o hpet.o i8253.o i8259_$(BITS).o reboot.o \
 smpboot_$(BITS).o tsc_$(BITS).o tsc_sync.o
 disabled-obj-$(CONFIG_XEN_UNPRIVILEGED_GUEST) += mpparse_64.o
-%/head_64.o %/head_64.s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) :=
+%/head_64.o %/head_64.s: asflags-$(CONFIG_XEN) :=
--- sle11-2009-10-16.orig/arch/x86/kernel/acpi/sleep_32-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/acpi/sleep_32-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -90,7 +90,7 @@ __setup("acpi_sleep=", acpi_sleep_setup)

 /* Ouch, we want to delete this. We already have better version in userspace, in
 s2ram from suspend.sf.net project */
-static __init int reset_videomode_after_s3(struct dmi_system_id *d)
+static __init int reset_videomode_after_s3(const struct dmi_system_id *d)
 {
 acpi_realmode_flags |= 2;
 return 0;
--- sle11-2009-10-16.orig/arch/x86/kernel/acpi/sleep_64-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/acpi/sleep_64-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -123,6 +123,3 @@ static int __init acpi_sleep_setup(char
 __setup("acpi_sleep=", acpi_sleep_setup);
 #endif /* CONFIG_ACPI_PV_SLEEP */

-void acpi_pci_link_exit(void)
-{
-}
--- sle11-2009-10-16.orig/arch/x86/kernel/apic_64-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/apic_64-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -63,22 +63,38 @@ int setup_profiling_timer(unsigned int m

 void smp_local_timer_interrupt(void)
 {
- profile_tick(CPU_PROFILING);
 #ifndef CONFIG_XEN
-#ifdef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
-#endif
+ int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
+
 /*
- * We take the 'long' return path, and there every subsystem
- * grabs the appropriate locks (kernel lock/ irq lock).
+ * Normally we should not be here till LAPIC has been initialized but
+ * in some cases like kdump, its possible that there is a pending LAPIC
+ * timer interrupt from previous kernel's context and is delivered in
+ * new kernel the moment interrupts are enabled.
 *
- * We might want to decouple profiling from the 'long path',
- * and do the profiling totally in assembly.
- *
- * Currently this isn't too much of an issue (performance wise),
- * we can take more than 100K local irqs per second on a 100 MHz P5.
+ * Interrupts are enabled early and LAPIC is setup much later, hence
+ * its possible that when we get here evt->event_handler is NULL.
+ * Check for event_handler being NULL and discard the interrupt as
+ * spurious.
+ */
+ if (!evt->event_handler) {
+ printk(KERN_WARNING
+ "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
+ /* Switch it off */
+ lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
+ return;
+ }
+#endif
+
+ /*
+ * the NMI deadlock-detector uses this.
 */
+ add_pda(apic_timer_irqs, 1);
+
+#ifndef CONFIG_XEN
+ evt->event_handler(evt);
+#endif
 }

 /*
@@ -94,11 +110,6 @@ void smp_apic_timer_interrupt(struct pt_
 struct pt_regs *old_regs = set_irq_regs(regs);

 /*
- * the NMI deadlock-detector uses this.
- */
- add_pda(apic_timer_irqs, 1);
-
- /*
 * NOTE! We'd better ACK the irq immediately,
 * because timer handling can be slow.
 */
@@ -132,6 +143,7 @@ asmlinkage void smp_spurious_interrupt(v
 if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
 ack_APIC_irq();

+ add_pda(irq_spurious_count, 1);
 irq_exit();
 }

--- sle11-2009-10-16.orig/arch/x86/kernel/cpu/common-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/cpu/common-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -214,7 +214,7 @@ static void __cpuinit get_cpu_vendor(str

 static int __init x86_fxsr_setup(char * s)
 {
- /* Tell all the other CPU's to not use it... */
+ /* Tell all the other CPUs to not use it... */
 disable_x86_fxsr = 1;

 /*
--- sle11-2009-10-16.orig/arch/x86/kernel/e820_32-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/e820_32-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -52,6 +52,13 @@ struct resource code_resource = {
 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
 };

+struct resource bss_resource = {
+ .name = "Kernel bss",
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
 static struct resource system_rom_resource = {
 .name = "System ROM",
 .start = 0xf0000,
@@ -266,7 +273,9 @@ static struct e820map machine_e820;
 * and also for regions reported as reserved by the e820.
 */
 static void __init
-legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+legacy_init_iomem_resources(struct resource *code_resource,
+ struct resource *data_resource,
+ struct resource *bss_resource)
 {
 int i;

@@ -300,9 +309,11 @@ legacy_init_iomem_resources(struct resou
 #ifndef CONFIG_XEN
 request_resource(res, code_resource);
 request_resource(res, data_resource);
+ request_resource(res, bss_resource);
 #endif
 #ifdef CONFIG_KEXEC
- request_resource(res, &crashk_res);
+ if (crashk_res.start != crashk_res.end)
+ request_resource(res, &crashk_res);
 #ifdef CONFIG_XEN
 xen_machine_kexec_register_resources(res);
 #endif
@@ -329,9 +340,11 @@ static int __init request_standard_resou

 printk("Setting up standard PCI resources\n");
 if (efi_enabled)
- efi_initialize_iomem_resources(&code_resource, &data_resource);
+ efi_initialize_iomem_resources(&code_resource,
+ &data_resource, &bss_resource);
 else
- legacy_init_iomem_resources(&code_resource, &data_resource);
+ legacy_init_iomem_resources(&code_resource,
+ &data_resource, &bss_resource);

 /* EFI systems may still have VGA */
 request_resource(&iomem_resource, &video_ram_resource);
@@ -774,7 +787,7 @@ void __init e820_register_memory(void)
 #endif

 /*
- * Search for the bigest gap in the low 32 bits of the e820
+ * Search for the biggest gap in the low 32 bits of the e820
 * memory space.
 */
 last = 0x100000000ull;
--- sle11-2009-10-16.orig/arch/x86/kernel/e820_64-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/e820_64-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -24,7 +24,7 @@
 #include <asm/page.h>
 #include <asm/e820.h>
 #include <asm/proto.h>
-#include <asm/bootsetup.h>
+#include <asm/setup.h>
 #include <asm/sections.h>
 #include <xen/interface/memory.h>

@@ -51,7 +51,7 @@ unsigned long end_pfn_map;
 */
 static unsigned long __initdata end_user_pfn = MAXMEM>>PAGE_SHIFT;

-extern struct resource code_resource, data_resource;
+extern struct resource code_resource, data_resource, bss_resource;

 /* Check for some hardcoded bad areas that early boot is not allowed to touch */
 static inline int bad_addr(unsigned long *addrp, unsigned long size)
@@ -73,10 +73,15 @@ static inline int bad_addr(unsigned long

 /* initrd */
 #ifdef CONFIG_BLK_DEV_INITRD
- if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
- addr < INITRD_START+INITRD_SIZE) {
- *addrp = PAGE_ALIGN(INITRD_START + INITRD_SIZE);
- return 1;
+ if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+ unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
+ unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
+ unsigned long ramdisk_end = ramdisk_image+ramdisk_size;
+
+ if (last >= ramdisk_image && addr < ramdisk_end) {
+ *addrp = PAGE_ALIGN(ramdisk_end);
+ return 1;
+ }
 }
 #endif
 /* kernel code */
@@ -249,6 +254,7 @@ void __init e820_reserve_resources(struc
 #ifndef CONFIG_XEN
 request_resource(res, &code_resource);
 request_resource(res, &data_resource);
+ request_resource(res, &bss_resource);
 #endif
 #ifdef CONFIG_KEXEC
 if (crashk_res.start != crashk_res.end)
@@ -650,8 +656,8 @@ void __init setup_memory_region(void)
 * Otherwise fake a memory map; one section from 0k->640k,
 * the next section from 1mb->appropriate_mem_k
 */
- sanitize_e820_map(E820_MAP, &E820_MAP_NR);
- if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0)
+ sanitize_e820_map(boot_params.e820_map, &boot_params.e820_entries);
+ if (copy_e820_map(boot_params.e820_map, boot_params.e820_entries) < 0)
 early_panic("Cannot find a valid memory map");
 printk(KERN_INFO "BIOS-provided physical RAM map:\n");
 e820_print_map("BIOS-e820");
@@ -836,3 +842,22 @@ __init void e820_setup_gap(struct e820en
 printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
 pci_mem_start, gapstart, gapsize);
 }
+
+int __init arch_get_ram_range(int slot, u64 *addr, u64 *size)
+{
+ int i;
+
+ if (slot < 0 || slot >= e820.nr_map)
+ return -1;
+ for (i = slot; i < e820.nr_map; i++) {
+ if (e820.map[i].type != E820_RAM)
+ continue;
+ break;
+ }
+ if (i == e820.nr_map || e820.map[i].addr > (max_pfn << PAGE_SHIFT))
+ return -1;
+ *addr = e820.map[i].addr;
+ *size = min_t(u64, e820.map[i].size + e820.map[i].addr,
+ max_pfn << PAGE_SHIFT) - *addr;
+ return i + 1;
+}
--- sle11-2009-10-16.orig/arch/x86/kernel/early_printk-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/early_printk-xen.c 2009-09-24 10:27:18.000000000 +0200
@@ -6,15 +6,10 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/fcntl.h>
-
-/* Simple VGA output */
-
-#ifdef __i386__
 #include <asm/setup.h>
-#else
-#include <asm/bootsetup.h>
-#endif
+
 #ifndef CONFIG_XEN
+/* Simple VGA output */
 #define VGABASE (__ISA_IO_base + 0xb8000)

 static int max_ypos = 25, max_xpos = 80;
@@ -264,10 +259,10 @@ static int __init setup_early_printk(cha
 early_console = &early_serial_console;
 } else if (!strncmp(buf, "vga", 3)) {
 #ifndef CONFIG_XEN
- && SCREEN_INFO.orig_video_isVGA == 1) {
- max_xpos = SCREEN_INFO.orig_video_cols;
- max_ypos = SCREEN_INFO.orig_video_lines;
- current_ypos = SCREEN_INFO.orig_y;
+ && boot_params.screen_info.orig_video_isVGA == 1) {
+ max_xpos = boot_params.screen_info.orig_video_cols;
+ max_ypos = boot_params.screen_info.orig_video_lines;
+ current_ypos = boot_params.screen_info.orig_y;
 #endif
 early_console = &early_vga_console;
 } else if (!strncmp(buf, "simnow", 6)) {
--- sle11-2009-10-16.orig/arch/x86/kernel/entry_32-xen.S 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/entry_32-xen.S 2009-05-14 11:18:18.000000000 +0200
@@ -254,6 +254,7 @@ check_userspace:
 jb resume_kernel # not returning to v8086 or userspace

 ENTRY(resume_userspace)
+ LOCKDEP_SYS_EXIT
 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
 # setting need_resched or sigpending
 # between sampling and the iret
@@ -341,6 +342,7 @@ sysenter_past_esp:
 jae syscall_badsys
 call *sys_call_table(,%eax,4)
 movl %eax,PT_EAX(%esp)
+ LOCKDEP_SYS_EXIT
 DISABLE_INTERRUPTS(CLBR_ANY)
 TRACE_IRQS_OFF
 movl TI_flags(%ebp), %ecx
@@ -406,6 +408,7 @@ syscall_call:
 call *sys_call_table(,%eax,4)
 movl %eax,PT_EAX(%esp) # store the return value
 syscall_exit:
+ LOCKDEP_SYS_EXIT
 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
 # setting need_resched or sigpending
 # between sampling and the iret
@@ -478,7 +481,7 @@ ldt_ss:
 * is still available to implement the setting of the high
 * 16-bits in the INTERRUPT_RETURN paravirt-op.
 */
- cmpl $0, paravirt_ops+PARAVIRT_enabled
+ cmpl $0, pv_info+PARAVIRT_enabled
 jne restore_nocheck
 #endif

@@ -540,6 +543,7 @@ work_pending:
 jz work_notifysig
 work_resched:
 call schedule
+ LOCKDEP_SYS_EXIT
 DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
 # setting need_resched or sigpending
 # between sampling and the iret
@@ -1268,6 +1272,6 @@ ENTRY(kernel_thread_helper)
 ENDPROC(kernel_thread_helper)

 .section .rodata,"a"
-#include "syscall_table.S"
+#include "syscall_table_32.S"

 syscall_table_size=(.-sys_call_table)
--- sle11-2009-10-16.orig/arch/x86/kernel/entry_64-xen.S 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/entry_64-xen.S 2009-02-16 16:18:36.000000000 +0100
@@ -57,7 +57,7 @@
 #include <xen/interface/arch-x86_64.h>
 #include <xen/interface/features.h>

-#include "xen_entry.S"
+#include "xen_entry_64.S"

 .code64

@@ -275,6 +275,7 @@ ret_from_sys_call:
 movl $_TIF_ALLWORK_MASK,%edi
 /* edi: flagmask */
 sysret_check:
+ LOCKDEP_SYS_EXIT
 GET_THREAD_INFO(%rcx)
 XEN_BLOCK_EVENTS(%rsi)
 TRACE_IRQS_OFF
@@ -365,6 +366,7 @@ int_ret_from_sys_call:
 movl $_TIF_ALLWORK_MASK,%edi
 /* edi: mask to check */
 int_with_check:
+ LOCKDEP_SYS_EXIT_IRQ
 GET_THREAD_INFO(%rcx)
 movl threadinfo_flags(%rcx),%edx
 andl %edi,%edx
@@ -516,11 +518,12 @@ END(stub_rt_sigreturn)

 retint_check:
 CFI_DEFAULT_STACK adj=1
+ LOCKDEP_SYS_EXIT_IRQ
 movl threadinfo_flags(%rcx),%edx
 andl %edi,%edx
 CFI_REMEMBER_STATE
 jnz retint_careful
-retint_restore_args:
+retint_restore_args: /* return to kernel space */
 movl EFLAGS-REST_SKIP(%rsp), %eax
 shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
 XEN_GET_VCPU_INFO(%rsi)
@@ -841,7 +844,7 @@ error_call_handler:
 movq ORIG_RAX(%rsp),%rsi # get error code
 movq $-1,ORIG_RAX(%rsp)
 call *%rax
-error_exit:
+error_exit:
 RESTORE_REST
 /* cli */
 XEN_BLOCK_EVENTS(%rsi)
@@ -849,14 +852,11 @@ error_exit:
 GET_THREAD_INFO(%rcx)
 testb $3,CS-ARGOFFSET(%rsp)
 jz retint_kernel
+ LOCKDEP_SYS_EXIT_IRQ
 movl threadinfo_flags(%rcx),%edx
 movl $_TIF_WORK_MASK,%edi
 andl %edi,%edx
 jnz retint_careful
- /*
- * The iret might restore flags:
- */
- TRACE_IRQS_IRETQ
 jmp retint_restore_args

 #if 0
@@ -1071,7 +1071,7 @@ child_rip:
 movq %rsi, %rdi
 call *%rax
 # exit
- xorl %edi, %edi
+ mov %eax, %edi
 call do_exit
 CFI_ENDPROC
 ENDPROC(child_rip)
--- sle11-2009-10-16.orig/arch/x86/kernel/genapic_64-xen.c 2008-12-15 11:27:22.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/genapic_64-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -24,12 +24,21 @@
 #include <acpi/acpi_bus.h>
 #endif

-/* which logical CPU number maps to which CPU (physical APIC ID) */
-u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly
+/*
+ * which logical CPU number maps to which CPU (physical APIC ID)
+ *
+ * The following static array is used during kernel startup
+ * and the x86_cpu_to_apicid_ptr contains the address of the
+ * array during this time. Is it zeroed when the per_cpu
+ * data area is removed.
+ */
+#ifndef CONFIG_XEN
+u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata
 = { [0 ... NR_CPUS-1] = BAD_APICID };
-EXPORT_SYMBOL(x86_cpu_to_apicid);
-
-u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+void *x86_cpu_to_apicid_ptr;
+#endif
+DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);

 #ifndef CONFIG_XEN
 struct genapic __read_mostly *genapic = &apic_flat;
--- sle11-2009-10-16.orig/arch/x86/kernel/head64-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/head64-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -1,5 +1,5 @@
 /*
- * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
+ * prepare to run common code
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
@@ -21,7 +21,6 @@
 #include <asm/processor.h>
 #include <asm/proto.h>
 #include <asm/smp.h>
-#include <asm/bootsetup.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
 #include <asm/pgtable.h>
@@ -47,27 +46,16 @@ static void __init clear_bss(void)
 }
 #endif

-#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
-#define OLD_CL_MAGIC_ADDR 0x20
-#define OLD_CL_MAGIC 0xA33F
-#define OLD_CL_OFFSET 0x22
-
 static void __init copy_bootdata(char *real_mode_data)
 {
 #ifndef CONFIG_XEN
- unsigned long new_data;
 char * command_line;

- memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
- new_data = *(u32 *) (x86_boot_params + NEW_CL_POINTER);
- if (!new_data) {
- if (OLD_CL_MAGIC != *(u16 *)(real_mode_data + OLD_CL_MAGIC_ADDR)) {
- return;
- }
- new_data = __pa(real_mode_data) + *(u16 *)(real_mode_data + OLD_CL_OFFSET);
+ memcpy(&boot_params, real_mode_data, sizeof boot_params);
+ if (boot_params.hdr.cmd_line_ptr) {
+ command_line = __va(boot_params.hdr.cmd_line_ptr);
+ memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
 }
- command_line = __va(new_data);
- memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
 #else
 int max_cmdline;

@@ -117,7 +105,7 @@ void __init x86_64_start_kernel(char * r

 for (i = 0; i < IDT_ENTRIES; i++)
 set_intr_gate(i, early_idt_handler);
- asm volatile("lidt %0" :: "m" (idt_descr));
+ load_idt((const struct desc_ptr *)&idt_descr);
 #endif

 early_printk("Kernel alive\n");
--- sle11-2009-10-16.orig/arch/x86/kernel/init_task-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/init_task-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -14,11 +14,11 @@ static struct fs_struct init_fs = INIT_F
 static struct files_struct init_files = INIT_FILES;
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-
+#ifdef CONFIG_X86_XEN
 #define swapper_pg_dir ((pgd_t *)NULL)
+#endif
 struct mm_struct init_mm = INIT_MM(init_mm);
 #undef swapper_pg_dir
-
 EXPORT_SYMBOL(init_mm);

 /*
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(init_mm);
 * way process stacks are handled. This is done by having a special
 * "init_task" linker map entry..
 */
-union thread_union init_thread_union
+union thread_union init_thread_union
 __attribute__((__section__(".data.init_task"))) =
 { INIT_THREAD_INFO(init_task) };

@@ -38,14 +38,15 @@ union thread_union init_thread_union
 * All other task structs will be allocated on slabs in fork.c
 */
 struct task_struct init_task = INIT_TASK(init_task);
-
 EXPORT_SYMBOL(init_task);

 #ifndef CONFIG_X86_NO_TSS
 /*
- * per-CPU TSS segments. Threads are completely 'soft' on Linux,
- * no more per-task TSS's.
- */
+ * no more per-task TSS's. The TSS size is kept cacheline-aligned
+ * so they are allowed to end up in the .data.cacheline_aligned
+ * section. Since TSS's are completely CPU-local, we want them
+ * on exact cacheline boundaries, to eliminate cacheline ping-pong.
+ */
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
 #endif

--- sle11-2009-10-16.orig/arch/x86/kernel/io_apic_32-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/io_apic_32-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -427,7 +427,7 @@ static struct irq_cpu_info {

 #define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)

-#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i)))

 static cpumask_t balance_irq_affinity[NR_IRQS] = {
 [0 ... NR_IRQS-1] = CPU_MASK_ALL
@@ -633,7 +633,7 @@ tryanotherirq:

 imbalance = move_this_load;

- /* For physical_balance case, we accumlated both load
+ /* For physical_balance case, we accumulated both load
 * values in the one of the siblings cpu_irq[],
 * to use the same code for physical and logical processors
 * as much as possible.
@@ -647,7 +647,7 @@ tryanotherirq:
 * (A+B)/2 vs B
 */
 load = CPU_IRQ(min_loaded) >> 1;
- for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
+ for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) {
 if (load > CPU_IRQ(j)) {
 /* This won't change cpu_sibling_map[min_loaded] */
 load = CPU_IRQ(j);
@@ -1018,7 +1018,7 @@ static int EISA_ELCR(unsigned int irq)
 #define default_MCA_trigger(idx) (1)
 #define default_MCA_polarity(idx) (0)

-static int __init MPBIOS_polarity(int idx)
+static int MPBIOS_polarity(int idx)
 {
 int bus = mp_irqs[idx].mpc_srcbus;
 int polarity;
@@ -1347,6 +1347,11 @@ static void __init setup_IO_APIC_irqs(vo
 continue;
 }

+ if (!first_notcon) {
+ apic_printk(APIC_VERBOSE, " not connected.\n");
+ first_notcon = 1;
+ }
+
 entry.trigger = irq_trigger(idx);
 entry.polarity = irq_polarity(idx);

@@ -1936,13 +1941,16 @@ __setup("no_timer_check", notimercheck);
 static int __init timer_irq_works(void)
 {
 unsigned long t1 = jiffies;
+ unsigned long flags;

 if (no_timer_check)
 return 1;

+ local_save_flags(flags);
 local_irq_enable();
 /* Let ten ticks pass... */
 mdelay((10 * 1000) / HZ);
+ local_irq_restore(flags);

 /*
 * Expect a few ticks at least, to be sure some possible
@@ -2223,6 +2231,9 @@ static inline void __init check_timer(vo
 {
 int apic1, pin1, apic2, pin2;
 int vector;
+ unsigned long flags;
+
+ local_irq_save(flags);

 /*
 * get/set the timer IRQ vector:
@@ -2268,7 +2279,7 @@ static inline void __init check_timer(vo
 }
 if (disable_timer_pin_1 > 0)
 clear_IO_APIC_pin(0, pin1);
- return;
+ goto out;
 }
 clear_IO_APIC_pin(apic1, pin1);
 printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
@@ -2291,7 +2302,7 @@ static inline void __init check_timer(vo
 if (nmi_watchdog == NMI_IO_APIC) {
 setup_nmi();
 }
- return;
+ goto out;
 }
 /*
 * Cleanup, just in case ...
@@ -2315,7 +2326,7 @@ static inline void __init check_timer(vo

 if (timer_irq_works()) {
 printk(" works.\n");
- return;
+ goto out;
 }
 apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
 printk(" failed.\n");
@@ -2331,11 +2342,13 @@ static inline void __init check_timer(vo

 if (timer_irq_works()) {
 printk(" works.\n");
- return;
+ goto out;
 }
 printk(" failed :(.\n");
 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
 "report. Then try booting with the 'noapic' option");
+out:
+ local_irq_restore(flags);
 }
 #else
 int timer_uses_ioapic_pin_0 = 0;
@@ -2353,6 +2366,14 @@ int timer_uses_ioapic_pin_0 = 0;

 void __init setup_IO_APIC(void)
 {
+#ifndef CONFIG_XEN
+ int i;
+
+ /* Reserve all the system vectors. */
+ for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
+ set_bit(i, used_vectors);
+#endif
+
 enable_IO_APIC();

 if (acpi_ioapic)
@@ -2542,7 +2563,7 @@ void destroy_irq(unsigned int irq)
 #endif /* CONFIG_XEN */

 /*
- * MSI mesage composition
+ * MSI message composition
 */
 #if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN)
 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
@@ -2899,6 +2920,25 @@ int io_apic_set_pci_routing (int ioapic,
 return 0;
 }

+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+ int i;
+
+ if (skip_ioapic_setup)
+ return -1;
+
+ for (i = 0; i < mp_irq_entries; i++)
+ if (mp_irqs[i].mpc_irqtype == mp_INT &&
+ mp_irqs[i].mpc_srcbusirq == bus_irq)
+ break;
+ if (i >= mp_irq_entries)
+ return -1;
+
+ *trigger = irq_trigger(i);
+ *polarity = irq_polarity(i);
+ return 0;
+}
+
 #endif /* CONFIG_ACPI */

 static int __init parse_disable_timer_pin_1(char *arg)
--- sle11-2009-10-16.orig/arch/x86/kernel/io_apic_64-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/io_apic_64-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -31,6 +31,7 @@
 #include <linux/sysdev.h>
 #include <linux/msi.h>
 #include <linux/htirq.h>
+#include <linux/dmar.h>
 #ifdef CONFIG_ACPI
 #include <acpi/acpi_bus.h>
 #endif
@@ -584,7 +585,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
 #define default_PCI_trigger(idx) (1)
 #define default_PCI_polarity(idx) (1)

-static int __init MPBIOS_polarity(int idx)
+static int MPBIOS_polarity(int idx)
 {
 int bus = mp_irqs[idx].mpc_srcbus;
 int polarity;
@@ -871,6 +872,10 @@ static void __init setup_IO_APIC_irqs(vo
 apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
 continue;
 }
+ if (!first_notcon) {
+ apic_printk(APIC_VERBOSE, " not connected.\n");
+ first_notcon = 1;
+ }

 irq = pin_2_irq(idx, apic, pin);
 add_pin_to_irq(irq, apic, pin);
@@ -881,7 +886,7 @@ static void __init setup_IO_APIC_irqs(vo
 }

 if (!first_notcon)
- apic_printk(APIC_VERBOSE," not connected.\n");
+ apic_printk(APIC_VERBOSE, " not connected.\n");
 }

 #ifndef CONFIG_XEN
@@ -1277,10 +1282,13 @@ void disable_IO_APIC(void)
 static int __init timer_irq_works(void)
 {
 unsigned long t1 = jiffies;
+ unsigned long flags;

+ local_save_flags(flags);
 local_irq_enable();
 /* Let ten ticks pass... */
 mdelay((10 * 1000) / HZ);
+ local_irq_restore(flags);

 /*
 * Expect a few ticks at least, to be sure some possible
@@ -1655,6 +1663,9 @@ static inline void check_timer(void)
 {
 struct irq_cfg *cfg = irq_cfg + 0;
 int apic1, pin1, apic2, pin2;
+ unsigned long flags;
+
+ local_irq_save(flags);

 /*
 * get/set the timer IRQ vector:
@@ -1696,7 +1707,7 @@ static inline void check_timer(void)
 }
 if (disable_timer_pin_1 > 0)
 clear_IO_APIC_pin(0, pin1);
- return;
+ goto out;
 }
 clear_IO_APIC_pin(apic1, pin1);
 apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
@@ -1718,7 +1729,7 @@ static inline void check_timer(void)
 if (nmi_watchdog == NMI_IO_APIC) {
 setup_nmi();
 }
- return;
+ goto out;
 }
 /*
 * Cleanup, just in case ...
@@ -1741,7 +1752,7 @@ static inline void check_timer(void)

 if (timer_irq_works()) {
 apic_printk(APIC_VERBOSE," works.\n");
- return;
+ goto out;
 }
 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
 apic_printk(APIC_VERBOSE," failed.\n");
@@ -1756,10 +1767,12 @@ static inline void check_timer(void)

 if (timer_irq_works()) {
 apic_printk(APIC_VERBOSE," works.\n");
- return;
+ goto out;
 }
 apic_printk(APIC_VERBOSE," failed :(.\n");
 panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
+out:
+ local_irq_restore(flags);
 }
 #else
 #define check_timer() ((void)0)
@@ -1775,7 +1788,7 @@ __setup("no_timer_check", notimercheck);

 /*
 *
- * IRQ's that are handled by the PIC in the MPS IOAPIC case.
+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
 * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ.
 * Linux doesn't really care, as it's not actually used
 * for any interrupt handling anyway.
@@ -1858,7 +1871,7 @@ static struct sysdev_class ioapic_sysdev
 static int __init ioapic_init_sysfs(void)
 {
 struct sys_device * dev;
- int i, size, error = 0;
+ int i, size, error;

 error = sysdev_class_register(&ioapic_sysdev_class);
 if (error)
@@ -1867,12 +1880,11 @@ static int __init ioapic_init_sysfs(void
 for (i = 0; i < nr_ioapics; i++ ) {
 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
 * sizeof(struct IO_APIC_route_entry);
- mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
+ mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
 if (!mp_ioapic_data[i]) {
 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
 continue;
 }
- memset(mp_ioapic_data[i], 0, size);
 dev = &mp_ioapic_data[i]->dev;
 dev->id = i;
 dev->cls = &ioapic_sysdev_class;
@@ -1933,7 +1945,7 @@ void destroy_irq(unsigned int irq)
 #endif /* CONFIG_XEN */

 /*
- * MSI mesage composition
+ * MSI message composition
 */
 #if defined(CONFIG_PCI_MSI) && !defined(CONFIG_XEN)
 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
@@ -2043,8 +2055,64 @@ void arch_teardown_msi_irq(unsigned int
 destroy_irq(irq);
 }

-#endif /* CONFIG_PCI_MSI */
+#ifdef CONFIG_DMAR
+#ifdef CONFIG_SMP
+static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
+{
+ struct irq_cfg *cfg = irq_cfg + irq;
+ struct msi_msg msg;
+ unsigned int dest;
+ cpumask_t tmp;
+
+ cpus_and(tmp, mask, cpu_online_map);
+ if (cpus_empty(tmp))
+ return;
+
+ if (assign_irq_vector(irq, mask))
+ return;
+
+ cpus_and(tmp, cfg->domain, mask);
+ dest = cpu_mask_to_apicid(tmp);
+
+ dmar_msi_read(irq, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
+ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+ dmar_msi_write(irq, &msg);
+ irq_desc[irq].affinity = mask;
+}
+#endif /* CONFIG_SMP */
+
+struct irq_chip dmar_msi_type = {
+ .name = "DMAR_MSI",
+ .unmask = dmar_msi_unmask,
+ .mask = dmar_msi_mask,
+ .ack = ack_apic_edge,
+#ifdef CONFIG_SMP
+ .set_affinity = dmar_msi_set_affinity,
+#endif
+ .retrigger = ioapic_retrigger_irq,
+};
+
+int arch_setup_dmar_msi(unsigned int irq)
+{
+ int ret;
+ struct msi_msg msg;
+
+ ret = msi_compose_msg(NULL, irq, &msg);
+ if (ret < 0)
+ return ret;
+ dmar_msi_write(irq, &msg);
+ set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+ "edge");
+ return 0;
+}
+#endif

+#endif /* CONFIG_PCI_MSI */
 /*
 * Hypertransport interrupt support
 */
@@ -2177,8 +2245,27 @@ int io_apic_set_pci_routing (int ioapic,
 return 0;
 }

-#endif /* CONFIG_ACPI */

+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+ int i;
+
+ if (skip_ioapic_setup)
+ return -1;
+
+ for (i = 0; i < mp_irq_entries; i++)
+ if (mp_irqs[i].mpc_irqtype == mp_INT &&
+ mp_irqs[i].mpc_srcbusirq == bus_irq)
+ break;
+ if (i >= mp_irq_entries)
+ return -1;
+
+ *trigger = irq_trigger(i);
+ *polarity = irq_polarity(i);
+ return 0;
+}
+
+#endif /* CONFIG_ACPI */

 #ifndef CONFIG_XEN
 /*
@@ -2217,3 +2304,4 @@ void __init setup_ioapic_dest(void)
 }
 #endif
 #endif /* !CONFIG_XEN */
+
--- sle11-2009-10-16.orig/arch/x86/kernel/ioport_32-xen.c 2008-12-15 11:27:22.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/ioport_32-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -1,6 +1,4 @@
 /*
- * linux/arch/i386/kernel/ioport.c
- *
 * This contains the io-permission bitmap code - written by obz, with changes
 * by Linus.
 */
--- sle11-2009-10-16.orig/arch/x86/kernel/ioport_64-xen.c 2008-12-15 11:27:22.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/ioport_64-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -1,6 +1,4 @@
 /*
- * linux/arch/x86_64/kernel/ioport.c
- *
 * This contains the io-permission bitmap code - written by obz, with changes
 * by Linus.
 */
--- sle11-2009-10-16.orig/arch/x86/kernel/irq_32-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/irq_32-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -1,6 +1,4 @@
 /*
- * linux/arch/i386/kernel/irq.c
- *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
@@ -231,8 +229,6 @@ asmlinkage void do_softirq(void)

 local_irq_restore(flags);
 }
-
-EXPORT_SYMBOL(do_softirq);
 #endif

 /*
@@ -259,9 +255,17 @@ int show_interrupts(struct seq_file *p,
 }

 if (i < NR_IRQS) {
+ unsigned any_count = 0;
+
 spin_lock_irqsave(&irq_desc[i].lock, flags);
+#ifndef CONFIG_SMP
+ any_count = kstat_irqs(i);
+#else
+ for_each_online_cpu(j)
+ any_count |= kstat_cpu(j).irqs[i];
+#endif
 action = irq_desc[i].action;
- if (!action)
+ if (!action && !any_count)
 goto skip;
 seq_printf(p, "%3d: ",i);
 #ifndef CONFIG_SMP
@@ -272,10 +276,12 @@ int show_interrupts(struct seq_file *p,
 #endif
 seq_printf(p, " %8s", irq_desc[i].chip->name);
 seq_printf(p, "-%-8s", irq_desc[i].name);
- seq_printf(p, " %s", action->name);

- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
+ if (action) {
+ seq_printf(p, " %s", action->name);
+ while ((action = action->next) != NULL)
+ seq_printf(p, ", %s", action->name);
+ }

 seq_putc(p, '\n');
 skip:
@@ -284,13 +290,46 @@ skip:
 seq_printf(p, "NMI: ");
 for_each_online_cpu(j)
 seq_printf(p, "%10u ", nmi_count(j));
- seq_putc(p, '\n');
+ seq_printf(p, " Non-maskable interrupts\n");
 #ifdef CONFIG_X86_LOCAL_APIC
 seq_printf(p, "LOC: ");
 for_each_online_cpu(j)
 seq_printf(p, "%10u ",
 per_cpu(irq_stat,j).apic_timer_irqs);
- seq_putc(p, '\n');
+ seq_printf(p, " Local timer interrupts\n");
+#endif
+#ifdef CONFIG_SMP
+ seq_printf(p, "RES: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ",
+ per_cpu(irq_stat,j).irq_resched_count);
+ seq_printf(p, " Rescheduling interrupts\n");
+ seq_printf(p, "CAL: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ",
+ per_cpu(irq_stat,j).irq_call_count);
+ seq_printf(p, " function call interrupts\n");
+#ifndef CONFIG_XEN
+ seq_printf(p, "TLB: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ",
+ per_cpu(irq_stat,j).irq_tlb_count);
+ seq_printf(p, " TLB shootdowns\n");
+#endif
+#endif
+#ifdef CONFIG_X86_MCE
+ seq_printf(p, "TRM: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ",
+ per_cpu(irq_stat,j).irq_thermal_count);
+ seq_printf(p, " Thermal event interrupts\n");
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+ seq_printf(p, "SPU: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ",
+ per_cpu(irq_stat,j).irq_spurious_count);
+ seq_printf(p, " Spurious interrupts\n");
 #endif
 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 #if defined(CONFIG_X86_IO_APIC)
--- sle11-2009-10-16.orig/arch/x86/kernel/irq_64-xen.c 2008-12-15 11:27:22.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/irq_64-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -1,6 +1,4 @@
 /*
- * linux/arch/x86_64/kernel/irq.c
- *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
@@ -64,9 +62,17 @@ int show_interrupts(struct seq_file *p,
 }

 if (i < NR_IRQS) {
+ unsigned any_count = 0;
+
 spin_lock_irqsave(&irq_desc[i].lock, flags);
+#ifndef CONFIG_SMP
+ any_count = kstat_irqs(i);
+#else
+ for_each_online_cpu(j)
+ any_count |= kstat_cpu(j).irqs[i];
+#endif
 action = irq_desc[i].action;
- if (!action)
+ if (!action && !any_count)
 goto skip;
 seq_printf(p, "%3d: ",i);
 #ifndef CONFIG_SMP
@@ -78,9 +84,11 @@ int show_interrupts(struct seq_file *p,
 seq_printf(p, " %8s", irq_desc[i].chip->name);
 seq_printf(p, "-%-8s", irq_desc[i].name);

- seq_printf(p, " %s", action->name);
- for (action=action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
+ if (action) {
+ seq_printf(p, " %s", action->name);
+ while ((action = action->next) != NULL)
+ seq_printf(p, ", %s", action->name);
+ }
 seq_putc(p, '\n');
 skip:
 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
@@ -88,12 +96,44 @@ skip:
 seq_printf(p, "NMI: ");
 for_each_online_cpu(j)
 seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
- seq_putc(p, '\n');
+ seq_printf(p, " Non-maskable interrupts\n");
 #ifdef CONFIG_X86_LOCAL_APIC
 seq_printf(p, "LOC: ");
 for_each_online_cpu(j)
 seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
- seq_putc(p, '\n');
+ seq_printf(p, " Local timer interrupts\n");
+#endif
+#ifdef CONFIG_SMP
+ seq_printf(p, "RES: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
+ seq_printf(p, " Rescheduling interrupts\n");
+ seq_printf(p, "CAL: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
+ seq_printf(p, " function call interrupts\n");
+#ifndef CONFIG_XEN
+ seq_printf(p, "TLB: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
+ seq_printf(p, " TLB shootdowns\n");
+#endif
+#endif
+#ifdef CONFIG_X86_MCE
+ seq_printf(p, "TRM: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
+ seq_printf(p, " Thermal event interrupts\n");
+ seq_printf(p, "THR: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
+ seq_printf(p, " Threshold APIC interrupts\n");
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+ seq_printf(p, "SPU: ");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
+ seq_printf(p, " Spurious interrupts\n");
 #endif
 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 }
@@ -211,7 +251,6 @@ asmlinkage void do_softirq(void)
 }
 local_irq_restore(flags);
 }
-EXPORT_SYMBOL(do_softirq);

 #ifndef CONFIG_X86_LOCAL_APIC
 /*
--- sle11-2009-10-16.orig/arch/x86/kernel/ldt_32-xen.c 2008-12-15 11:27:22.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/ldt_32-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -1,6 +1,4 @@
 /*
- * linux/arch/i386/kernel/ldt.c
- *
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
@@ -106,14 +104,14 @@ int init_new_context(struct task_struct
 struct mm_struct * old_mm;
 int retval = 0;

- init_MUTEX(&mm->context.sem);
+ mutex_init(&mm->context.lock);
 mm->context.size = 0;
 mm->context.has_foreign_mappings = 0;
 old_mm = current->mm;
 if (old_mm && old_mm->context.size > 0) {
- down(&old_mm->context.sem);
+ mutex_lock(&old_mm->context.lock);
 retval = copy_ldt(&mm->context, &old_mm->context);
- up(&old_mm->context.sem);
+ mutex_unlock(&old_mm->context.lock);
 }
 return retval;
 }
@@ -149,7 +147,7 @@ static int read_ldt(void __user * ptr, u
 if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;

- down(&mm->context.sem);
+ mutex_lock(&mm->context.lock);
 size = mm->context.size*LDT_ENTRY_SIZE;
 if (size > bytecount)
 size = bytecount;
@@ -157,7 +155,7 @@ static int read_ldt(void __user * ptr, u
 err = 0;
 if (copy_to_user(ptr, mm->context.ldt, size))
 err = -EFAULT;
- up(&mm->context.sem);
+ mutex_unlock(&mm->context.lock);
 if (err < 0)
 goto error_return;
 if (size != bytecount) {
@@ -213,7 +211,7 @@ static int write_ldt(void __user * ptr,
 goto out;
 }

- down(&mm->context.sem);
+ mutex_lock(&mm->context.lock);
 if (ldt_info.entry_number >= mm->context.size) {
 error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
 if (error < 0)
@@ -240,7 +238,7 @@ install:
 entry_1, entry_2);

 out_unlock:
- up(&mm->context.sem);
+ mutex_unlock(&mm->context.lock);
 out:
 return error;
 }
--- sle11-2009-10-16.orig/arch/x86/kernel/ldt_64-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/ldt_64-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -1,6 +1,4 @@
 /*
- * linux/arch/x86_64/kernel/ldt.c
- *
 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002 Andi Kleen
@@ -112,19 +110,14 @@ int init_new_context(struct task_struct
 int retval = 0;

 memset(&mm->context, 0, sizeof(mm->context));
- init_MUTEX(&mm->context.sem);
+ mutex_init(&mm->context.lock);
 old_mm = current->mm;
 if (old_mm)
 mm->context.vdso = old_mm->context.vdso;
 if (old_mm && old_mm->context.size > 0) {
- down(&old_mm->context.sem);
+ mutex_lock(&old_mm->context.lock);
 retval = copy_ldt(&mm->context, &old_mm->context);
- up(&old_mm->context.sem);
- }
- if (retval == 0) {
- spin_lock(&mm_unpinned_lock);
- list_add(&mm->context.unpinned, &mm_unpinned);
- spin_unlock(&mm_unpinned_lock);
+ mutex_unlock(&old_mm->context.lock);
 }
 return retval;
 }
@@ -148,11 +141,6 @@ void destroy_context(struct mm_struct *m
 kfree(mm->context.ldt);
 mm->context.size = 0;
 }
- if (!PagePinned(virt_to_page(mm->pgd))) {
- spin_lock(&mm_unpinned_lock);
- list_del(&mm->context.unpinned);
- spin_unlock(&mm_unpinned_lock);
- }
 }

 static int read_ldt(void __user * ptr, unsigned long bytecount)
@@ -166,7 +154,7 @@ static int read_ldt(void __user * ptr, u
 if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
 bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;

- down(&mm->context.sem);
+ mutex_lock(&mm->context.lock);
 size = mm->context.size*LDT_ENTRY_SIZE;
 if (size > bytecount)
 size = bytecount;
@@ -174,7 +162,7 @@ static int read_ldt(void __user * ptr, u
 err = 0;
 if (copy_to_user(ptr, mm->context.ldt, size))
 err = -EFAULT;
- up(&mm->context.sem);
+ mutex_unlock(&mm->context.lock);
 if (err < 0)
 goto error_return;
 if (size != bytecount) {
@@ -227,7 +215,7 @@ static int write_ldt(void __user * ptr,
 goto out;
 }

- down(&mm->context.sem);
+ mutex_lock(&mm->context.lock);
 if (ldt_info.entry_number >= (unsigned)mm->context.size) {
 error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
 if (error < 0)
@@ -256,7 +244,7 @@ install:
 error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));

 out_unlock:
- up(&mm->context.sem);
+ mutex_unlock(&mm->context.lock);
 out:
 return error;
 }
--- sle11-2009-10-16.orig/arch/x86/kernel/mpparse_32-xen.c 2008-12-15 11:27:22.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/mpparse_32-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -1023,7 +1023,7 @@ void __init mp_config_acpi_legacy_irqs (

 /*
 * Use the default configuration for the IRQs 0-15. Unless
- * overriden by (MADT) interrupt source override entries.
+ * overridden by (MADT) interrupt source override entries.
 */
 for (i = 0; i < 16; i++) {
 int idx;
--- sle11-2009-10-16.orig/arch/x86/kernel/mpparse_64-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/mpparse_64-xen.c 2009-02-16 16:18:36.000000000 +0100
@@ -57,6 +57,8 @@ unsigned long mp_lapic_addr = 0;

 /* Processor that is doing the boot up */
 unsigned int boot_cpu_id = -1U;
+EXPORT_SYMBOL(boot_cpu_id);
+
 /* Internal processor count */
 unsigned int num_processors __cpuinitdata = 0;

@@ -87,7 +89,7 @@ static int __init mpf_checksum(unsigned
 }

 #ifndef CONFIG_XEN
-static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
+static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
 {
 int cpu;
 cpumask_t tmp_map;
@@ -124,13 +126,24 @@ static void __cpuinit MP_processor_info
 cpu = 0;
 }
 bios_cpu_apicid[cpu] = m->mpc_apicid;
- x86_cpu_to_apicid[cpu] = m->mpc_apicid;
+ /*
+ * We get called early in the the start_kernel initialization
+ * process when the per_cpu data area is not yet setup, so we
+ * use a static array that is removed after the per_cpu data
+ * area is created.
+ */
+ if (x86_cpu_to_apicid_ptr) {
+ u8 *x86_cpu_to_apicid = (u8 *)x86_cpu_to_apicid_ptr;
+ x86_cpu_to_apicid[cpu] = m->mpc_apicid;
+ } else {
+ per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
+ }

 cpu_set(cpu, cpu_possible_map);
 cpu_set(cpu, cpu_present_map);
 }
 #else
-static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
+static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
 {
 num_processors++;
 }
--- sle11-2009-10-16.orig/arch/x86/kernel/pci-dma-xen.c 2009-02-16 16:17:21.000000000 +0100
+++ sle11-2009-10-16/arch/x86/kernel/pci-dma-xen.c 2009-02-16 16:18:36.000000000 +0100
1616@@ -13,14 +13,13 @@
1617 #include <linux/pci.h>
1618 #include <linux/module.h>
1619 #include <linux/version.h>
1620-#include <linux/pci.h>
1621 #include <asm/io.h>
1622 #include <xen/balloon.h>
1623 #include <xen/gnttab.h>
1624 #include <asm/swiotlb.h>
1625 #include <asm/tlbflush.h>
1626-#include <asm-i386/mach-xen/asm/swiotlb.h>
1627-#include <asm-i386/mach-xen/asm/gnttab_dma.h>
1628+#include <asm/swiotlb_32.h>
1629+#include <asm/gnttab_dma.h>
1630 #include <asm/bug.h>
1631
1632 #ifdef __x86_64__
1633@@ -106,27 +105,29 @@ int range_straddles_page_boundary(paddr_
1634 }
1635
1636 int
1637-dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
1638+dma_map_sg(struct device *hwdev, struct scatterlist *sgl, int nents,
1639 enum dma_data_direction direction)
1640 {
1641 int i, rc;
1642
1643 BUG_ON(!valid_dma_direction(direction));
1644- WARN_ON(nents == 0 || sg[0].length == 0);
1645+ WARN_ON(nents == 0 || sgl->length == 0);
1646
1647 if (swiotlb) {
1648- rc = swiotlb_map_sg(hwdev, sg, nents, direction);
1649+ rc = swiotlb_map_sg(hwdev, sgl, nents, direction);
1650 } else {
1651- for (i = 0; i < nents; i++ ) {
1652- BUG_ON(!sg[i].page);
1653- sg[i].dma_address =
1654- gnttab_dma_map_page(sg[i].page) + sg[i].offset;
1655- sg[i].dma_length = sg[i].length;
1656+ struct scatterlist *sg;
1657+
1658+ for_each_sg(sgl, sg, nents, i) {
1659+ BUG_ON(!sg_page(sg));
1660+ sg->dma_address =
1661+ gnttab_dma_map_page(sg_page(sg)) + sg->offset;
1662+ sg->dma_length = sg->length;
1663 IOMMU_BUG_ON(address_needs_mapping(
1664- hwdev, sg[i].dma_address));
1665+ hwdev, sg->dma_address));
1666 IOMMU_BUG_ON(range_straddles_page_boundary(
1667- page_to_pseudophys(sg[i].page) + sg[i].offset,
1668- sg[i].length));
1669+ page_to_pseudophys(sg_page(sg)) + sg->offset,
1670+ sg->length));
1671 }
1672 rc = nents;
1673 }
1674@@ -137,17 +138,19 @@ dma_map_sg(struct device *hwdev, struct
1675 EXPORT_SYMBOL(dma_map_sg);
1676
1677 void
1678-dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
1679+dma_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nents,
1680 enum dma_data_direction direction)
1681 {
1682 int i;
1683
1684 BUG_ON(!valid_dma_direction(direction));
1685 if (swiotlb)
1686- swiotlb_unmap_sg(hwdev, sg, nents, direction);
1687+ swiotlb_unmap_sg(hwdev, sgl, nents, direction);
1688 else {
1689- for (i = 0; i < nents; i++ )
1690- gnttab_dma_unmap_page(sg[i].dma_address);
1691+ struct scatterlist *sg;
1692+
1693+ for_each_sg(sgl, sg, nents, i)
1694+ gnttab_dma_unmap_page(sg->dma_address);
1695 }
1696 }
1697 EXPORT_SYMBOL(dma_unmap_sg);
1698@@ -261,7 +264,8 @@ void dma_free_coherent(struct device *de
1699 {
1700 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
1701 int order = get_order(size);
1702-
1703+
1704+ WARN_ON(irqs_disabled()); /* for portability */
1705 if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
1706 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
1707
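
dma_map_sg()/dma_unmap_sg() above get the standard 2.6.24 chained-scatterlist conversion: sg[i] array indexing stops being valid once entries may chain across allocations, so every walker moves to for_each_sg() and reaches the page through sg_page(). The idiom in isolation; sg_total_length() is an illustrative helper, not part of the patch:

	#include <linux/scatterlist.h>

	/* Sum the byte length of a possibly chained scatterlist. */
	static unsigned int sg_total_length(struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		unsigned int total = 0;
		int i;

		for_each_sg(sgl, sg, nents, i)	/* never sgl[i] */
			total += sg->length;
		return total;
	}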
1708--- sle11-2009-10-16.orig/arch/x86/kernel/process_32-xen.c 2009-02-16 16:17:21.000000000 +0100
1709+++ sle11-2009-10-16/arch/x86/kernel/process_32-xen.c 2009-02-16 16:18:36.000000000 +0100
1710@@ -1,6 +1,4 @@
1711 /*
1712- * linux/arch/i386/kernel/process.c
1713- *
1714 * Copyright (C) 1995 Linus Torvalds
1715 *
1716 * Pentium III FXSR, SSE support
1717@@ -190,6 +188,10 @@ void cpu_idle(void)
1718 }
1719 }
1720
1721+static void do_nothing(void *unused)
1722+{
1723+}
1724+
1725 void cpu_idle_wait(void)
1726 {
1727 unsigned int cpu, this_cpu = get_cpu();
1728@@ -214,13 +216,20 @@ void cpu_idle_wait(void)
1729 cpu_clear(cpu, map);
1730 }
1731 cpus_and(map, map, cpu_online_map);
1732+ /*
1733+	 * We waited 1 sec; if a CPU still did not call idle,
1734+	 * it may be because the CPU is in idle and not waking
1735+	 * up because it has nothing to do.
1736+	 * Give all the remaining CPUs a kick.
1737+ */
1738+ smp_call_function_mask(map, do_nothing, 0, 0);
1739 } while (!cpus_empty(map));
1740
1741 set_cpus_allowed(current, tmp);
1742 }
1743 EXPORT_SYMBOL_GPL(cpu_idle_wait);
1744
1745-void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
1746+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
1747 {
1748 }
1749
1750@@ -238,34 +247,52 @@ static int __init idle_setup(char *str)
1751 }
1752 early_param("idle", idle_setup);
1753
1754-void show_regs(struct pt_regs * regs)
1755+void __show_registers(struct pt_regs *regs, int all)
1756 {
1757 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
1758 unsigned long d0, d1, d2, d3, d6, d7;
1759+ unsigned long esp;
1760+ unsigned short ss, gs;
1761+
1762+ if (user_mode_vm(regs)) {
1763+ esp = regs->esp;
1764+ ss = regs->xss & 0xffff;
1765+ savesegment(gs, gs);
1766+ } else {
1767+ esp = (unsigned long) (&regs->esp);
1768+ savesegment(ss, ss);
1769+ savesegment(gs, gs);
1770+ }
1771
1772 printk("\n");
1773- printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
1774- printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
1775+ printk("Pid: %d, comm: %s %s (%s %.*s)\n",
1776+ task_pid_nr(current), current->comm,
1777+ print_tainted(), init_utsname()->release,
1778+ (int)strcspn(init_utsname()->version, " "),
1779+ init_utsname()->version);
1780+
1781+ printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
1782+ 0xffff & regs->xcs, regs->eip, regs->eflags,
1783+ smp_processor_id());
1784 print_symbol("EIP is at %s\n", regs->eip);
1785
1786- if (user_mode_vm(regs))
1787- printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
1788- printk(" EFLAGS: %08lx %s (%s %.*s)\n",
1789- regs->eflags, print_tainted(), init_utsname()->release,
1790- (int)strcspn(init_utsname()->version, " "),
1791- init_utsname()->version);
1792 printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
1793- regs->eax,regs->ebx,regs->ecx,regs->edx);
1794- printk("ESI: %08lx EDI: %08lx EBP: %08lx",
1795- regs->esi, regs->edi, regs->ebp);
1796- printk(" DS: %04x ES: %04x FS: %04x\n",
1797- 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs);
1798+ regs->eax, regs->ebx, regs->ecx, regs->edx);
1799+ printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
1800+ regs->esi, regs->edi, regs->ebp, esp);
1801+ printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
1802+ regs->xds & 0xffff, regs->xes & 0xffff,
1803+ regs->xfs & 0xffff, gs, ss);
1804+
1805+ if (!all)
1806+ return;
1807
1808 cr0 = read_cr0();
1809 cr2 = read_cr2();
1810 cr3 = read_cr3();
1811 cr4 = read_cr4_safe();
1812- printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
1813+ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
1814+ cr0, cr2, cr3, cr4);
1815
1816 get_debugreg(d0, 0);
1817 get_debugreg(d1, 1);
1818@@ -273,10 +300,16 @@ void show_regs(struct pt_regs * regs)
1819 get_debugreg(d3, 3);
1820 printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
1821 d0, d1, d2, d3);
1822+
1823 get_debugreg(d6, 6);
1824 get_debugreg(d7, 7);
1825- printk("DR6: %08lx DR7: %08lx\n", d6, d7);
1826+ printk("DR6: %08lx DR7: %08lx\n",
1827+ d6, d7);
1828+}
1829
1830+void show_regs(struct pt_regs *regs)
1831+{
1832+ __show_registers(regs, 1);
1833 show_trace(NULL, regs, &regs->esp);
1834 }
1835
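
The empty do_nothing() above is the whole fix to cpu_idle_wait(): an IPI, any IPI, pulls a CPU that is sleeping in a non-polling idle state back through its idle loop, where it finally notices cpu_idle_state. Condensed into an illustrative helper (kick_idle_cpus() is a made-up name; smp_call_function_mask() is the primitive this same patch adds in smp_64-xen.c below):

	static void do_nothing(void *unused)
	{
	}

	/* Wake every CPU in @map that may be parked in idle; the IPI is
	 * the point, the called function intentionally does no work. */
	static void kick_idle_cpus(cpumask_t map)
	{
		smp_call_function_mask(map, do_nothing, NULL, 0);
	}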
1836--- sle11-2009-10-16.orig/arch/x86/kernel/process_64-xen.c 2009-02-16 16:17:21.000000000 +0100
1837+++ sle11-2009-10-16/arch/x86/kernel/process_64-xen.c 2009-02-16 16:18:36.000000000 +0100
1838@@ -1,6 +1,4 @@
1839 /*
1840- * linux/arch/x86-64/kernel/process.c
1841- *
1842 * Copyright (C) 1995 Linus Torvalds
1843 *
1844 * Pentium III FXSR, SSE support
1845@@ -41,6 +39,7 @@
1846 #include <linux/notifier.h>
1847 #include <linux/kprobes.h>
1848 #include <linux/kdebug.h>
1849+#include <linux/tick.h>
1850
1851 #include <asm/uaccess.h>
1852 #include <asm/pgtable.h>
1853@@ -172,6 +171,9 @@ void cpu_idle (void)
1854
1855 if (__get_cpu_var(cpu_idle_state))
1856 __get_cpu_var(cpu_idle_state) = 0;
1857+
1858+ tick_nohz_stop_sched_tick();
1859+
1860 rmb();
1861 idle = xen_idle; /* no alternatives */
1862 if (cpu_is_offline(smp_processor_id()))
1863@@ -190,12 +192,17 @@ void cpu_idle (void)
1864 __exit_idle();
1865 }
1866
1867+ tick_nohz_restart_sched_tick();
1868 preempt_enable_no_resched();
1869 schedule();
1870 preempt_disable();
1871 }
1872 }
1873
1874+static void do_nothing(void *unused)
1875+{
1876+}
1877+
1878 void cpu_idle_wait(void)
1879 {
1880 unsigned int cpu, this_cpu = get_cpu();
1881@@ -221,6 +228,13 @@ void cpu_idle_wait(void)
1882 cpu_clear(cpu, map);
1883 }
1884 cpus_and(map, map, cpu_online_map);
1885+ /*
1886+	 * We waited 1 sec; if a CPU still did not call idle,
1887+	 * it may be because the CPU is in idle and not waking
1888+	 * up because it has nothing to do.
1889+	 * Give all the remaining CPUs a kick.
1890+ */
1891+ smp_call_function_mask(map, do_nothing, 0, 0);
1892 } while (!cpus_empty(map));
1893
1894 set_cpus_allowed(current, tmp);
1895@@ -528,7 +542,7 @@ static inline void __switch_to_xtra(stru
1896 *
1897 * Kprobes not supported here. Set the probe on schedule instead.
1898 */
1899-__kprobes struct task_struct *
1900+struct task_struct *
1901 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
1902 {
1903 struct thread_struct *prev = &prev_p->thread,
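
The tick_nohz_*() pair above hooks the Xen idle loop into the 2.6.24 dynticks framework: stop the periodic tick before blocking in the hypervisor, restart it before handing control back to the scheduler. The resulting loop shape, sketched with the cpu_idle_state and offline-CPU details elided (idle_loop() is an illustrative name):

	/* Skeleton of a NO_HZ-aware idle loop; illustrative only. */
	static void idle_loop(void)
	{
		for (;;) {
			tick_nohz_stop_sched_tick();	/* idle: no tick needed */
			while (!need_resched())
				xen_idle();		/* block in the hypervisor */
			tick_nohz_restart_sched_tick();	/* catch up on jiffies */
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}
	}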
1904--- sle11-2009-10-16.orig/arch/x86/kernel/quirks-xen.c 2008-12-15 11:27:22.000000000 +0100
1905+++ sle11-2009-10-16/arch/x86/kernel/quirks-xen.c 2009-02-16 16:18:36.000000000 +0100
1906@@ -41,7 +41,353 @@ static void __devinit quirk_intel_irqbal
1907 if (!(config & 0x2))
1908 pci_write_config_byte(dev, 0xf4, config);
1909 }
1910-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
1911-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
1912-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
1913+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
1914+ quirk_intel_irqbalance);
1915+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
1916+ quirk_intel_irqbalance);
1917+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
1918+ quirk_intel_irqbalance);
1919+#endif
1920+
1921+#if defined(CONFIG_HPET_TIMER)
1922+#include <asm/hpet.h>
1923+
1924+unsigned long force_hpet_address;
1925+
1926+static enum {
1927+ NONE_FORCE_HPET_RESUME,
1928+ OLD_ICH_FORCE_HPET_RESUME,
1929+ ICH_FORCE_HPET_RESUME,
1930+ VT8237_FORCE_HPET_RESUME,
1931+ NVIDIA_FORCE_HPET_RESUME,
1932+} force_hpet_resume_type;
1933+
1934+static void __iomem *rcba_base;
1935+
1936+static void ich_force_hpet_resume(void)
1937+{
1938+ u32 val;
1939+
1940+ if (!force_hpet_address)
1941+ return;
1942+
1943+ if (rcba_base == NULL)
1944+ BUG();
1945+
1946+ /* read the Function Disable register, dword mode only */
1947+ val = readl(rcba_base + 0x3404);
1948+ if (!(val & 0x80)) {
1949+ /* HPET disabled in HPTC. Trying to enable */
1950+ writel(val | 0x80, rcba_base + 0x3404);
1951+ }
1952+
1953+ val = readl(rcba_base + 0x3404);
1954+ if (!(val & 0x80))
1955+ BUG();
1956+ else
1957+ printk(KERN_DEBUG "Force enabled HPET at resume\n");
1958+
1959+ return;
1960+}
1961+
1962+static void ich_force_enable_hpet(struct pci_dev *dev)
1963+{
1964+ u32 val;
1965+ u32 uninitialized_var(rcba);
1966+ int err = 0;
1967+
1968+ if (hpet_address || force_hpet_address)
1969+ return;
1970+
1971+ pci_read_config_dword(dev, 0xF0, &rcba);
1972+ rcba &= 0xFFFFC000;
1973+ if (rcba == 0) {
1974+ printk(KERN_DEBUG "RCBA disabled. Cannot force enable HPET\n");
1975+ return;
1976+ }
1977+
1978+ /* use bits 31:14, 16 kB aligned */
1979+ rcba_base = ioremap_nocache(rcba, 0x4000);
1980+ if (rcba_base == NULL) {
1981+ printk(KERN_DEBUG "ioremap failed. Cannot force enable HPET\n");
1982+ return;
1983+ }
1984+
1985+ /* read the Function Disable register, dword mode only */
1986+ val = readl(rcba_base + 0x3404);
1987+
1988+ if (val & 0x80) {
1989+ /* HPET is enabled in HPTC. Just not reported by BIOS */
1990+ val = val & 0x3;
1991+ force_hpet_address = 0xFED00000 | (val << 12);
1992+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
1993+ force_hpet_address);
1994+ iounmap(rcba_base);
1995+ return;
1996+ }
1997+
1998+ /* HPET disabled in HPTC. Trying to enable */
1999+ writel(val | 0x80, rcba_base + 0x3404);
2000+
2001+ val = readl(rcba_base + 0x3404);
2002+ if (!(val & 0x80)) {
2003+ err = 1;
2004+ } else {
2005+ val = val & 0x3;
2006+ force_hpet_address = 0xFED00000 | (val << 12);
2007+ }
2008+
2009+ if (err) {
2010+ force_hpet_address = 0;
2011+ iounmap(rcba_base);
2012+ printk(KERN_DEBUG "Failed to force enable HPET\n");
2013+ } else {
2014+ force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
2015+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
2016+ force_hpet_address);
2017+ }
2018+}
2019+
2020+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
2021+ ich_force_enable_hpet);
2022+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
2023+ ich_force_enable_hpet);
2024+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
2025+ ich_force_enable_hpet);
2026+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
2027+ ich_force_enable_hpet);
2028+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
2029+ ich_force_enable_hpet);
2030+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
2031+ ich_force_enable_hpet);
2032+
2033+
2034+static struct pci_dev *cached_dev;
2035+
2036+static void old_ich_force_hpet_resume(void)
2037+{
2038+ u32 val;
2039+ u32 uninitialized_var(gen_cntl);
2040+
2041+ if (!force_hpet_address || !cached_dev)
2042+ return;
2043+
2044+ pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
2045+ gen_cntl &= (~(0x7 << 15));
2046+ gen_cntl |= (0x4 << 15);
2047+
2048+ pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
2049+ pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
2050+ val = gen_cntl >> 15;
2051+ val &= 0x7;
2052+ if (val == 0x4)
2053+ printk(KERN_DEBUG "Force enabled HPET at resume\n");
2054+ else
2055+ BUG();
2056+}
2057+
2058+static void old_ich_force_enable_hpet(struct pci_dev *dev)
2059+{
2060+ u32 val;
2061+ u32 uninitialized_var(gen_cntl);
2062+
2063+ if (hpet_address || force_hpet_address)
2064+ return;
2065+
2066+ pci_read_config_dword(dev, 0xD0, &gen_cntl);
2067+ /*
2068+ * Bit 17 is HPET enable bit.
2069+ * Bit 16:15 control the HPET base address.
2070+ */
2071+ val = gen_cntl >> 15;
2072+ val &= 0x7;
2073+ if (val & 0x4) {
2074+ val &= 0x3;
2075+ force_hpet_address = 0xFED00000 | (val << 12);
2076+ printk(KERN_DEBUG "HPET at base address 0x%lx\n",
2077+ force_hpet_address);
2078+ return;
2079+ }
2080+
2081+ /*
2082+	 * HPET is disabled. Try enabling it at FED00000 and check
2083+	 * whether the setting sticks.
2084+ */
2085+ gen_cntl &= (~(0x7 << 15));
2086+ gen_cntl |= (0x4 << 15);
2087+ pci_write_config_dword(dev, 0xD0, gen_cntl);
2088+
2089+ pci_read_config_dword(dev, 0xD0, &gen_cntl);
2090+
2091+ val = gen_cntl >> 15;
2092+ val &= 0x7;
2093+ if (val & 0x4) {
2094+ /* HPET is enabled in HPTC. Just not reported by BIOS */
2095+ val &= 0x3;
2096+ force_hpet_address = 0xFED00000 | (val << 12);
2097+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
2098+ force_hpet_address);
2099+ cached_dev = dev;
2100+ force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
2101+ return;
2102+ }
2103+
2104+ printk(KERN_DEBUG "Failed to force enable HPET\n");
2105+}
2106+
2107+/*
2108+ * Undocumented chipset features. Make sure that the user enforced
2109+ * this.
2110+ */
2111+static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
2112+{
2113+ if (hpet_force_user)
2114+ old_ich_force_enable_hpet(dev);
2115+}
2116+
2117+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
2118+ old_ich_force_enable_hpet_user);
2119+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
2120+ old_ich_force_enable_hpet_user);
2121+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
2122+ old_ich_force_enable_hpet_user);
2123+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
2124+ old_ich_force_enable_hpet_user);
2125+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
2126+ old_ich_force_enable_hpet);
2127+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
2128+ old_ich_force_enable_hpet);
2129+
2130+
2131+static void vt8237_force_hpet_resume(void)
2132+{
2133+ u32 val;
2134+
2135+ if (!force_hpet_address || !cached_dev)
2136+ return;
2137+
2138+ val = 0xfed00000 | 0x80;
2139+ pci_write_config_dword(cached_dev, 0x68, val);
2140+
2141+ pci_read_config_dword(cached_dev, 0x68, &val);
2142+ if (val & 0x80)
2143+ printk(KERN_DEBUG "Force enabled HPET at resume\n");
2144+ else
2145+ BUG();
2146+}
2147+
2148+static void vt8237_force_enable_hpet(struct pci_dev *dev)
2149+{
2150+ u32 uninitialized_var(val);
2151+
2152+ if (!hpet_force_user || hpet_address || force_hpet_address)
2153+ return;
2154+
2155+ pci_read_config_dword(dev, 0x68, &val);
2156+ /*
2157+ * Bit 7 is HPET enable bit.
2158+ * Bit 31:10 is HPET base address (contrary to what datasheet claims)
2159+ */
2160+ if (val & 0x80) {
2161+ force_hpet_address = (val & ~0x3ff);
2162+ printk(KERN_DEBUG "HPET at base address 0x%lx\n",
2163+ force_hpet_address);
2164+ return;
2165+ }
2166+
2167+ /*
2168+	 * HPET is disabled. Try enabling it at FED00000 and check
2169+	 * whether the setting sticks.
2170+ */
2171+ val = 0xfed00000 | 0x80;
2172+ pci_write_config_dword(dev, 0x68, val);
2173+
2174+ pci_read_config_dword(dev, 0x68, &val);
2175+ if (val & 0x80) {
2176+ force_hpet_address = (val & ~0x3ff);
2177+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
2178+ force_hpet_address);
2179+ cached_dev = dev;
2180+ force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
2181+ return;
2182+ }
2183+
2184+ printk(KERN_DEBUG "Failed to force enable HPET\n");
2185+}
2186+
2187+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
2188+ vt8237_force_enable_hpet);
2189+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
2190+ vt8237_force_enable_hpet);
2191+
2192+/*
2193+ * Undocumented chipset feature taken from LinuxBIOS.
2194+ */
2195+static void nvidia_force_hpet_resume(void)
2196+{
2197+ pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
2198+ printk(KERN_DEBUG "Force enabled HPET at resume\n");
2199+}
2200+
2201+static void nvidia_force_enable_hpet(struct pci_dev *dev)
2202+{
2203+ u32 uninitialized_var(val);
2204+
2205+ if (!hpet_force_user || hpet_address || force_hpet_address)
2206+ return;
2207+
2208+ pci_write_config_dword(dev, 0x44, 0xfed00001);
2209+ pci_read_config_dword(dev, 0x44, &val);
2210+ force_hpet_address = val & 0xfffffffe;
2211+ force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
2212+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
2213+ force_hpet_address);
2214+ cached_dev = dev;
2215+ return;
2216+}
2217+
2218+/* ISA Bridges */
2219+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
2220+ nvidia_force_enable_hpet);
2221+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
2222+ nvidia_force_enable_hpet);
2223+
2224+/* LPC bridges */
2225+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
2226+ nvidia_force_enable_hpet);
2227+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
2228+ nvidia_force_enable_hpet);
2229+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
2230+ nvidia_force_enable_hpet);
2231+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
2232+ nvidia_force_enable_hpet);
2233+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
2234+ nvidia_force_enable_hpet);
2235+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
2236+ nvidia_force_enable_hpet);
2237+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
2238+ nvidia_force_enable_hpet);
2239+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
2240+ nvidia_force_enable_hpet);
2241+
2242+void force_hpet_resume(void)
2243+{
2244+ switch (force_hpet_resume_type) {
2245+ case ICH_FORCE_HPET_RESUME:
2246+ return ich_force_hpet_resume();
2247+
2248+ case OLD_ICH_FORCE_HPET_RESUME:
2249+ return old_ich_force_hpet_resume();
2250+
2251+ case VT8237_FORCE_HPET_RESUME:
2252+ return vt8237_force_hpet_resume();
2253+
2254+ case NVIDIA_FORCE_HPET_RESUME:
2255+ return nvidia_force_hpet_resume();
2256+
2257+ default:
2258+ break;
2259+ }
2260+}
2261+
2262 #endif
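
All four quirk families above follow the same force-enable dance; the ICH one is the most explicit: map the chipset's RCBA window, read the Function Disable register (HPTC) at offset 0x3404, set bit 7, and read back to see whether it stuck; bits 1:0 then select one of the four 4 kB slots at 0xFED00000. Condensed into one illustrative helper (hptc_force_enable() is a made-up name; the offsets are as in the hunk, error reporting elided):

	#include <asm/io.h>

	/* Returns the HPET base address, or 0 if enabling did not stick. */
	static unsigned long hptc_force_enable(void __iomem *rcba_base)
	{
		u32 val = readl(rcba_base + 0x3404);	/* HPTC: dword only */

		if (!(val & 0x80)) {		/* disabled: try to enable */
			writel(val | 0x80, rcba_base + 0x3404);
			val = readl(rcba_base + 0x3404);
			if (!(val & 0x80))
				return 0;	/* write did not stick */
		}
		return 0xFED00000 | ((val & 0x3) << 12);
	}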
2263--- sle11-2009-10-16.orig/arch/x86/kernel/setup64-xen.c 2009-02-16 16:17:21.000000000 +0100
2264+++ sle11-2009-10-16/arch/x86/kernel/setup64-xen.c 2009-02-16 16:18:36.000000000 +0100
2265@@ -15,7 +15,6 @@
2266 #include <linux/bootmem.h>
2267 #include <linux/bitops.h>
2268 #include <linux/module.h>
2269-#include <asm/bootsetup.h>
2270 #include <asm/pda.h>
2271 #include <asm/pgtable.h>
2272 #include <asm/processor.h>
2273@@ -27,11 +26,12 @@
2274 #include <asm/percpu.h>
2275 #include <asm/proto.h>
2276 #include <asm/sections.h>
2277+#include <asm/setup.h>
2278 #ifdef CONFIG_XEN
2279 #include <asm/hypervisor.h>
2280 #endif
2281
2282-char x86_boot_params[BOOT_PARAM_SIZE] __initdata;
2283+struct boot_params __initdata boot_params;
2284
2285 cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
2286
2287@@ -159,8 +159,8 @@ static void switch_pt(void)
2288
2289 static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr)
2290 {
2291- asm volatile("lgdt %0" :: "m" (*gdt_descr));
2292- asm volatile("lidt %0" :: "m" (idt_descr));
2293+ load_gdt(gdt_descr);
2294+ load_idt(idt_descr);
2295 }
2296 #endif
2297
2298@@ -252,6 +252,14 @@ void __cpuinit check_efer(void)
2299
2300 unsigned long kernel_eflags;
2301
2302+#ifndef CONFIG_X86_NO_TSS
2303+/*
2304+ * Copies of the original ist values from the tss are only accessed during
2305+ * debugging, no special alignment required.
2306+ */
2307+DEFINE_PER_CPU(struct orig_ist, orig_ist);
2308+#endif
2309+
2310 /*
2311 * cpu_init() initializes state that is per-CPU. Some data is already
2312 * initialized (naturally) in the bootstrap process, such as the GDT
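
Swapping the open-coded lgdt/lidt for load_gdt()/load_idt() in cpu_gdt_init() is a paravirt cleanup: the accessors can be routed to a hypervisor backend instead of executing privileged instructions directly. On native hardware they boil down to the asm the old code had; a sketch of the native forms (not the literal <asm/desc.h> definitions, and the struct layout here is illustrative):

	struct desc_ptr {
		unsigned short size;
		unsigned long address;
	} __attribute__((packed));

	static inline void native_load_gdt(const struct desc_ptr *dtr)
	{
		asm volatile("lgdt %0" : : "m" (*dtr));
	}

	static inline void native_load_idt(const struct desc_ptr *dtr)
	{
		asm volatile("lidt %0" : : "m" (*dtr));
	}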
2313--- sle11-2009-10-16.orig/arch/x86/kernel/setup_32-xen.c 2009-02-16 16:17:21.000000000 +0100
2314+++ sle11-2009-10-16/arch/x86/kernel/setup_32-xen.c 2009-02-16 16:18:36.000000000 +0100
2315@@ -1,6 +1,4 @@
2316 /*
2317- * linux/arch/i386/kernel/setup.c
2318- *
2319 * Copyright (C) 1995 Linus Torvalds
2320 *
2321 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
2322@@ -70,6 +68,7 @@
2323 #include <xen/xencons.h>
2324 #include <setup_arch.h>
2325 #include <bios_ebda.h>
2326+#include <asm/cacheflush.h>
2327
2328 #ifdef CONFIG_XEN
2329 #include <xen/interface/kexec.h>
2330@@ -80,13 +79,14 @@ static struct notifier_block xen_panic_b
2331 xen_panic_event, NULL, 0 /* try to go last */
2332 };
2333
2334-int disable_pse __devinitdata = 0;
2335+int disable_pse __cpuinitdata = 0;
2336
2337 /*
2338 * Machine setup..
2339 */
2340 extern struct resource code_resource;
2341 extern struct resource data_resource;
2342+extern struct resource bss_resource;
2343
2344 /* cpu data as detected by the assembly code in head.S */
2345 struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
2346@@ -98,9 +98,6 @@ unsigned long mmu_cr4_features;
2347
2348 /* for MCA, but anyone else can use it if they want */
2349 unsigned int machine_id;
2350-#ifdef CONFIG_MCA
2351-EXPORT_SYMBOL(machine_id);
2352-#endif
2353 unsigned int machine_submodel_id;
2354 unsigned int BIOS_revision;
2355 unsigned int mca_pentium_flag;
2356@@ -121,7 +118,7 @@ EXPORT_SYMBOL(apm_info);
2357 struct edid_info edid_info;
2358 EXPORT_SYMBOL_GPL(edid_info);
2359 #ifndef CONFIG_XEN
2360-#define copy_edid() (edid_info = EDID_INFO)
2361+#define copy_edid() (edid_info = boot_params.edid_info)
2362 #endif
2363 struct ist_info ist_info;
2364 #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
2365@@ -170,10 +167,11 @@ EXPORT_SYMBOL(edd);
2366 */
2367 static inline void copy_edd(void)
2368 {
2369- memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
2370- memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
2371- edd.mbr_signature_nr = EDD_MBR_SIG_NR;
2372- edd.edd_info_nr = EDD_NR;
2373+ memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
2374+ sizeof(edd.mbr_signature));
2375+ memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
2376+ edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
2377+ edd.edd_info_nr = boot_params.eddbuf_entries;
2378 }
2379 #endif
2380 #else
2381@@ -416,6 +414,53 @@ extern unsigned long __init setup_memory
2382 extern void zone_sizes_init(void);
2383 #endif /* !CONFIG_NEED_MULTIPLE_NODES */
2384
2385+static inline unsigned long long get_total_mem(void)
2386+{
2387+ unsigned long long total;
2388+
2389+ total = max_low_pfn - min_low_pfn;
2390+#ifdef CONFIG_HIGHMEM
2391+ total += highend_pfn - highstart_pfn;
2392+#endif
2393+
2394+ return total << PAGE_SHIFT;
2395+}
2396+
2397+#ifdef CONFIG_KEXEC
2398+#ifndef CONFIG_XEN
2399+static void __init reserve_crashkernel(void)
2400+{
2401+ unsigned long long total_mem;
2402+ unsigned long long crash_size, crash_base;
2403+ int ret;
2404+
2405+ total_mem = get_total_mem();
2406+
2407+ ret = parse_crashkernel(boot_command_line, total_mem,
2408+ &crash_size, &crash_base);
2409+ if (ret == 0 && crash_size > 0) {
2410+ if (crash_base > 0) {
2411+ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
2412+ "for crashkernel (System RAM: %ldMB)\n",
2413+ (unsigned long)(crash_size >> 20),
2414+ (unsigned long)(crash_base >> 20),
2415+ (unsigned long)(total_mem >> 20));
2416+ crashk_res.start = crash_base;
2417+ crashk_res.end = crash_base + crash_size - 1;
2418+ reserve_bootmem(crash_base, crash_size);
2419+ } else
2420+ printk(KERN_INFO "crashkernel reservation failed - "
2421+ "you have to specify a base address\n");
2422+ }
2423+}
2424+#else
2425+#define reserve_crashkernel xen_machine_kexec_setup_resources
2426+#endif
2427+#else
2428+static inline void __init reserve_crashkernel(void)
2429+{}
2430+#endif
2431+
2432 void __init setup_bootmem_allocator(void)
2433 {
2434 unsigned long bootmap_size;
2435@@ -471,30 +516,25 @@ void __init setup_bootmem_allocator(void
2436
2437 #ifdef CONFIG_BLK_DEV_INITRD
2438 if (xen_start_info->mod_start) {
2439- if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
2440- /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
2441- initrd_start = INITRD_START + PAGE_OFFSET;
2442- initrd_end = initrd_start+INITRD_SIZE;
2443+ unsigned long ramdisk_image = __pa(xen_start_info->mod_start);
2444+ unsigned long ramdisk_size = xen_start_info->mod_len;
2445+ unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
2446+ unsigned long end_of_lowmem = max_low_pfn << PAGE_SHIFT;
2447+
2448+ if (ramdisk_end <= end_of_lowmem) {
2449+ /*reserve_bootmem(ramdisk_image, ramdisk_size);*/
2450+ initrd_start = ramdisk_image + PAGE_OFFSET;
2451+ initrd_end = initrd_start+ramdisk_size;
2452 initrd_below_start_ok = 1;
2453- }
2454- else {
2455+ } else {
2456 printk(KERN_ERR "initrd extends beyond end of memory "
2457- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
2458- INITRD_START + INITRD_SIZE,
2459- max_low_pfn << PAGE_SHIFT);
2460+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
2461+ ramdisk_end, end_of_lowmem);
2462 initrd_start = 0;
2463 }
2464 }
2465 #endif
2466-#ifdef CONFIG_KEXEC
2467-#ifdef CONFIG_XEN
2468- xen_machine_kexec_setup_resources();
2469-#else
2470- if (crashk_res.start != crashk_res.end)
2471- reserve_bootmem(crashk_res.start,
2472- crashk_res.end - crashk_res.start + 1);
2473-#endif
2474-#endif
2475+ reserve_crashkernel();
2476 }
2477
2478 /*
2479@@ -572,7 +612,8 @@ void __init setup_arch(char **cmdline_p)
2480 * the system table is valid. If not, then initialize normally.
2481 */
2482 #ifdef CONFIG_EFI
2483- if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
2484+ if ((boot_params.hdr.type_of_loader == 0x50) &&
2485+ boot_params.efi_info.efi_systab)
2486 efi_enabled = 1;
2487 #endif
2488
2489@@ -580,18 +621,18 @@ void __init setup_arch(char **cmdline_p)
2490 properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
2491 */
2492 ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
2493- screen_info = SCREEN_INFO;
2494+ screen_info = boot_params.screen_info;
2495 copy_edid();
2496- apm_info.bios = APM_BIOS_INFO;
2497- ist_info = IST_INFO;
2498- saved_videomode = VIDEO_MODE;
2499- if( SYS_DESC_TABLE.length != 0 ) {
2500- set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
2501- machine_id = SYS_DESC_TABLE.table[0];
2502- machine_submodel_id = SYS_DESC_TABLE.table[1];
2503- BIOS_revision = SYS_DESC_TABLE.table[2];
2504+ apm_info.bios = boot_params.apm_bios_info;
2505+ ist_info = boot_params.ist_info;
2506+ saved_videomode = boot_params.hdr.vid_mode;
2507+ if( boot_params.sys_desc_table.length != 0 ) {
2508+ set_mca_bus(boot_params.sys_desc_table.table[3] & 0x2);
2509+ machine_id = boot_params.sys_desc_table.table[0];
2510+ machine_submodel_id = boot_params.sys_desc_table.table[1];
2511+ BIOS_revision = boot_params.sys_desc_table.table[2];
2512 }
2513- bootloader_type = LOADER_TYPE;
2514+ bootloader_type = boot_params.hdr.type_of_loader;
2515
2516 if (is_initial_xendomain()) {
2517 const struct dom0_vga_console_info *info =
2518@@ -606,9 +647,9 @@ void __init setup_arch(char **cmdline_p)
2519 screen_info.orig_video_isVGA = 0;
2520
2521 #ifdef CONFIG_BLK_DEV_RAM
2522- rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
2523- rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
2524- rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
2525+ rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
2526+ rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
2527+ rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
2528 #endif
2529
2530 ARCH_SETUP
2531@@ -621,7 +662,7 @@ void __init setup_arch(char **cmdline_p)
2532
2533 copy_edd();
2534
2535- if (!MOUNT_ROOT_RDONLY)
2536+ if (!boot_params.hdr.root_flags)
2537 root_mountflags &= ~MS_RDONLY;
2538 init_mm.start_code = (unsigned long) _text;
2539 init_mm.end_code = (unsigned long) _etext;
2540@@ -633,6 +674,8 @@ void __init setup_arch(char **cmdline_p)
2541 code_resource.end = virt_to_phys(_etext)-1;
2542 data_resource.start = virt_to_phys(_etext);
2543 data_resource.end = virt_to_phys(_edata)-1;
2544+ bss_resource.start = virt_to_phys(&__bss_start);
2545+ bss_resource.end = virt_to_phys(&__bss_stop)-1;
2546
2547 if ((i = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
2548 i = COMMAND_LINE_SIZE;
2549@@ -661,7 +704,7 @@ void __init setup_arch(char **cmdline_p)
2550 /*
2551 * NOTE: before this point _nobody_ is allowed to allocate
2552 * any memory using the bootmem allocator. Although the
2553- * alloctor is now initialised only the first 8Mb of the kernel
2554+ * allocator is now initialised only the first 8Mb of the kernel
2555 * virtual address space has been mapped. All allocations before
2556 * paging_init() has completed must use the alloc_bootmem_low_pages()
2557 * variant (which allocates DMA'able memory) and care must be taken
2558@@ -784,10 +827,8 @@ void __init setup_arch(char **cmdline_p)
2559 acpi_boot_table_init();
2560 #endif
2561
2562-#ifdef CONFIG_PCI
2563-#ifdef CONFIG_X86_IO_APIC
2564- check_acpi_pci(); /* Checks more than just ACPI actually */
2565-#endif
2566+#if defined(CONFIG_PCI) && !defined(CONFIG_XEN)
2567+ early_quirks();
2568 #endif
2569
2570 #ifdef CONFIG_ACPI
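
reserve_crashkernel() above replaces the open-coded crashk_res reservation with the common parse_crashkernel() helper, which decodes crashkernel=size[@offset] from the command line against total memory. Its essential flow, as a sketch with the printk diagnostics dropped (reserve_crashkernel_sketch() is an illustrative name):

	#include <linux/kexec.h>
	#include <linux/bootmem.h>

	static void __init reserve_crashkernel_sketch(unsigned long long total_mem)
	{
		unsigned long long crash_size, crash_base;

		if (parse_crashkernel(boot_command_line, total_mem,
				      &crash_size, &crash_base))
			return;		/* no (or malformed) crashkernel= */
		if (!crash_size || !crash_base)
			return;		/* a base address must be given */

		crashk_res.start = crash_base;
		crashk_res.end   = crash_base + crash_size - 1;
		reserve_bootmem(crash_base, crash_size);
	}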
2571--- sle11-2009-10-16.orig/arch/x86/kernel/setup_64-xen.c 2009-02-16 16:17:21.000000000 +0100
2572+++ sle11-2009-10-16/arch/x86/kernel/setup_64-xen.c 2009-02-16 16:18:36.000000000 +0100
2573@@ -1,10 +1,5 @@
2574 /*
2575- * linux/arch/x86-64/kernel/setup.c
2576- *
2577 * Copyright (C) 1995 Linus Torvalds
2578- *
2579- * Nov 2001 Dave Jones <davej@suse.de>
2580- * Forked from i386 setup code.
2581 */
2582
2583 /*
2584@@ -57,13 +52,13 @@
2585 #include <asm/dma.h>
2586 #include <asm/mpspec.h>
2587 #include <asm/mmu_context.h>
2588-#include <asm/bootsetup.h>
2589 #include <asm/proto.h>
2590 #include <asm/setup.h>
2591 #include <asm/mach_apic.h>
2592 #include <asm/numa.h>
2593 #include <asm/sections.h>
2594 #include <asm/dmi.h>
2595+#include <asm/cacheflush.h>
2596 #ifdef CONFIG_XEN
2597 #include <linux/percpu.h>
2598 #include <xen/interface/physdev.h>
2599@@ -180,6 +175,12 @@ struct resource code_resource = {
2600 .end = 0,
2601 .flags = IORESOURCE_RAM,
2602 };
2603+struct resource bss_resource = {
2604+ .name = "Kernel bss",
2605+ .start = 0,
2606+ .end = 0,
2607+ .flags = IORESOURCE_RAM,
2608+};
2609
2610 #ifdef CONFIG_PROC_VMCORE
2611 /* elfcorehdr= specifies the location of elf core header
2612@@ -231,10 +232,11 @@ EXPORT_SYMBOL(edd);
2613 */
2614 static inline void copy_edd(void)
2615 {
2616- memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
2617- memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
2618- edd.mbr_signature_nr = EDD_MBR_SIG_NR;
2619- edd.edd_info_nr = EDD_NR;
2620+ memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
2621+ sizeof(edd.mbr_signature));
2622+ memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
2623+ edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
2624+ edd.edd_info_nr = boot_params.eddbuf_entries;
2625 }
2626 #endif
2627 #else
2628@@ -243,6 +245,41 @@ static inline void copy_edd(void)
2629 }
2630 #endif
2631
2632+#ifdef CONFIG_KEXEC
2633+#ifndef CONFIG_XEN
2634+static void __init reserve_crashkernel(void)
2635+{
2636+ unsigned long long free_mem;
2637+ unsigned long long crash_size, crash_base;
2638+ int ret;
2639+
2640+ free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
2641+
2642+ ret = parse_crashkernel(boot_command_line, free_mem,
2643+ &crash_size, &crash_base);
2644+ if (ret == 0 && crash_size) {
2645+ if (crash_base > 0) {
2646+ printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
2647+ "for crashkernel (System RAM: %ldMB)\n",
2648+ (unsigned long)(crash_size >> 20),
2649+ (unsigned long)(crash_base >> 20),
2650+ (unsigned long)(free_mem >> 20));
2651+ crashk_res.start = crash_base;
2652+ crashk_res.end = crash_base + crash_size - 1;
2653+ reserve_bootmem(crash_base, crash_size);
2654+ } else
2655+ printk(KERN_INFO "crashkernel reservation failed - "
2656+ "you have to specify a base address\n");
2657+ }
2658+}
2659+#else
2660+#define reserve_crashkernel xen_machine_kexec_setup_resources
2661+#endif
2662+#else
2663+static inline void __init reserve_crashkernel(void)
2664+{}
2665+#endif
2666+
2667 #ifndef CONFIG_XEN
2668 #define EBDA_ADDR_POINTER 0x40E
2669
2670@@ -283,7 +320,7 @@ void __init setup_arch(char **cmdline_p)
2671 atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
2672
2673 ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
2674- screen_info = SCREEN_INFO;
2675+ screen_info = boot_params.screen_info;
2676
2677 if (is_initial_xendomain()) {
2678 const struct dom0_vga_console_info *info =
2679@@ -306,22 +343,22 @@ void __init setup_arch(char **cmdline_p)
2680 #else
2681 printk(KERN_INFO "Command line: %s\n", boot_command_line);
2682
2683- ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
2684- screen_info = SCREEN_INFO;
2685- edid_info = EDID_INFO;
2686+ ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
2687+ screen_info = boot_params.screen_info;
2688+ edid_info = boot_params.edid_info;
2689 #endif /* !CONFIG_XEN */
2690- saved_video_mode = SAVED_VIDEO_MODE;
2691- bootloader_type = LOADER_TYPE;
2692+ saved_video_mode = boot_params.hdr.vid_mode;
2693+ bootloader_type = boot_params.hdr.type_of_loader;
2694
2695 #ifdef CONFIG_BLK_DEV_RAM
2696- rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
2697- rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
2698- rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
2699+ rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
2700+ rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
2701+ rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
2702 #endif
2703 setup_memory_region();
2704 copy_edd();
2705
2706- if (!MOUNT_ROOT_RDONLY)
2707+ if (!boot_params.hdr.root_flags)
2708 root_mountflags &= ~MS_RDONLY;
2709 init_mm.start_code = (unsigned long) &_text;
2710 init_mm.end_code = (unsigned long) &_etext;
2711@@ -332,6 +369,8 @@ void __init setup_arch(char **cmdline_p)
2712 code_resource.end = virt_to_phys(&_etext)-1;
2713 data_resource.start = virt_to_phys(&_etext);
2714 data_resource.end = virt_to_phys(&_edata)-1;
2715+ bss_resource.start = virt_to_phys(&__bss_start);
2716+ bss_resource.end = virt_to_phys(&__bss_stop)-1;
2717
2718 early_identify_cpu(&boot_cpu_data);
2719
2720@@ -359,6 +398,11 @@ void __init setup_arch(char **cmdline_p)
2721 if (is_initial_xendomain())
2722 dmi_scan_machine();
2723
2724+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
2725+ /* setup to use the static apicid table during kernel startup */
2726+ x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
2727+#endif
2728+
2729 /* How many end-of-memory variables you have, grandma! */
2730 max_low_pfn = end_pfn;
2731 max_pfn = end_pfn;
2732@@ -423,52 +467,37 @@ void __init setup_arch(char **cmdline_p)
2733 */
2734 acpi_reserve_bootmem();
2735 #endif
2736-#ifdef CONFIG_XEN
2737 #ifdef CONFIG_BLK_DEV_INITRD
2738+#ifdef CONFIG_XEN
2739 if (xen_start_info->mod_start) {
2740- if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
2741- /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
2742- initrd_start = INITRD_START + PAGE_OFFSET;
2743- initrd_end = initrd_start+INITRD_SIZE;
2744+ unsigned long ramdisk_image = __pa(xen_start_info->mod_start);
2745+ unsigned long ramdisk_size = xen_start_info->mod_len;
2746+#else
2747+ if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
2748+ unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
2749+ unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
2750+#endif
2751+ unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
2752+ unsigned long end_of_mem = end_pfn << PAGE_SHIFT;
2753+
2754+ if (ramdisk_end <= end_of_mem) {
2755+#ifndef CONFIG_XEN
2756+ reserve_bootmem_generic(ramdisk_image, ramdisk_size);
2757+#endif
2758+ initrd_start = ramdisk_image + PAGE_OFFSET;
2759+ initrd_end = initrd_start+ramdisk_size;
2760+#ifdef CONFIG_XEN
2761 initrd_below_start_ok = 1;
2762- } else {
2763- printk(KERN_ERR "initrd extends beyond end of memory "
2764- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
2765- (unsigned long)(INITRD_START + INITRD_SIZE),
2766- (unsigned long)(end_pfn << PAGE_SHIFT));
2767- initrd_start = 0;
2768- }
2769- }
2770 #endif
2771-#else /* CONFIG_XEN */
2772-#ifdef CONFIG_BLK_DEV_INITRD
2773- if (LOADER_TYPE && INITRD_START) {
2774- if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
2775- reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
2776- initrd_start = INITRD_START + PAGE_OFFSET;
2777- initrd_end = initrd_start+INITRD_SIZE;
2778- }
2779- else {
2780+ } else {
2781 printk(KERN_ERR "initrd extends beyond end of memory "
2782- "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
2783- (unsigned long)(INITRD_START + INITRD_SIZE),
2784- (unsigned long)(end_pfn << PAGE_SHIFT));
2785+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
2786+ ramdisk_end, end_of_mem);
2787 initrd_start = 0;
2788 }
2789 }
2790 #endif
2791-#endif /* !CONFIG_XEN */
2792-#ifdef CONFIG_KEXEC
2793-#ifdef CONFIG_XEN
2794- xen_machine_kexec_setup_resources();
2795-#else
2796- if (crashk_res.start != crashk_res.end) {
2797- reserve_bootmem_generic(crashk_res.start,
2798- crashk_res.end - crashk_res.start + 1);
2799- }
2800-#endif
2801-#endif
2802-
2803+ reserve_crashkernel();
2804 paging_init();
2805 #ifdef CONFIG_X86_LOCAL_APIC
2806 /*
2807@@ -783,7 +812,7 @@ static void __init amd_detect_cmp(struct
2808 but in the same order as the HT nodeids.
2809 If that doesn't result in a usable node fall back to the
2810 path for the previous case. */
2811- int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
2812+ int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
2813 if (ht_nodeid >= 0 &&
2814 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
2815 node = apicid_to_node[ht_nodeid];
2816@@ -798,6 +827,39 @@ static void __init amd_detect_cmp(struct
2817 #endif
2818 }
2819
2820+#define ENABLE_C1E_MASK 0x18000000
2821+#define CPUID_PROCESSOR_SIGNATURE 1
2822+#define CPUID_XFAM 0x0ff00000
2823+#define CPUID_XFAM_K8 0x00000000
2824+#define CPUID_XFAM_10H 0x00100000
2825+#define CPUID_XFAM_11H 0x00200000
2826+#define CPUID_XMOD 0x000f0000
2827+#define CPUID_XMOD_REV_F 0x00040000
2828+
2829+#ifndef CONFIG_XEN
2830+/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
2831+static __cpuinit int amd_apic_timer_broken(void)
2832+{
2833+ u32 lo, hi;
2834+ u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
2835+ switch (eax & CPUID_XFAM) {
2836+ case CPUID_XFAM_K8:
2837+ if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
2838+ break;
2839+ case CPUID_XFAM_10H:
2840+ case CPUID_XFAM_11H:
2841+ rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
2842+ if (lo & ENABLE_C1E_MASK)
2843+ return 1;
2844+ break;
2845+ default:
2846+ /* err on the side of caution */
2847+ return 1;
2848+ }
2849+ return 0;
2850+}
2851+#endif
2852+
2853 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
2854 {
2855 unsigned level;
2856@@ -827,7 +889,7 @@ static void __cpuinit init_amd(struct cp
2857 level = cpuid_eax(1);
2858 if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
2859 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
2860- if (c->x86 == 0x10)
2861+ if (c->x86 == 0x10 || c->x86 == 0x11)
2862 set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
2863
2864 /* Enable workaround for FXSAVE leak */
2865@@ -869,6 +931,11 @@ static void __cpuinit init_amd(struct cp
2866 /* Family 10 doesn't support C states in MWAIT so don't use it */
2867 if (c->x86 == 0x10 && !force_mwait)
2868 clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);
2869+
2870+#ifndef CONFIG_XEN
2871+ if (amd_apic_timer_broken())
2872+ disable_apic_timer = 1;
2873+#endif
2874 }
2875
2876 static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
2877@@ -1179,6 +1246,7 @@ void __cpuinit print_cpu_info(struct cpu
2878 static int show_cpuinfo(struct seq_file *m, void *v)
2879 {
2880 struct cpuinfo_x86 *c = v;
2881+ int cpu = 0;
2882
2883 /*
2884 * These flag bits must match the definitions in <asm/cpufeature.h>.
2885@@ -1188,7 +1256,7 @@ static int show_cpuinfo(struct seq_file
2886 * applications want to get the raw CPUID data, they should access
2887 * /dev/cpu/<cpu_nr>/cpuid instead.
2888 */
2889- static char *x86_cap_flags[] = {
2890+ static const char *const x86_cap_flags[] = {
2891 /* Intel-defined */
2892 "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
2893 "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
2894@@ -1219,7 +1287,7 @@ static int show_cpuinfo(struct seq_file
2895 /* Intel-defined (#2) */
2896 "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
2897 "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
2898- NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
2899+ NULL, NULL, "dca", "sse4_1", "sse4_2", NULL, NULL, "popcnt",
2900 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2901
2902 /* VIA/Cyrix/Centaur-defined */
2903@@ -1229,10 +1297,10 @@ static int show_cpuinfo(struct seq_file
2904 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2905
2906 /* AMD-defined (#2) */
2907- "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
2908- "altmovcr8", "abm", "sse4a",
2909- "misalignsse", "3dnowprefetch",
2910- "osvw", "ibs", NULL, NULL, NULL, NULL,
2911+ "lahf_lm", "cmp_legacy", "svm", "extapic",
2912+ "cr8_legacy", "abm", "sse4a", "misalignsse",
2913+ "3dnowprefetch", "osvw", "ibs", "sse5",
2914+ "skinit", "wdt", NULL, NULL,
2915 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2916 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2917
2918@@ -1242,7 +1310,7 @@ static int show_cpuinfo(struct seq_file
2919 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2920 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2921 };
2922- static char *x86_power_flags[] = {
2923+ static const char *const x86_power_flags[] = {
2924 "ts", /* temperature sensor */
2925 "fid", /* frequency id control */
2926 "vid", /* voltage id control */
2927@@ -1257,8 +1325,7 @@ static int show_cpuinfo(struct seq_file
2928
2929
2930 #ifdef CONFIG_SMP
2931- if (!cpu_online(c-cpu_data))
2932- return 0;
2933+ cpu = c->cpu_index;
2934 #endif
2935
2936 seq_printf(m,"processor\t: %u\n"
2937@@ -1266,7 +1333,7 @@ static int show_cpuinfo(struct seq_file
2938 "cpu family\t: %d\n"
2939 "model\t\t: %d\n"
2940 "model name\t: %s\n",
2941- (unsigned)(c-cpu_data),
2942+ (unsigned)cpu,
2943 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
2944 c->x86,
2945 (int)c->x86_model,
2946@@ -1278,7 +1345,7 @@ static int show_cpuinfo(struct seq_file
2947 seq_printf(m, "stepping\t: unknown\n");
2948
2949 if (cpu_has(c,X86_FEATURE_TSC)) {
2950- unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
2951+ unsigned int freq = cpufreq_quick_get((unsigned)cpu);
2952 if (!freq)
2953 freq = cpu_khz;
2954 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
2955@@ -1291,9 +1358,9 @@ static int show_cpuinfo(struct seq_file
2956
2957 #ifdef CONFIG_SMP
2958 if (smp_num_siblings * c->x86_max_cores > 1) {
2959- int cpu = c - cpu_data;
2960 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
2961- seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
2962+ seq_printf(m, "siblings\t: %d\n",
2963+ cpus_weight(per_cpu(cpu_core_map, cpu)));
2964 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
2965 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
2966 }
2967@@ -1348,12 +1415,16 @@ static int show_cpuinfo(struct seq_file
2968
2969 static void *c_start(struct seq_file *m, loff_t *pos)
2970 {
2971- return *pos < NR_CPUS ? cpu_data + *pos : NULL;
2972+ if (*pos == 0) /* just in case, cpu 0 is not the first */
2973+ *pos = first_cpu(cpu_online_map);
2974+ if ((*pos) < NR_CPUS && cpu_online(*pos))
2975+ return &cpu_data(*pos);
2976+ return NULL;
2977 }
2978
2979 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
2980 {
2981- ++*pos;
2982+ *pos = next_cpu(*pos, cpu_online_map);
2983 return c_start(m, pos);
2984 }
2985
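
Most of the churn in this file stems from one 2.6.24 change: the global cpu_data[] array becomes per-CPU data behind a cpu_data(cpu) macro, and the /proc/cpuinfo iterator walks cpu_online_map instead of indexing all NR_CPUS slots, so sparse or offline CPU numbers are skipped. The accessor is roughly the following; hedged, as the exact definition lives in the arch headers rather than in this patch:

	DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info);

	#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
	#define current_cpu_data	cpu_data(smp_processor_id())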
2986--- sle11-2009-10-16.orig/arch/x86/kernel/smp_32-xen.c 2009-02-16 16:17:21.000000000 +0100
2987+++ sle11-2009-10-16/arch/x86/kernel/smp_32-xen.c 2009-02-16 16:18:36.000000000 +0100
2988@@ -72,7 +72,7 @@
2989 *
2990 * B stepping CPUs may hang. There are hardware work arounds
2991 * for this. We warn about it in case your board doesn't have the work
2992- * arounds. Basically thats so I can tell anyone with a B stepping
2993+ * arounds. Basically that's so I can tell anyone with a B stepping
2994 * CPU and SMP problems "tough".
2995 *
2996 * Specific items [From Pentium Processor Specification Update]
2997@@ -241,7 +241,7 @@ void leave_mm(unsigned long cpu)
2998 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
2999 * Stop ipi delivery for the old mm. This is not synchronized with
3000 * the other cpus, but smp_invalidate_interrupt ignore flush ipis
3001- * for the wrong mm, and in the worst case we perform a superflous
3002+ * for the wrong mm, and in the worst case we perform a superfluous
3003 * tlb flush.
3004 * 1a2) set cpu_tlbstate to TLBSTATE_OK
3005 * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
3006@@ -309,6 +309,7 @@ irqreturn_t smp_invalidate_interrupt(int
3007 smp_mb__after_clear_bit();
3008 out:
3009 put_cpu_no_resched();
3010+ __get_cpu_var(irq_stat).irq_tlb_count++;
3011
3012 return IRQ_HANDLED;
3013 }
3014@@ -580,7 +581,7 @@ static void stop_this_cpu (void * dummy)
3015 */
3016 cpu_clear(smp_processor_id(), cpu_online_map);
3017 disable_all_local_evtchn();
3018- if (cpu_data[smp_processor_id()].hlt_works_ok)
3019+ if (cpu_data(smp_processor_id()).hlt_works_ok)
3020 for(;;) halt();
3021 for (;;);
3022 }
3023@@ -610,6 +611,7 @@ void xen_smp_send_stop(void)
3024 */
3025 irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
3026 {
3027+ __get_cpu_var(irq_stat).irq_resched_count++;
3028
3029 return IRQ_HANDLED;
3030 }
3031@@ -632,6 +634,7 @@ irqreturn_t smp_call_function_interrupt(
3032 */
3033 irq_enter();
3034 (*func)(info);
3035+ __get_cpu_var(irq_stat).irq_call_count++;
3036 irq_exit();
3037
3038 if (wait) {
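
The three one-line additions above (irq_tlb_count, irq_resched_count, irq_call_count) feed the per-IPI statistics that 2.6.24 starts reporting in /proc/interrupts (the TLB:, RES: and CAL: rows). On i386 the counters sit in the per-CPU irq_stat, so bumping one is a single statement; note_resched_ipi() below is an illustrative wrapper, not part of the patch:

	#include <asm/hardirq.h>

	static inline void note_resched_ipi(void)
	{
		__get_cpu_var(irq_stat).irq_resched_count++;	/* RES: row */
	}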
3039--- sle11-2009-10-16.orig/arch/x86/kernel/smp_64-xen.c 2009-02-16 16:17:21.000000000 +0100
3040+++ sle11-2009-10-16/arch/x86/kernel/smp_64-xen.c 2009-02-16 16:18:36.000000000 +0100
3041@@ -167,6 +167,7 @@ asmlinkage void smp_invalidate_interrupt
3042 out:
3043 ack_APIC_irq();
3044 cpu_clear(cpu, f->flush_cpumask);
3045+ add_pda(irq_tlb_count, 1);
3046 }
3047
3048 static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
3049@@ -326,17 +327,27 @@ void unlock_ipi_call_lock(void)
3050 }
3051
3052 /*
3053- * this function sends a 'generic call function' IPI to one other CPU
3054- * in the system.
3055- *
3056- * cpu is a standard Linux logical CPU number.
3057+ * this function sends a 'generic call function' IPI to all other CPU
3058+ * of the system defined in the mask.
3059 */
3060-static void
3061-__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
3062- int nonatomic, int wait)
3063+
3064+static int
3065+__smp_call_function_mask(cpumask_t mask,
3066+ void (*func)(void *), void *info,
3067+ int wait)
3068 {
3069 struct call_data_struct data;
3070- int cpus = 1;
3071+ cpumask_t allbutself;
3072+ int cpus;
3073+
3074+ allbutself = cpu_online_map;
3075+ cpu_clear(smp_processor_id(), allbutself);
3076+
3077+ cpus_and(mask, mask, allbutself);
3078+ cpus = cpus_weight(mask);
3079+
3080+ if (!cpus)
3081+ return 0;
3082
3083 data.func = func;
3084 data.info = info;
3085@@ -347,19 +358,55 @@ __smp_call_function_single(int cpu, void
3086
3087 call_data = &data;
3088 wmb();
3089- /* Send a message to all other CPUs and wait for them to respond */
3090- send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
3091+
3092+ /* Send a message to other CPUs */
3093+ if (cpus_equal(mask, allbutself))
3094+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
3095+ else
3096+ send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
3097
3098 /* Wait for response */
3099 while (atomic_read(&data.started) != cpus)
3100 cpu_relax();
3101
3102 if (!wait)
3103- return;
3104+ return 0;
3105
3106 while (atomic_read(&data.finished) != cpus)
3107 cpu_relax();
3108+
3109+ return 0;
3110+}
3111+/**
3112+ * smp_call_function_mask(): Run a function on a set of other CPUs.
3113+ * @mask: The set of cpus to run on. Must not include the current cpu.
3114+ * @func: The function to run. This must be fast and non-blocking.
3115+ * @info: An arbitrary pointer to pass to the function.
3116+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
3117+ *
3118+ * Returns 0 on success, else a negative status code.
3119+ *
3120+ * If @wait is true, then returns once @func has returned; otherwise
3121+ * it returns just before the target cpu calls @func.
3122+ *
3123+ * You must not call this function with disabled interrupts or from a
3124+ * hardware interrupt handler or from a bottom half handler.
3125+ */
3126+int smp_call_function_mask(cpumask_t mask,
3127+ void (*func)(void *), void *info,
3128+ int wait)
3129+{
3130+ int ret;
3131+
3132+ /* Can deadlock when called with interrupts disabled */
3133+ WARN_ON(irqs_disabled());
3134+
3135+ spin_lock(&call_lock);
3136+ ret = __smp_call_function_mask(mask, func, info, wait);
3137+ spin_unlock(&call_lock);
3138+ return ret;
3139 }
3140+EXPORT_SYMBOL(smp_call_function_mask);
3141
3142 /*
3143 * smp_call_function_single - Run a function on a specific CPU
3144@@ -378,6 +425,7 @@ int smp_call_function_single (int cpu, v
3145 int nonatomic, int wait)
3146 {
3147 /* prevent preemption and reschedule on another processor */
3148+ int ret;
3149 int me = get_cpu();
3150
3151 /* Can deadlock when called with interrupts disabled */
3152@@ -391,51 +439,14 @@ int smp_call_function_single (int cpu, v
3153 return 0;
3154 }
3155
3156- spin_lock(&call_lock);
3157- __smp_call_function_single(cpu, func, info, nonatomic, wait);
3158- spin_unlock(&call_lock);
3159+ ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
3160+
3161 put_cpu();
3162- return 0;
3163+ return ret;
3164 }
3165 EXPORT_SYMBOL(smp_call_function_single);
3166
3167 /*
3168- * this function sends a 'generic call function' IPI to all other CPUs
3169- * in the system.
3170- */
3171-static void __smp_call_function (void (*func) (void *info), void *info,
3172- int nonatomic, int wait)
3173-{
3174- struct call_data_struct data;
3175- int cpus = num_online_cpus()-1;
3176-
3177- if (!cpus)
3178- return;
3179-
3180- data.func = func;
3181- data.info = info;
3182- atomic_set(&data.started, 0);
3183- data.wait = wait;
3184- if (wait)
3185- atomic_set(&data.finished, 0);
3186-
3187- call_data = &data;
3188- wmb();
3189- /* Send a message to all other CPUs and wait for them to respond */
3190- send_IPI_allbutself(CALL_FUNCTION_VECTOR);
3191-
3192- /* Wait for response */
3193- while (atomic_read(&data.started) != cpus)
3194- cpu_relax();
3195-
3196- if (!wait)
3197- return;
3198-
3199- while (atomic_read(&data.finished) != cpus)
3200- cpu_relax();
3201-}
3202-
3203-/*
3204 * smp_call_function - run a function on all other CPUs.
3205 * @func: The function to run. This must be fast and non-blocking.
3206 * @info: An arbitrary pointer to pass to the function.
3207@@ -453,10 +464,7 @@ static void __smp_call_function (void (*
3208 int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
3209 int wait)
3210 {
3211- spin_lock(&call_lock);
3212- __smp_call_function(func,info,nonatomic,wait);
3213- spin_unlock(&call_lock);
3214- return 0;
3215+ return smp_call_function_mask(cpu_online_map, func, info, wait);
3216 }
3217 EXPORT_SYMBOL(smp_call_function);
3218
3219@@ -485,7 +493,7 @@ void smp_send_stop(void)
3220 /* Don't deadlock on the call lock in panic */
3221 nolock = !spin_trylock(&call_lock);
3222 local_irq_save(flags);
3223- __smp_call_function(stop_this_cpu, NULL, 0, 0);
3224+ __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
3225 if (!nolock)
3226 spin_unlock(&call_lock);
3227 disable_all_local_evtchn();
3228@@ -505,7 +513,9 @@ asmlinkage irqreturn_t smp_reschedule_in
3229 {
3230 #ifndef CONFIG_XEN
3231 ack_APIC_irq();
3232-#else
3233+#endif
3234+ add_pda(irq_resched_count, 1);
3235+#ifdef CONFIG_XEN
3236 return IRQ_HANDLED;
3237 #endif
3238 }
3239@@ -535,6 +545,7 @@ asmlinkage irqreturn_t smp_call_function
3240 exit_idle();
3241 irq_enter();
3242 (*func)(info);
3243+ add_pda(irq_call_count, 1);
3244 irq_exit();
3245 if (wait) {
3246 mb();
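
smp_call_function_mask() generalizes the old single-CPU and all-CPU helpers, which now both funnel into __smp_call_function_mask(). A caller sketch matching the kernel-doc above (flush_local_state() and flush_all_other_cpus() are made-up names; the callback must be fast and non-blocking, and the caller must have interrupts enabled):

	static void flush_local_state(void *info)
	{
		/* fast per-CPU work; must not sleep */
	}

	static int flush_all_other_cpus(void)
	{
		cpumask_t mask = cpu_online_map;

		cpu_clear(smp_processor_id(), mask);	/* exclude self */
		return smp_call_function_mask(mask, flush_local_state,
					      NULL, 1);	/* wait == true */
	}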
3247--- sle11-2009-10-16.orig/arch/x86/kernel/time_32-xen.c 2009-10-28 14:57:53.000000000 +0100
3248+++ sle11-2009-10-16/arch/x86/kernel/time_32-xen.c 2009-10-28 14:57:59.000000000 +0100
3249@@ -1,6 +1,4 @@
3250 /*
3251- * linux/arch/i386/kernel/time.c
3252- *
3253 * Copyright (C) 1991, 1992, 1995 Linus Torvalds
3254 *
3255 * This file contains the PC-specific time handling details:
3256@@ -74,6 +72,7 @@
3257 #include <asm/arch_hooks.h>
3258
3259 #include <xen/evtchn.h>
3260+#include <xen/sysctl.h>
3261 #include <xen/interface/vcpu.h>
3262
3263 #include <asm/i8253.h>
3264@@ -544,6 +543,13 @@ irqreturn_t timer_interrupt(int irq, voi
3265 struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
3266 struct vcpu_runstate_info runstate;
3267
3268+ /* Keep nmi watchdog up to date */
3269+#ifdef __i386__
3270+ per_cpu(irq_stat, smp_processor_id()).irq0_irqs++;
3271+#else
3272+ add_pda(irq0_irqs, 1);
3273+#endif
3274+
3275 /*
3276 * Here we are in the timer irq handler. We just have irqs locally
3277 * disabled but we don't know if the timer_bh is running on the other
3278@@ -995,7 +1001,7 @@ static int time_cpufreq_notifier(struct
3279 struct cpufreq_freqs *freq = data;
3280 struct xen_platform_op op;
3281
3282- if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
3283+ if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
3284 return 0;
3285
3286 if (val == CPUFREQ_PRECHANGE)
3287@@ -1033,30 +1039,33 @@ core_initcall(cpufreq_time_setup);
3288 */
3289 static ctl_table xen_subtable[] = {
3290 {
3291- .ctl_name = 1,
3292+ .ctl_name = CTL_XEN_INDEPENDENT_WALLCLOCK,
3293 .procname = "independent_wallclock",
3294 .data = &independent_wallclock,
3295 .maxlen = sizeof(independent_wallclock),
3296 .mode = 0644,
3297+ .strategy = sysctl_data,
3298 .proc_handler = proc_dointvec
3299 },
3300 {
3301- .ctl_name = 2,
3302+ .ctl_name = CTL_XEN_PERMITTED_CLOCK_JITTER,
3303 .procname = "permitted_clock_jitter",
3304 .data = &permitted_clock_jitter,
3305 .maxlen = sizeof(permitted_clock_jitter),
3306 .mode = 0644,
3307+ .strategy = sysctl_data,
3308 .proc_handler = proc_doulongvec_minmax
3309 },
3310- { 0 }
3311+ { }
3312 };
3313 static ctl_table xen_table[] = {
3314 {
3315- .ctl_name = 123,
3316+ .ctl_name = CTL_XEN,
3317 .procname = "xen",
3318 .mode = 0555,
3319- .child = xen_subtable},
3320- { 0 }
3321+ .child = xen_subtable
3322+ },
3323+ { }
3324 };
3325 static int __init xen_sysctl_init(void)
3326 {
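
The sysctl hunk trades the magic ctl_name numbers for named constants from the newly included <xen/sysctl.h> and adds .strategy = sysctl_data so the binary sysctl(2) path works alongside /proc/sys. Registration itself is unchanged; what xen_sysctl_init() amounts to is roughly the following (a sketch, assuming the single-argument register_sysctl_table() of this kernel era):

	static struct ctl_table_header *xen_table_header;

	static int __init xen_sysctl_init_sketch(void)
	{
		xen_table_header = register_sysctl_table(xen_table);
		return xen_table_header ? 0 : -ENOMEM;
	}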
3327--- sle11-2009-10-16.orig/arch/x86/kernel/traps_32-xen.c 2009-02-16 16:17:21.000000000 +0100
3328+++ sle11-2009-10-16/arch/x86/kernel/traps_32-xen.c 2009-02-16 16:18:36.000000000 +0100
3329@@ -1,6 +1,4 @@
3330 /*
3331- * linux/arch/i386/traps.c
3332- *
3333 * Copyright (C) 1991, 1992 Linus Torvalds
3334 *
3335 * Pentium III FXSR, SSE support
3336@@ -65,6 +63,11 @@
3337
3338 int panic_on_unrecovered_nmi;
3339
3340+#ifndef CONFIG_XEN
3341+DECLARE_BITMAP(used_vectors, NR_VECTORS);
3342+EXPORT_SYMBOL_GPL(used_vectors);
3343+#endif
3344+
3345 asmlinkage int system_call(void);
3346
3347 /* Do we ignore FPU interrupts ? */
3348@@ -120,7 +123,7 @@ struct stack_frame {
3349
3350 static inline unsigned long print_context_stack(struct thread_info *tinfo,
3351 unsigned long *stack, unsigned long ebp,
3352- struct stacktrace_ops *ops, void *data)
3353+ const struct stacktrace_ops *ops, void *data)
3354 {
3355 #ifdef CONFIG_FRAME_POINTER
3356 struct stack_frame *frame = (struct stack_frame *)ebp;
3357@@ -157,7 +160,7 @@ static inline unsigned long print_contex
3358
3359 void dump_trace(struct task_struct *task, struct pt_regs *regs,
3360 unsigned long *stack,
3361- struct stacktrace_ops *ops, void *data)
3362+ const struct stacktrace_ops *ops, void *data)
3363 {
3364 unsigned long ebp = 0;
3365
3366@@ -229,7 +232,7 @@ static void print_trace_address(void *da
3367 touch_nmi_watchdog();
3368 }
3369
3370-static struct stacktrace_ops print_trace_ops = {
3371+static const struct stacktrace_ops print_trace_ops = {
3372 .warning = print_trace_warning,
3373 .warning_symbol = print_trace_warning_symbol,
3374 .stack = print_trace_stack,
3375@@ -288,6 +291,11 @@ void dump_stack(void)
3376 {
3377 unsigned long stack;
3378
3379+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
3380+ current->pid, current->comm, print_tainted(),
3381+ init_utsname()->release,
3382+ (int)strcspn(init_utsname()->version, " "),
3383+ init_utsname()->version);
3384 show_trace(current, NULL, &stack);
3385 }
3386
3387@@ -296,48 +304,24 @@ EXPORT_SYMBOL(dump_stack);
3388 void show_registers(struct pt_regs *regs)
3389 {
3390 int i;
3391- int in_kernel = 1;
3392- unsigned long esp;
3393- unsigned short ss, gs;
3394-
3395- esp = (unsigned long) (&regs->esp);
3396- savesegment(ss, ss);
3397- savesegment(gs, gs);
3398- if (user_mode_vm(regs)) {
3399- in_kernel = 0;
3400- esp = regs->esp;
3401- ss = regs->xss & 0xffff;
3402- }
3403+
3404 print_modules();
3405- printk(KERN_EMERG "CPU: %d\n"
3406- KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n"
3407- KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n",
3408- smp_processor_id(), 0xffff & regs->xcs, regs->eip,
3409- print_tainted(), regs->eflags, init_utsname()->release,
3410- (int)strcspn(init_utsname()->version, " "),
3411- init_utsname()->version);
3412- print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
3413- printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
3414- regs->eax, regs->ebx, regs->ecx, regs->edx);
3415- printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
3416- regs->esi, regs->edi, regs->ebp, esp);
3417- printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
3418- regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
3419+ __show_registers(regs, 0);
3420 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
3421- TASK_COMM_LEN, current->comm, current->pid,
3422+ TASK_COMM_LEN, current->comm, task_pid_nr(current),
3423 current_thread_info(), current, task_thread_info(current));
3424 /*
3425 * When in-kernel, we also print out the stack and code at the
3426 * time of the fault..
3427 */
3428- if (in_kernel) {
3429+ if (!user_mode_vm(regs)) {
3430 u8 *eip;
3431 unsigned int code_prologue = code_bytes * 43 / 64;
3432 unsigned int code_len = code_bytes;
3433 unsigned char c;
3434
3435 printk("\n" KERN_EMERG "Stack: ");
3436- show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
3437+ show_stack_log_lvl(NULL, regs, &regs->esp, KERN_EMERG);
3438
3439 printk(KERN_EMERG "Code: ");
3440
3441@@ -382,11 +366,11 @@ int is_valid_bugaddr(unsigned long eip)
3442 void die(const char * str, struct pt_regs * regs, long err)
3443 {
3444 static struct {
3445- spinlock_t lock;
3446+ raw_spinlock_t lock;
3447 u32 lock_owner;
3448 int lock_owner_depth;
3449 } die = {
3450- .lock = __SPIN_LOCK_UNLOCKED(die.lock),
3451+ .lock = __RAW_SPIN_LOCK_UNLOCKED,
3452 .lock_owner = -1,
3453 .lock_owner_depth = 0
3454 };
3455@@ -397,40 +381,33 @@ void die(const char * str, struct pt_reg
3456
3457 if (die.lock_owner != raw_smp_processor_id()) {
3458 console_verbose();
3459- spin_lock_irqsave(&die.lock, flags);
3460+ raw_local_irq_save(flags);
3461+ __raw_spin_lock(&die.lock);
3462 die.lock_owner = smp_processor_id();
3463 die.lock_owner_depth = 0;
3464 bust_spinlocks(1);
3465- }
3466- else
3467- local_save_flags(flags);
3468+ } else
3469+ raw_local_irq_save(flags);
3470
3471 if (++die.lock_owner_depth < 3) {
3472- int nl = 0;
3473 unsigned long esp;
3474 unsigned short ss;
3475
3476 report_bug(regs->eip, regs);
3477
3478- printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
3479+ printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff,
3480+ ++die_counter);
3481 #ifdef CONFIG_PREEMPT
3482- printk(KERN_EMERG "PREEMPT ");
3483- nl = 1;
3484+ printk("PREEMPT ");
3485 #endif
3486 #ifdef CONFIG_SMP
3487- if (!nl)
3488- printk(KERN_EMERG);
3489 printk("SMP ");
3490- nl = 1;
3491 #endif
3492 #ifdef CONFIG_DEBUG_PAGEALLOC
3493- if (!nl)
3494- printk(KERN_EMERG);
3495 printk("DEBUG_PAGEALLOC");
3496- nl = 1;
3497 #endif
3498- if (nl)
3499- printk("\n");
3500+ printk("\n");
3501+
3502 if (notify_die(DIE_OOPS, str, regs, err,
3503 current->thread.trap_no, SIGSEGV) !=
3504 NOTIFY_STOP) {
3505@@ -454,7 +431,8 @@ void die(const char * str, struct pt_reg
3506 bust_spinlocks(0);
3507 die.lock_owner = -1;
3508 add_taint(TAINT_DIE);
3509- spin_unlock_irqrestore(&die.lock, flags);
3510+ __raw_spin_unlock(&die.lock);
3511+ raw_local_irq_restore(flags);
3512
3513 if (!regs)
3514 return;
3515@@ -571,6 +549,7 @@ fastcall void do_##name(struct pt_regs *
3516 info.si_errno = 0; \
3517 info.si_code = sicode; \
3518 info.si_addr = (void __user *)siaddr; \
3519+ trace_hardirqs_fixup(); \
3520 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
3521 == NOTIFY_STOP) \
3522 return; \
3523@@ -606,7 +585,7 @@ fastcall void __kprobes do_general_prote
3524 printk_ratelimit())
3525 printk(KERN_INFO
3526 "%s[%d] general protection eip:%lx esp:%lx error:%lx\n",
3527- current->comm, current->pid,
3528+ current->comm, task_pid_nr(current),
3529 regs->eip, regs->esp, error_code);
3530
3531 force_sig(SIGSEGV, current);
3532@@ -785,6 +764,8 @@ void restart_nmi(void)
3533 #ifdef CONFIG_KPROBES
3534 fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
3535 {
3536+ trace_hardirqs_fixup();
3537+
3538 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
3539 == NOTIFY_STOP)
3540 return;
3541@@ -822,6 +803,8 @@ fastcall void __kprobes do_debug(struct
3542 unsigned int condition;
3543 struct task_struct *tsk = current;
3544
3545+ trace_hardirqs_fixup();
3546+
3547 get_debugreg(condition, 6);
3548
3549 if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
3550@@ -1084,20 +1067,6 @@ asmlinkage void math_emulate(long arg)
3551
3552 #endif /* CONFIG_MATH_EMULATION */
3553
3554-#ifdef CONFIG_X86_F00F_BUG
3555-void __init trap_init_f00f_bug(void)
3556-{
3557- __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
3558-
3559- /*
3560- * Update the IDT descriptor and reload the IDT so that
3561- * it uses the read-only mapped virtual address.
3562- */
3563- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
3564- load_idt(&idt_descr);
3565-}
3566-#endif
3567-
3568
3569 /*
3570 * NB. All these are "trap gates" (i.e. events_mask isn't set) except
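
A note on the die() rework above: the plain spinlock becomes a raw_spinlock_t taken via the __raw_* primitives with an explicit raw_local_irq_save(), so the oops path no longer goes through lockdep (whose own state may be what just crashed), while lock_owner and lock_owner_depth still let a CPU that oopses recursively make progress instead of self-deadlocking. Condensed into a self-contained sketch, with illustrative die_lock_* names:

#include <linux/spinlock.h>
#include <linux/smp.h>

static struct {
	raw_spinlock_t lock;
	int owner;			/* CPU currently printing an oops */
	int depth;			/* recursion count on that CPU */
} die_lock = {
	.lock	= __RAW_SPIN_LOCK_UNLOCKED,
	.owner	= -1,
};

static unsigned long die_lock_enter(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* no lockdep or tracing hooks */
	if (die_lock.owner != raw_smp_processor_id())
		__raw_spin_lock(&die_lock.lock);
	die_lock.owner = raw_smp_processor_id();
	die_lock.depth++;		/* counts nested oopses on this CPU */
	return flags;
}

static void die_lock_exit(unsigned long flags)
{
	if (--die_lock.depth == 0) {
		die_lock.owner = -1;
		__raw_spin_unlock(&die_lock.lock);
	}
	raw_local_irq_restore(flags);
}
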
3571--- sle11-2009-10-16.orig/arch/x86/kernel/traps_64-xen.c 2009-02-16 16:17:21.000000000 +0100
3572+++ sle11-2009-10-16/arch/x86/kernel/traps_64-xen.c 2009-02-16 16:18:36.000000000 +0100
3573@@ -1,6 +1,4 @@
3574 /*
3575- * linux/arch/x86-64/traps.c
3576- *
3577 * Copyright (C) 1991, 1992 Linus Torvalds
3578 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
3579 *
3580@@ -33,6 +31,7 @@
3581 #include <linux/uaccess.h>
3582 #include <linux/bug.h>
3583 #include <linux/kdebug.h>
3584+#include <linux/utsname.h>
3585
3586 #if defined(CONFIG_EDAC)
3587 #include <linux/edac.h>
3588@@ -205,7 +204,7 @@ static unsigned long *in_exception_stack
3589 #define MSG(txt) ops->warning(data, txt)
3590
3591 /*
3592- * x86-64 can have upto three kernel stacks:
3593+ * x86-64 can have up to three kernel stacks:
3594 * process stack
3595 * interrupt stack
3596 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
3597@@ -219,7 +218,7 @@ static inline int valid_stack_ptr(struct
3598
3599 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
3600 unsigned long *stack,
3601- struct stacktrace_ops *ops, void *data)
3602+ const struct stacktrace_ops *ops, void *data)
3603 {
3604 const unsigned cpu = get_cpu();
3605 unsigned long *irqstack_end = (unsigned long*)cpu_pda(cpu)->irqstackptr;
3606@@ -340,7 +339,7 @@ static void print_trace_address(void *da
3607 printk_address(addr);
3608 }
3609
3610-static struct stacktrace_ops print_trace_ops = {
3611+static const struct stacktrace_ops print_trace_ops = {
3612 .warning = print_trace_warning,
3613 .warning_symbol = print_trace_warning_symbol,
3614 .stack = print_trace_stack,
3615@@ -404,6 +403,12 @@ void show_stack(struct task_struct *tsk,
3616 void dump_stack(void)
3617 {
3618 unsigned long dummy;
3619+
3620+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
3621+ current->pid, current->comm, print_tainted(),
3622+ init_utsname()->release,
3623+ (int)strcspn(init_utsname()->version, " "),
3624+ init_utsname()->version);
3625 show_trace(NULL, NULL, &dummy);
3626 }
3627
3628@@ -466,7 +471,7 @@ void out_of_line_bug(void)
3629 EXPORT_SYMBOL(out_of_line_bug);
3630 #endif
3631
3632-static DEFINE_SPINLOCK(die_lock);
3633+static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
3634 static int die_owner = -1;
3635 static unsigned int die_nest_count;
3636
3637@@ -478,13 +483,13 @@ unsigned __kprobes long oops_begin(void)
3638 oops_enter();
3639
3640 /* racy, but better than risking deadlock. */
3641- local_irq_save(flags);
3642+ raw_local_irq_save(flags);
3643 cpu = smp_processor_id();
3644- if (!spin_trylock(&die_lock)) {
3645+ if (!__raw_spin_trylock(&die_lock)) {
3646 if (cpu == die_owner)
3647 /* nested oops. should stop eventually */;
3648 else
3649- spin_lock(&die_lock);
3650+ __raw_spin_lock(&die_lock);
3651 }
3652 die_nest_count++;
3653 die_owner = cpu;
3654@@ -498,12 +503,10 @@ void __kprobes oops_end(unsigned long fl
3655 die_owner = -1;
3656 bust_spinlocks(0);
3657 die_nest_count--;
3658- if (die_nest_count)
3659- /* We still own the lock */
3660- local_irq_restore(flags);
3661- else
3662+ if (!die_nest_count)
3663 /* Nest count reaches zero, release the lock. */
3664- spin_unlock_irqrestore(&die_lock, flags);
3665+ __raw_spin_unlock(&die_lock);
3666+ raw_local_irq_restore(flags);
3667 if (panic_on_oops)
3668 panic("Fatal exception");
3669 oops_exit();
3670@@ -636,6 +639,7 @@ asmlinkage void do_##name(struct pt_regs
3671 info.si_errno = 0; \
3672 info.si_code = sicode; \
3673 info.si_addr = (void __user *)siaddr; \
3674+ trace_hardirqs_fixup(); \
3675 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
3676 == NOTIFY_STOP) \
3677 return; \
3678@@ -741,11 +745,8 @@ mem_parity_error(unsigned char reason, s
3679
3680 printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
3681
3682-#if 0 /* XEN */
3683 /* Clear and disable the memory parity error line. */
3684- reason = (reason & 0xf) | 4;
3685- outb(reason, 0x61);
3686-#endif /* XEN */
3687+ clear_mem_error(reason);
3688 }
3689
3690 static __kprobes void
3691@@ -754,14 +755,8 @@ io_check_error(unsigned char reason, str
3692 printk("NMI: IOCK error (debug interrupt?)\n");
3693 show_registers(regs);
3694
3695-#if 0 /* XEN */
3696 /* Re-enable the IOCK line, wait for a few seconds */
3697- reason = (reason & 0xf) | 8;
3698- outb(reason, 0x61);
3699- mdelay(2000);
3700- reason &= ~8;
3701- outb(reason, 0x61);
3702-#endif /* XEN */
3703+ clear_io_check_error(reason);
3704 }
3705
3706 static __kprobes void
3707@@ -821,6 +816,8 @@ asmlinkage __kprobes void default_do_nmi
3708 /* runs on IST stack. */
3709 asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
3710 {
3711+ trace_hardirqs_fixup();
3712+
3713 if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
3714 return;
3715 }
3716@@ -858,6 +855,8 @@ asmlinkage void __kprobes do_debug(struc
3717 struct task_struct *tsk = current;
3718 siginfo_t info;
3719
3720+ trace_hardirqs_fixup();
3721+
3722 get_debugreg(condition, 6);
3723
3724 if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
3725--- sle11-2009-10-16.orig/arch/x86/kernel/vsyscall_64-xen.c 2009-02-16 16:17:21.000000000 +0100
3726+++ sle11-2009-10-16/arch/x86/kernel/vsyscall_64-xen.c 2009-02-16 16:18:36.000000000 +0100
3727@@ -1,6 +1,4 @@
3728 /*
3729- * linux/arch/x86_64/kernel/vsyscall.c
3730- *
3731 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
3732 * Copyright 2003 Andi Kleen, SuSE Labs.
3733 *
3734@@ -50,12 +48,12 @@
3735 ({unsigned long v; \
3736 extern char __vsyscall_0; \
3737 asm("" : "=r" (v) : "0" (x)); \
3738- ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); })
3739+ ((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); })
3740
3741 /*
3742 * vsyscall_gtod_data contains data that is :
3743 * - readonly from vsyscalls
3744- * - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
3745+ * - written by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
3746 * Try to keep this structure as small as possible to avoid cache line ping pongs
3747 */
3748 int __vgetcpu_mode __section_vgetcpu_mode;
3749@@ -66,6 +64,16 @@ struct vsyscall_gtod_data __vsyscall_gto
3750 .sysctl_enabled = 1,
3751 };
3752
3753+void update_vsyscall_tz(void)
3754+{
3755+ unsigned long flags;
3756+
3757+ write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
3758+ /* sys_tz has changed */
3759+ vsyscall_gtod_data.sys_tz = sys_tz;
3760+ write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
3761+}
3762+
3763 void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
3764 {
3765 unsigned long flags;
3766@@ -79,8 +87,6 @@ void update_vsyscall(struct timespec *wa
3767 vsyscall_gtod_data.clock.shift = clock->shift;
3768 vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
3769 vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
3770- vsyscall_gtod_data.sys_tz = sys_tz;
3771- vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
3772 vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
3773 write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
3774 }
3775@@ -166,7 +172,7 @@ time_t __vsyscall(1) vtime(time_t *t)
3776 if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
3777 return time_syscall(t);
3778
3779- vgettimeofday(&tv, 0);
3780+ vgettimeofday(&tv, NULL);
3781 result = tv.tv_sec;
3782 if (t)
3783 *t = result;
3784@@ -260,18 +266,10 @@ out:
3785 return ret;
3786 }
3787
3788-static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
3789- void __user *oldval, size_t __user *oldlenp,
3790- void __user *newval, size_t newlen)
3791-{
3792- return -ENOSYS;
3793-}
3794-
3795 static ctl_table kernel_table2[] = {
3796- { .ctl_name = 99, .procname = "vsyscall64",
3797+ { .procname = "vsyscall64",
3798 .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
3799 .mode = 0644,
3800- .strategy = vsyscall_sysctl_nostrat,
3801 .proc_handler = vsyscall_sysctl_change },
3802 {}
3803 };
3804@@ -291,9 +289,9 @@ static void __cpuinit vsyscall_set_cpu(i
3805 unsigned long d;
3806 unsigned long node = 0;
3807 #ifdef CONFIG_NUMA
3808- node = cpu_to_node[cpu];
3809+ node = cpu_to_node(cpu);
3810 #endif
3811- if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
3812+ if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
3813 write_rdtscp_aux((node << 12) | cpu);
3814
3815 /* Store cpu number in limit so that it can be loaded quickly
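
update_vsyscall_tz() above exists because 2.6.24 split timezone updates out of update_vsyscall(); both writers publish under the seqlock in vsyscall_gtod_data, so the lockless readers mapped into every process never observe a torn update. The protocol in isolation, using a hypothetical gtod structure:

#include <linux/seqlock.h>
#include <linux/time.h>

static struct {
	seqlock_t lock;
	struct timezone tz;
} gtod = {
	.lock = SEQLOCK_UNLOCKED,
};

/* writer: interrupt-safe, excludes other writers */
static void gtod_update_tz(const struct timezone *new_tz)
{
	unsigned long flags;

	write_seqlock_irqsave(&gtod.lock, flags);
	gtod.tz = *new_tz;
	write_sequnlock_irqrestore(&gtod.lock, flags);
}

/* reader: lockless, retries if a writer raced with the copy */
static struct timezone gtod_read_tz(void)
{
	struct timezone tz;
	unsigned seq;

	do {
		seq = read_seqbegin(&gtod.lock);
		tz = gtod.tz;
	} while (read_seqretry(&gtod.lock, seq));
	return tz;
}
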
3816--- sle11-2009-10-16.orig/arch/x86/mm/fault_32-xen.c 2009-02-16 16:17:21.000000000 +0100
3817+++ sle11-2009-10-16/arch/x86/mm/fault_32-xen.c 2009-02-16 16:18:36.000000000 +0100
3818@@ -25,6 +25,7 @@
3819 #include <linux/kprobes.h>
3820 #include <linux/uaccess.h>
3821 #include <linux/kdebug.h>
3822+#include <linux/kprobes.h>
3823
3824 #include <asm/system.h>
3825 #include <asm/desc.h>
3826@@ -32,33 +33,27 @@
3827
3828 extern void die(const char *,struct pt_regs *,long);
3829
3830-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
3831-
3832-int register_page_fault_notifier(struct notifier_block *nb)
3833+#ifdef CONFIG_KPROBES
3834+static inline int notify_page_fault(struct pt_regs *regs)
3835 {
3836- vmalloc_sync_all();
3837- return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
3838-}
3839-EXPORT_SYMBOL_GPL(register_page_fault_notifier);
3840+ int ret = 0;
3841
3842-int unregister_page_fault_notifier(struct notifier_block *nb)
3843-{
3844- return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
3845-}
3846-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
3847+ /* kprobe_running() needs smp_processor_id() */
3848+ if (!user_mode_vm(regs)) {
3849+ preempt_disable();
3850+ if (kprobe_running() && kprobe_fault_handler(regs, 14))
3851+ ret = 1;
3852+ preempt_enable();
3853+ }
3854
3855-static inline int notify_page_fault(struct pt_regs *regs, long err)
3856+ return ret;
3857+}
3858+#else
3859+static inline int notify_page_fault(struct pt_regs *regs)
3860 {
3861- struct die_args args = {
3862- .regs = regs,
3863- .str = "page fault",
3864- .err = err,
3865- .trapnr = 14,
3866- .signr = SIGSEGV
3867- };
3868- return atomic_notifier_call_chain(&notify_page_fault_chain,
3869- DIE_PAGE_FAULT, &args);
3870+ return 0;
3871 }
3872+#endif
3873
3874 /*
3875 * Return EIP plus the CS segment base. The segment limit is also
3876@@ -110,7 +105,7 @@ static inline unsigned long get_segment_
3877 LDT and other horrors are only used in user space. */
3878 if (seg & (1<<2)) {
3879 /* Must lock the LDT while reading it. */
3880- down(&current->mm->context.sem);
3881+ mutex_lock(&current->mm->context.lock);
3882 desc = current->mm->context.ldt;
3883 desc = (void *)desc + (seg & ~7);
3884 } else {
3885@@ -123,7 +118,7 @@ static inline unsigned long get_segment_
3886 base = get_desc_base((unsigned long *)desc);
3887
3888 if (seg & (1<<2)) {
3889- up(&current->mm->context.sem);
3890+ mutex_unlock(&current->mm->context.lock);
3891 } else
3892 put_cpu();
3893
3894@@ -244,7 +239,7 @@ static void dump_fault_path(unsigned lon
3895 if (mfn_to_pfn(mfn) >= highstart_pfn)
3896 return;
3897 #endif
3898- if (p[0] & _PAGE_PRESENT) {
3899+ if ((p[0] & _PAGE_PRESENT) && !(p[0] & _PAGE_PSE)) {
3900 page = mfn_to_pfn(mfn) << PAGE_SHIFT;
3901 p = (unsigned long *) __va(page);
3902 address &= 0x001fffff;
3903@@ -270,7 +265,8 @@ static void dump_fault_path(unsigned lon
3904 * it's allocated already.
3905 */
3906 if ((machine_to_phys(page) >> PAGE_SHIFT) < max_low_pfn
3907- && (page & _PAGE_PRESENT)) {
3908+ && (page & _PAGE_PRESENT)
3909+ && !(page & _PAGE_PSE)) {
3910 page = machine_to_phys(page & PAGE_MASK);
3911 page = ((unsigned long *) __va(page))[(address >> PAGE_SHIFT)
3912 & (PTRS_PER_PTE - 1)];
3913@@ -416,6 +412,11 @@ fastcall void __kprobes do_page_fault(st
3914 int write, si_code;
3915 int fault;
3916
3917+ /*
3918+ * We can fault from pretty much anywhere, with unknown IRQ state.
3919+ */
3920+ trace_hardirqs_fixup();
3921+
3922 /* get the address */
3923 address = read_cr2();
3924
3925@@ -453,7 +454,7 @@ fastcall void __kprobes do_page_fault(st
3926 /* Can take a spurious fault if mapping changes R/O -> R/W. */
3927 if (spurious_fault(regs, address, error_code))
3928 return;
3929- if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
3930+ if (notify_page_fault(regs))
3931 return;
3932 /*
3933 * Don't take the mm semaphore here. If we fixup a prefetch
3934@@ -462,7 +463,7 @@ fastcall void __kprobes do_page_fault(st
3935 goto bad_area_nosemaphore;
3936 }
3937
3938- if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
3939+ if (notify_page_fault(regs))
3940 return;
3941
3942 /* It's safe to allow irq's after cr2 has been saved and the vmalloc
3943@@ -481,7 +482,7 @@ fastcall void __kprobes do_page_fault(st
3944
3945 /* When running in the kernel we expect faults to occur only to
3946 * addresses in user space. All other faults represent errors in the
3947- * kernel and should generate an OOPS. Unfortunatly, in the case of an
3948+ * kernel and should generate an OOPS. Unfortunately, in the case of an
3949 * erroneous fault occurring in a code path which already holds mmap_sem
3950 * we will deadlock attempting to validate the fault against the
3951 * address space. Luckily the kernel only validly references user
3952@@ -489,7 +490,7 @@ fastcall void __kprobes do_page_fault(st
3953 * exceptions table.
3954 *
3955 * As the vast majority of faults will be valid we will only perform
3956- * the source reference check when there is a possibilty of a deadlock.
3957+ * the source reference check when there is a possibility of a deadlock.
3958 * Attempt to lock the address space, if we cannot we then validate the
3959 * source. If this is invalid we can skip the address space check,
3960 * thus avoiding the deadlock.
3961@@ -598,8 +599,8 @@ bad_area_nosemaphore:
3962 printk_ratelimit()) {
3963 printk("%s%s[%d]: segfault at %08lx eip %08lx "
3964 "esp %08lx error %lx\n",
3965- tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
3966- tsk->comm, tsk->pid, address, regs->eip,
3967+ task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
3968+ tsk->comm, task_pid_nr(tsk), address, regs->eip,
3969 regs->esp, error_code);
3970 }
3971 tsk->thread.cr2 = address;
3972@@ -664,8 +665,7 @@ no_context:
3973 printk(KERN_ALERT "BUG: unable to handle kernel paging"
3974 " request");
3975 printk(" at virtual address %08lx\n",address);
3976- printk(KERN_ALERT " printing eip:\n");
3977- printk("%08lx\n", regs->eip);
3978+ printk(KERN_ALERT "printing eip: %08lx\n", regs->eip);
3979 dump_fault_path(address);
3980 }
3981 tsk->thread.cr2 = address;
3982@@ -681,14 +681,14 @@ no_context:
3983 */
3984 out_of_memory:
3985 up_read(&mm->mmap_sem);
3986- if (is_init(tsk)) {
3987+ if (is_global_init(tsk)) {
3988 yield();
3989 down_read(&mm->mmap_sem);
3990 goto survive;
3991 }
3992 printk("VM: killing process %s\n", tsk->comm);
3993 if (error_code & 4)
3994- do_exit(SIGKILL);
3995+ do_group_exit(SIGKILL);
3996 goto no_context;
3997
3998 do_sigbus:
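
One small but typical 2.6.24 conversion in the hunk above: the per-mm LDT is now guarded by a struct mutex (mm->context.lock) rather than a semaphore used as one, which gains debugging checks and makes the sleeping-lock intent explicit. The before/after idiom, sketched with a hypothetical lock:

#include <linux/mutex.h>

static DEFINE_MUTEX(ldt_lock);		/* was: DECLARE_MUTEX(ldt_sem) */

static void ldt_inspect(void)
{
	mutex_lock(&ldt_lock);		/* was: down(&ldt_sem) */
	/* ... walk the LDT while nobody can resize or free it ... */
	mutex_unlock(&ldt_lock);	/* was: up(&ldt_sem) */
}
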
3999--- sle11-2009-10-16.orig/arch/x86/mm/fault_64-xen.c 2009-02-16 16:17:21.000000000 +0100
4000+++ sle11-2009-10-16/arch/x86/mm/fault_64-xen.c 2009-02-16 16:18:36.000000000 +0100
4001@@ -25,6 +25,7 @@
4002 #include <linux/kprobes.h>
4003 #include <linux/uaccess.h>
4004 #include <linux/kdebug.h>
4005+#include <linux/kprobes.h>
4006
4007 #include <asm/system.h>
4008 #include <asm/pgalloc.h>
4009@@ -40,34 +41,27 @@
4010 #define PF_RSVD (1<<3)
4011 #define PF_INSTR (1<<4)
4012
4013-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
4014-
4015-/* Hook to register for page fault notifications */
4016-int register_page_fault_notifier(struct notifier_block *nb)
4017+#ifdef CONFIG_KPROBES
4018+static inline int notify_page_fault(struct pt_regs *regs)
4019 {
4020- vmalloc_sync_all();
4021- return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
4022-}
4023-EXPORT_SYMBOL_GPL(register_page_fault_notifier);
4024+ int ret = 0;
4025
4026-int unregister_page_fault_notifier(struct notifier_block *nb)
4027-{
4028- return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
4029-}
4030-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
4031+ /* kprobe_running() needs smp_processor_id() */
4032+ if (!user_mode(regs)) {
4033+ preempt_disable();
4034+ if (kprobe_running() && kprobe_fault_handler(regs, 14))
4035+ ret = 1;
4036+ preempt_enable();
4037+ }
4038
4039-static inline int notify_page_fault(struct pt_regs *regs, long err)
4040+ return ret;
4041+}
4042+#else
4043+static inline int notify_page_fault(struct pt_regs *regs)
4044 {
4045- struct die_args args = {
4046- .regs = regs,
4047- .str = "page fault",
4048- .err = err,
4049- .trapnr = 14,
4050- .signr = SIGSEGV
4051- };
4052- return atomic_notifier_call_chain(&notify_page_fault_chain,
4053- DIE_PAGE_FAULT, &args);
4054+ return 0;
4055 }
4056+#endif
4057
4058 /* Sometimes the CPU reports invalid exceptions on prefetch.
4059 Check that here and ignore.
4060@@ -175,7 +169,7 @@ void dump_pagetable(unsigned long addres
4061 pmd = pmd_offset(pud, address);
4062 if (bad_address(pmd)) goto bad;
4063 printk("PMD %lx ", pmd_val(*pmd));
4064- if (!pmd_present(*pmd)) goto ret;
4065+ if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
4066
4067 pte = pte_offset_kernel(pmd, address);
4068 if (bad_address(pte)) goto bad;
4069@@ -294,7 +288,6 @@ static int vmalloc_fault(unsigned long a
4070 return 0;
4071 }
4072
4073-static int page_fault_trace;
4074 int show_unhandled_signals = 1;
4075
4076
4077@@ -371,6 +364,11 @@ asmlinkage void __kprobes do_page_fault(
4078 if (!user_mode(regs))
4079 error_code &= ~PF_USER; /* means kernel */
4080
4081+ /*
4082+ * We can fault from pretty much anywhere, with unknown IRQ state.
4083+ */
4084+ trace_hardirqs_fixup();
4085+
4086 tsk = current;
4087 mm = tsk->mm;
4088 prefetchw(&mm->mmap_sem);
4089@@ -408,7 +406,7 @@ asmlinkage void __kprobes do_page_fault(
4090 /* Can take a spurious fault if mapping changes R/O -> R/W. */
4091 if (spurious_fault(regs, address, error_code))
4092 return;
4093- if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
4094+ if (notify_page_fault(regs))
4095 return;
4096 /*
4097 * Don't take the mm semaphore here. If we fixup a prefetch
4098@@ -417,16 +415,12 @@ asmlinkage void __kprobes do_page_fault(
4099 goto bad_area_nosemaphore;
4100 }
4101
4102- if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
4103+ if (notify_page_fault(regs))
4104 return;
4105
4106 if (likely(regs->eflags & X86_EFLAGS_IF))
4107 local_irq_enable();
4108
4109- if (unlikely(page_fault_trace))
4110- printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
4111- regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
4112-
4113 if (unlikely(error_code & PF_RSVD))
4114 pgtable_bad(address, regs, error_code);
4115
4116@@ -447,7 +441,7 @@ asmlinkage void __kprobes do_page_fault(
4117 again:
4118 /* When running in the kernel we expect faults to occur only to
4119 * addresses in user space. All other faults represent errors in the
4120- * kernel and should generate an OOPS. Unfortunatly, in the case of an
4121+ * kernel and should generate an OOPS. Unfortunately, in the case of an
4122 * erroneous fault occurring in a code path which already holds mmap_sem
4123 * we will deadlock attempting to validate the fault against the
4124 * address space. Luckily the kernel only validly references user
4125@@ -455,7 +449,7 @@ asmlinkage void __kprobes do_page_fault(
4126 * exceptions table.
4127 *
4128 * As the vast majority of faults will be valid we will only perform
4129- * the source reference check when there is a possibilty of a deadlock.
4130+ * the source reference check when there is a possibility of a deadlock.
4131 * Attempt to lock the address space, if we cannot we then validate the
4132 * source. If this is invalid we can skip the address space check,
4133 * thus avoiding the deadlock.
4134@@ -557,7 +551,7 @@ bad_area_nosemaphore:
4135 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
4136 printk_ratelimit()) {
4137 printk(
4138- "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
4139+ "%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n",
4140 tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
4141 tsk->comm, tsk->pid, address, regs->rip,
4142 regs->rsp, error_code);
4143@@ -623,7 +617,7 @@ no_context:
4144 */
4145 out_of_memory:
4146 up_read(&mm->mmap_sem);
4147- if (is_init(current)) {
4148+ if (is_global_init(current)) {
4149 yield();
4150 goto again;
4151 }
4152@@ -690,10 +684,3 @@ void vmalloc_sync_all(void)
4153 BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
4154 (__START_KERNEL & PGDIR_MASK)));
4155 }
4156-
4157-static int __init enable_pagefaulttrace(char *str)
4158-{
4159- page_fault_trace = 1;
4160- return 1;
4161-}
4162-__setup("pagefaulttrace", enable_pagefaulttrace);
4163--- sle11-2009-10-16.orig/arch/x86/mm/hypervisor.c 2009-03-04 11:28:34.000000000 +0100
4164+++ sle11-2009-10-16/arch/x86/mm/hypervisor.c 2009-05-06 10:23:43.000000000 +0200
4165@@ -496,6 +496,9 @@ int xen_create_contiguous_region(
4166 unsigned long frame, flags;
4167 unsigned int i;
4168 int rc, success;
4169+#ifdef CONFIG_64BIT
4170+ pte_t *ptep = NULL;
4171+#endif
4172 struct xen_memory_exchange exchange = {
4173 .in = {
4174 .nr_extents = 1UL << order,
4175@@ -521,6 +524,27 @@ int xen_create_contiguous_region(
4176 if (unlikely(order > MAX_CONTIG_ORDER))
4177 return -ENOMEM;
4178
4179+#ifdef CONFIG_64BIT
4180+ if (unlikely(vstart > PAGE_OFFSET + MAXMEM)) {
4181+ unsigned int level;
4182+
4183+ if (vstart < __START_KERNEL_map
4184+ || vstart + (PAGE_SIZE << order) > (unsigned long)_end)
4185+ return -EINVAL;
4186+ ptep = lookup_address((unsigned long)__va(__pa(vstart)),
4187+ &level);
4188+ if (ptep && pte_none(*ptep))
4189+ ptep = NULL;
4190+ if (vstart < __START_KERNEL && ptep)
4191+ return -EINVAL;
4192+ if (order > MAX_CONTIG_ORDER - 1)
4193+ return -ENOMEM;
4194+ }
4195+#else
4196+ if (unlikely(vstart + (PAGE_SIZE << order) > (unsigned long)high_memory))
4197+ return -EINVAL;
4198+#endif
4199+
4200 set_xen_guest_handle(exchange.in.extent_start, in_frames);
4201 set_xen_guest_handle(exchange.out.extent_start, &out_frame);
4202
4203@@ -533,9 +557,19 @@ int xen_create_contiguous_region(
4204 in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
4205 MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
4206 __pte_ma(0), 0);
4207+#ifdef CONFIG_64BIT
4208+ if (ptep)
4209+ MULTI_update_va_mapping(cr_mcl + i + (1U << order),
4210+ (unsigned long)__va(__pa(vstart)) + (i*PAGE_SIZE),
4211+ __pte_ma(0), 0);
4212+#endif
4213 set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
4214 INVALID_P2M_ENTRY);
4215 }
4216+#ifdef CONFIG_64BIT
4217+ if (ptep)
4218+ i += i;
4219+#endif
4220 if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
4221 BUG();
4222
4223@@ -569,9 +603,18 @@ int xen_create_contiguous_region(
4224 frame = success ? (out_frame + i) : in_frames[i];
4225 MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
4226 pfn_pte_ma(frame, PAGE_KERNEL), 0);
4227+#ifdef CONFIG_64BIT
4228+ if (ptep)
4229+ MULTI_update_va_mapping(cr_mcl + i + (1U << order),
4230+ (unsigned long)__va(__pa(vstart)) + (i*PAGE_SIZE),
4231+ pfn_pte_ma(frame, PAGE_KERNEL_RO), 0);
4232+#endif
4233 set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
4234 }
4235-
4236+#ifdef CONFIG_64BIT
4237+ if (ptep)
4238+ i += i;
4239+#endif
4240 cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
4241 ? UVMF_TLB_FLUSH|UVMF_ALL
4242 : UVMF_INVLPG|UVMF_ALL;
4243--- sle11-2009-10-16.orig/arch/x86/mm/init_32-xen.c 2009-02-16 16:17:21.000000000 +0100
4244+++ sle11-2009-10-16/arch/x86/mm/init_32-xen.c 2009-02-16 16:18:36.000000000 +0100
4245@@ -94,7 +94,14 @@ static pte_t * __init one_page_table_ini
4246 #else
4247 if (!(__pmd_val(*pmd) & _PAGE_PRESENT)) {
4248 #endif
4249- pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
4250+ pte_t *page_table = NULL;
4251+
4252+#ifdef CONFIG_DEBUG_PAGEALLOC
4253+ page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
4254+#endif
4255+ if (!page_table)
4256+ page_table =
4257+ (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
4258
4259 paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
4260 make_lowmem_page_readonly(page_table,
4261@@ -102,7 +109,7 @@ static pte_t * __init one_page_table_ini
4262 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
4263 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
4264 }
4265-
4266+
4267 return pte_offset_kernel(pmd, 0);
4268 }
4269
4270@@ -360,8 +367,13 @@ extern void set_highmem_pages_init(int);
4271 static void __init set_highmem_pages_init(int bad_ppro)
4272 {
4273 int pfn;
4274- for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
4275- add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
4276+ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
4277+ /*
4278+ * Holes under sparsemem might not have no mem_map[]:
4279+ */
4280+ if (pfn_valid(pfn))
4281+ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
4282+ }
4283 totalram_pages += totalhigh_pages;
4284 }
4285 #endif /* CONFIG_FLATMEM */
4286@@ -779,35 +791,18 @@ int arch_add_memory(int nid, u64 start,
4287 return __add_pages(zone, start_pfn, nr_pages);
4288 }
4289
4290-int remove_memory(u64 start, u64 size)
4291-{
4292- return -EINVAL;
4293-}
4294-EXPORT_SYMBOL_GPL(remove_memory);
4295 #endif
4296
4297 struct kmem_cache *pmd_cache;
4298
4299 void __init pgtable_cache_init(void)
4300 {
4301- size_t pgd_size = PTRS_PER_PGD*sizeof(pgd_t);
4302-
4303- if (PTRS_PER_PMD > 1) {
4304+ if (PTRS_PER_PMD > 1)
4305 pmd_cache = kmem_cache_create("pmd",
4306- PTRS_PER_PMD*sizeof(pmd_t),
4307- PTRS_PER_PMD*sizeof(pmd_t),
4308- SLAB_PANIC,
4309- pmd_ctor);
4310- if (!SHARED_KERNEL_PMD) {
4311- /* If we're in PAE mode and have a non-shared
4312- kernel pmd, then the pgd size must be a
4313- page size. This is because the pgd_list
4314- links through the page structure, so there
4315- can only be one pgd per page for this to
4316- work. */
4317- pgd_size = PAGE_SIZE;
4318- }
4319- }
4320+ PTRS_PER_PMD*sizeof(pmd_t),
4321+ PTRS_PER_PMD*sizeof(pmd_t),
4322+ SLAB_PANIC,
4323+ pmd_ctor);
4324 }
4325
4326 /*
4327--- sle11-2009-10-16.orig/arch/x86/mm/init_64-xen.c 2009-02-16 16:17:21.000000000 +0100
4328+++ sle11-2009-10-16/arch/x86/mm/init_64-xen.c 2009-02-16 16:18:36.000000000 +0100
4329@@ -767,7 +767,7 @@ static void xen_finish_init_mapping(void
4330 /* Setup the direct mapping of the physical memory at PAGE_OFFSET.
4331 This runs before bootmem is initialized and gets pages directly from the
4332 physical memory. To access them they are temporarily mapped. */
4333-void __meminit init_memory_mapping(unsigned long start, unsigned long end)
4334+void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
4335 {
4336 unsigned long next;
4337
4338@@ -901,12 +901,6 @@ error:
4339 }
4340 EXPORT_SYMBOL_GPL(arch_add_memory);
4341
4342-int remove_memory(u64 start, u64 size)
4343-{
4344- return -EINVAL;
4345-}
4346-EXPORT_SYMBOL_GPL(remove_memory);
4347-
4348 #if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
4349 int memory_add_physaddr_to_nid(u64 start)
4350 {
4351@@ -1173,14 +1167,6 @@ int in_gate_area_no_task(unsigned long a
4352 return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
4353 }
4354
4355-#ifndef CONFIG_XEN
4356-void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
4357-{
4358- return __alloc_bootmem_core(pgdat->bdata, size,
4359- SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
4360-}
4361-#endif
4362-
4363 const char *arch_vma_name(struct vm_area_struct *vma)
4364 {
4365 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
4366@@ -1189,3 +1175,48 @@ const char *arch_vma_name(struct vm_area
4367 return "[vsyscall]";
4368 return NULL;
4369 }
4370+
4371+#ifdef CONFIG_SPARSEMEM_VMEMMAP
4372+/*
4373+ * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
4374+ */
4375+int __meminit vmemmap_populate(struct page *start_page,
4376+ unsigned long size, int node)
4377+{
4378+ unsigned long addr = (unsigned long)start_page;
4379+ unsigned long end = (unsigned long)(start_page + size);
4380+ unsigned long next;
4381+ pgd_t *pgd;
4382+ pud_t *pud;
4383+ pmd_t *pmd;
4384+
4385+ for (; addr < end; addr = next) {
4386+ next = pmd_addr_end(addr, end);
4387+
4388+ pgd = vmemmap_pgd_populate(addr, node);
4389+ if (!pgd)
4390+ return -ENOMEM;
4391+ pud = vmemmap_pud_populate(pgd, addr, node);
4392+ if (!pud)
4393+ return -ENOMEM;
4394+
4395+ pmd = pmd_offset(pud, addr);
4396+ if (pmd_none(*pmd)) {
4397+ pte_t entry;
4398+ void *p = vmemmap_alloc_block(PMD_SIZE, node);
4399+ if (!p)
4400+ return -ENOMEM;
4401+
4402+ entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
4403+ mk_pte_huge(entry);
4404+ set_pmd(pmd, __pmd(pte_val(entry)));
4405+
4406+ printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
4407+ addr, addr + PMD_SIZE - 1, p, node);
4408+ } else
4409+ vmemmap_verify((pte_t *)pmd, node, addr, next);
4410+ }
4411+
4412+ return 0;
4413+}
4414+#endif
4415--- sle11-2009-10-16.orig/arch/x86/mm/pageattr_64-xen.c 2009-02-16 16:17:21.000000000 +0100
4416+++ sle11-2009-10-16/arch/x86/mm/pageattr_64-xen.c 2009-02-16 16:18:36.000000000 +0100
4417@@ -17,9 +17,6 @@
4418 #include <asm/pgalloc.h>
4419 #include <asm/mmu_context.h>
4420
4421-LIST_HEAD(mm_unpinned);
4422-DEFINE_SPINLOCK(mm_unpinned_lock);
4423-
4424 static void _pin_lock(struct mm_struct *mm, int lock) {
4425 if (lock)
4426 spin_lock(&mm->page_table_lock);
4427@@ -81,8 +78,8 @@ static void _pin_lock(struct mm_struct *
4428 #define PIN_BATCH 8
4429 static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
4430
4431-static inline unsigned int mm_walk_set_prot(void *pt, pgprot_t flags,
4432- unsigned int cpu, unsigned int seq)
4433+static inline unsigned int pgd_walk_set_prot(void *pt, pgprot_t flags,
4434+ unsigned int cpu, unsigned int seq)
4435 {
4436 struct page *page = virt_to_page(pt);
4437 unsigned long pfn = page_to_pfn(page);
4438@@ -100,9 +97,9 @@ static inline unsigned int mm_walk_set_p
4439 return seq;
4440 }
4441
4442-static void mm_walk(struct mm_struct *mm, pgprot_t flags)
4443+static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
4444 {
4445- pgd_t *pgd;
4446+ pgd_t *pgd = pgd_base;
4447 pud_t *pud;
4448 pmd_t *pmd;
4449 pte_t *pte;
4450@@ -110,7 +107,6 @@ static void mm_walk(struct mm_struct *mm
4451 unsigned int cpu, seq;
4452 multicall_entry_t *mcl;
4453
4454- pgd = mm->pgd;
4455 cpu = get_cpu();
4456
4457 /*
4458@@ -125,18 +121,18 @@ static void mm_walk(struct mm_struct *mm
4459 continue;
4460 pud = pud_offset(pgd, 0);
4461 if (PTRS_PER_PUD > 1) /* not folded */
4462- seq = mm_walk_set_prot(pud,flags,cpu,seq);
4463+ seq = pgd_walk_set_prot(pud,flags,cpu,seq);
4464 for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
4465 if (pud_none(*pud))
4466 continue;
4467 pmd = pmd_offset(pud, 0);
4468 if (PTRS_PER_PMD > 1) /* not folded */
4469- seq = mm_walk_set_prot(pmd,flags,cpu,seq);
4470+ seq = pgd_walk_set_prot(pmd,flags,cpu,seq);
4471 for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
4472 if (pmd_none(*pmd))
4473 continue;
4474 pte = pte_offset_kernel(pmd,0);
4475- seq = mm_walk_set_prot(pte,flags,cpu,seq);
4476+ seq = pgd_walk_set_prot(pte,flags,cpu,seq);
4477 }
4478 }
4479 }
4480@@ -148,12 +144,12 @@ static void mm_walk(struct mm_struct *mm
4481 seq = 0;
4482 }
4483 MULTI_update_va_mapping(mcl + seq,
4484- (unsigned long)__user_pgd(mm->pgd),
4485- pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, flags),
4486+ (unsigned long)__user_pgd(pgd_base),
4487+ pfn_pte(virt_to_phys(__user_pgd(pgd_base))>>PAGE_SHIFT, flags),
4488 0);
4489 MULTI_update_va_mapping(mcl + seq + 1,
4490- (unsigned long)mm->pgd,
4491- pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, flags),
4492+ (unsigned long)pgd_base,
4493+ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
4494 UVMF_TLB_FLUSH);
4495 if (unlikely(HYPERVISOR_multicall_check(mcl, seq + 2, NULL)))
4496 BUG();
4497@@ -161,21 +157,35 @@ static void mm_walk(struct mm_struct *mm
4498 put_cpu();
4499 }
4500
4501+static void __pgd_pin(pgd_t *pgd)
4502+{
4503+ pgd_walk(pgd, PAGE_KERNEL_RO);
4504+ xen_pgd_pin(__pa(pgd)); /* kernel */
4505+ xen_pgd_pin(__pa(__user_pgd(pgd))); /* user */
4506+ SetPagePinned(virt_to_page(pgd));
4507+}
4508+
4509+static void __pgd_unpin(pgd_t *pgd)
4510+{
4511+ xen_pgd_unpin(__pa(pgd));
4512+ xen_pgd_unpin(__pa(__user_pgd(pgd)));
4513+ pgd_walk(pgd, PAGE_KERNEL);
4514+ ClearPagePinned(virt_to_page(pgd));
4515+}
4516+
4517+void pgd_test_and_unpin(pgd_t *pgd)
4518+{
4519+ if (PagePinned(virt_to_page(pgd)))
4520+ __pgd_unpin(pgd);
4521+}
4522+
4523 void mm_pin(struct mm_struct *mm)
4524 {
4525 if (xen_feature(XENFEAT_writable_page_tables))
4526 return;
4527
4528 pin_lock(mm);
4529-
4530- mm_walk(mm, PAGE_KERNEL_RO);
4531- xen_pgd_pin(__pa(mm->pgd)); /* kernel */
4532- xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
4533- SetPagePinned(virt_to_page(mm->pgd));
4534- spin_lock(&mm_unpinned_lock);
4535- list_del(&mm->context.unpinned);
4536- spin_unlock(&mm_unpinned_lock);
4537-
4538+ __pgd_pin(mm->pgd);
4539 pin_unlock(mm);
4540 }
4541
4542@@ -185,34 +195,30 @@ void mm_unpin(struct mm_struct *mm)
4543 return;
4544
4545 pin_lock(mm);
4546-
4547- xen_pgd_unpin(__pa(mm->pgd));
4548- xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
4549- mm_walk(mm, PAGE_KERNEL);
4550- ClearPagePinned(virt_to_page(mm->pgd));
4551- spin_lock(&mm_unpinned_lock);
4552- list_add(&mm->context.unpinned, &mm_unpinned);
4553- spin_unlock(&mm_unpinned_lock);
4554-
4555+ __pgd_unpin(mm->pgd);
4556 pin_unlock(mm);
4557 }
4558
4559 void mm_pin_all(void)
4560 {
4561+ struct page *page;
4562+ unsigned long flags;
4563+
4564 if (xen_feature(XENFEAT_writable_page_tables))
4565 return;
4566
4567 /*
4568- * Allow uninterrupted access to the mm_unpinned list. We don't
4569- * actually take the mm_unpinned_lock as it is taken inside mm_pin().
4570+ * Allow uninterrupted access to the pgd_list. Also protects
4571+ * __pgd_pin() by disabling preemption.
4572 * All other CPUs must be at a safe point (e.g., in stop_machine
4573 * or offlined entirely).
4574 */
4575- preempt_disable();
4576- while (!list_empty(&mm_unpinned))
4577- mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
4578- context.unpinned));
4579- preempt_enable();
4580+ spin_lock_irqsave(&pgd_lock, flags);
4581+ list_for_each_entry(page, &pgd_list, lru) {
4582+ if (!PagePinned(page))
4583+ __pgd_pin((pgd_t *)page_address(page));
4584+ }
4585+ spin_unlock_irqrestore(&pgd_lock, flags);
4586 }
4587
4588 void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
4589@@ -331,11 +337,11 @@ static struct page *split_large_page(uns
4590 return base;
4591 }
4592
4593-static void cache_flush_page(void *adr)
4594+void clflush_cache_range(void *adr, int size)
4595 {
4596 int i;
4597- for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
4598- asm volatile("clflush (%0)" :: "r" (adr + i));
4599+ for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
4600+ clflush(adr+i);
4601 }
4602
4603 static void flush_kernel_map(void *arg)
4604@@ -350,7 +356,7 @@ static void flush_kernel_map(void *arg)
4605 asm volatile("wbinvd" ::: "memory");
4606 else list_for_each_entry(pg, l, lru) {
4607 void *adr = page_address(pg);
4608- cache_flush_page(adr);
4609+ clflush_cache_range(adr, PAGE_SIZE);
4610 }
4611 __flush_tlb_all();
4612 }
4613@@ -418,6 +424,7 @@ __change_page_attr(unsigned long address
4614 split = split_large_page(address, prot, ref_prot2);
4615 if (!split)
4616 return -ENOMEM;
4617+ pgprot_val(ref_prot2) &= ~_PAGE_NX;
4618 set_pte(kpte, mk_pte(split, ref_prot2));
4619 kpte_page = split;
4620 }
4621@@ -510,9 +517,14 @@ void global_flush_tlb(void)
4622 struct page *pg, *next;
4623 struct list_head l;
4624
4625- down_read(&init_mm.mmap_sem);
4626+ /*
4627+ * Write-protect the semaphore, to exclude two contexts
4628+ * doing a list_replace_init() call in parallel and to
4629+ * exclude new additions to the deferred_pages list:
4630+ */
4631+ down_write(&init_mm.mmap_sem);
4632 list_replace_init(&deferred_pages, &l);
4633- up_read(&init_mm.mmap_sem);
4634+ up_write(&init_mm.mmap_sem);
4635
4636 flush_map(&l);
4637
4638--- sle11-2009-10-16.orig/arch/x86/mm/pgtable_32-xen.c 2009-02-16 16:17:21.000000000 +0100
4639+++ sle11-2009-10-16/arch/x86/mm/pgtable_32-xen.c 2009-02-16 16:18:36.000000000 +0100
4640@@ -6,6 +6,7 @@
4641 #include <linux/kernel.h>
4642 #include <linux/errno.h>
4643 #include <linux/mm.h>
4644+#include <linux/nmi.h>
4645 #include <linux/swap.h>
4646 #include <linux/smp.h>
4647 #include <linux/highmem.h>
4648@@ -46,6 +47,8 @@ void show_mem(void)
4649 for_each_online_pgdat(pgdat) {
4650 pgdat_resize_lock(pgdat, &flags);
4651 for (i = 0; i < pgdat->node_spanned_pages; ++i) {
4652+ if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
4653+ touch_nmi_watchdog();
4654 page = pgdat_page_nr(pgdat, i);
4655 total++;
4656 if (PageHighMem(page))
4657@@ -206,7 +209,7 @@ void pte_free(struct page *pte)
4658 __free_page(pte);
4659 }
4660
4661-void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
4662+void pmd_ctor(struct kmem_cache *cache, void *pmd)
4663 {
4664 memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
4665 }
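
The pmd_ctor() change above tracks the 2.6.24 slab API: constructors lose their flags argument and take the cache pointer first. A minimal cache in the new style (the widget names are illustrative):

#include <linux/slab.h>
#include <linux/string.h>

struct widget {
	int state;
};

/* 2.6.24 ctor signature: (cache, object), no flags argument */
static void widget_ctor(struct kmem_cache *cache, void *obj)
{
	memset(obj, 0, sizeof(struct widget));
}

static struct kmem_cache *widget_cache;

static int __init widget_cache_init(void)
{
	/* SLAB_PANIC: creation either succeeds or panics, no NULL check */
	widget_cache = kmem_cache_create("widget", sizeof(struct widget),
					 0, SLAB_PANIC, widget_ctor);
	return 0;
}
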
4666--- sle11-2009-10-16.orig/arch/x86/pci/irq-xen.c 2009-02-16 16:17:21.000000000 +0100
4667+++ sle11-2009-10-16/arch/x86/pci/irq-xen.c 2009-02-16 16:18:36.000000000 +0100
4668@@ -173,7 +173,7 @@ void eisa_set_level_irq(unsigned int irq
4669 }
4670
4671 /*
4672- * Common IRQ routing practice: nybbles in config space,
4673+ * Common IRQ routing practice: nibbles in config space,
4674 * offset by some magic constant.
4675 */
4676 static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
4677@@ -496,6 +496,26 @@ static int pirq_amd756_set(struct pci_de
4678 return 1;
4679 }
4680
4681+/*
4682+ * PicoPower PT86C523
4683+ */
4684+static int pirq_pico_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
4685+{
4686+ outb(0x10 + ((pirq - 1) >> 1), 0x24);
4687+ return ((pirq - 1) & 1) ? (inb(0x26) >> 4) : (inb(0x26) & 0xf);
4688+}
4689+
4690+static int pirq_pico_set(struct pci_dev *router, struct pci_dev *dev, int pirq,
4691+ int irq)
4692+{
4693+ unsigned int x;
4694+ outb(0x10 + ((pirq - 1) >> 1), 0x24);
4695+ x = inb(0x26);
4696+ x = ((pirq - 1) & 1) ? ((x & 0x0f) | (irq << 4)) : ((x & 0xf0) | (irq));
4697+ outb(x, 0x26);
4698+ return 1;
4699+}
4700+
4701 #ifdef CONFIG_PCI_BIOS
4702
4703 static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
4704@@ -569,7 +589,7 @@ static __init int via_router_probe(struc
4705 /* FIXME: We should move some of the quirk fixup stuff here */
4706
4707 /*
4708- * work arounds for some buggy BIOSes
4709+ * workarounds for some buggy BIOSes
4710 */
4711 if (device == PCI_DEVICE_ID_VIA_82C586_0) {
4712 switch(router->device) {
4713@@ -725,6 +745,24 @@ static __init int amd_router_probe(struc
4714 return 1;
4715 }
4716
4717+static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
4718+{
4719+ switch (device) {
4720+ case PCI_DEVICE_ID_PICOPOWER_PT86C523:
4721+ r->name = "PicoPower PT86C523";
4722+ r->get = pirq_pico_get;
4723+ r->set = pirq_pico_set;
4724+ return 1;
4725+
4726+ case PCI_DEVICE_ID_PICOPOWER_PT86C523BBP:
4727+ r->name = "PicoPower PT86C523 rev. BB+";
4728+ r->get = pirq_pico_get;
4729+ r->set = pirq_pico_set;
4730+ return 1;
4731+ }
4732+ return 0;
4733+}
4734+
4735 static __initdata struct irq_router_handler pirq_routers[] = {
4736 { PCI_VENDOR_ID_INTEL, intel_router_probe },
4737 { PCI_VENDOR_ID_AL, ali_router_probe },
4738@@ -736,6 +774,7 @@ static __initdata struct irq_router_hand
4739 { PCI_VENDOR_ID_VLSI, vlsi_router_probe },
4740 { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
4741 { PCI_VENDOR_ID_AMD, amd_router_probe },
4742+ { PCI_VENDOR_ID_PICOPOWER, pico_router_probe },
4743 /* Someone with docs needs to add the ATI Radeon IGP */
4744 { 0, NULL }
4745 };
4746@@ -1014,7 +1053,7 @@ static void __init pcibios_fixup_irqs(vo
4747 * Work around broken HP Pavilion Notebooks which assign USB to
4748 * IRQ 9 even though it is actually wired to IRQ 11
4749 */
4750-static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d)
4751+static int __init fix_broken_hp_bios_irq9(const struct dmi_system_id *d)
4752 {
4753 if (!broken_hp_bios_irq9) {
4754 broken_hp_bios_irq9 = 1;
4755@@ -1027,7 +1066,7 @@ static int __init fix_broken_hp_bios_irq
4756 * Work around broken Acer TravelMate 360 Notebooks which assign
4757 * Cardbus to IRQ 11 even though it is actually wired to IRQ 10
4758 */
4759-static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d)
4760+static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
4761 {
4762 if (!acer_tm360_irqrouting) {
4763 acer_tm360_irqrouting = 1;
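
The PicoPower router support added above packs two 4-bit PIRQ link values into each 8-bit index register, so the set path must read-modify-write to preserve the sibling link. The nibble arithmetic, pulled out into plain helpers (hypothetical names, testable outside the kernel; note the original pirq_pico_set trusts irq to fit in four bits):

/* pirq 1 lives in the low nibble, pirq 2 in the high nibble, etc. */
static unsigned int nybble_get(unsigned int reg, int pirq)
{
	return ((pirq - 1) & 1) ? (reg >> 4) : (reg & 0xf);
}

static unsigned int nybble_set(unsigned int reg, int pirq, unsigned int irq)
{
	irq &= 0xf;			/* defensive; callers pass irq < 16 */
	return ((pirq - 1) & 1) ? ((reg & 0x0f) | (irq << 4))
				: ((reg & 0xf0) | irq);
}
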
4764--- sle11-2009-10-16.orig/drivers/acpi/processor_idle.c 2009-08-26 11:52:33.000000000 +0200
4765+++ sle11-2009-10-16/drivers/acpi/processor_idle.c 2009-06-29 15:29:06.000000000 +0200
4766@@ -1749,6 +1749,13 @@ int acpi_processor_cst_has_changed(struc
4767 if (!pr->flags.power_setup_done)
4768 return -ENODEV;
4769
4770+ if (processor_pm_external()) {
4771+ acpi_processor_get_power_info(pr);
4772+ processor_notify_external(pr,
4773+ PROCESSOR_PM_CHANGE, PM_TYPE_IDLE);
4774+ return ret;
4775+ }
4776+
4777 cpuidle_pause_and_lock();
4778 cpuidle_disable_device(&pr->power.dev);
4779 acpi_processor_get_power_info(pr);
4780--- sle11-2009-10-16.orig/drivers/cpuidle/Kconfig 2009-10-28 14:55:06.000000000 +0100
4781+++ sle11-2009-10-16/drivers/cpuidle/Kconfig 2009-02-16 16:18:36.000000000 +0100
4782@@ -1,6 +1,7 @@
4783
4784 config CPU_IDLE
4785 bool "CPU idle PM support"
4786+ depends on !PROCESSOR_EXTERNAL_CONTROL
4787 default ACPI
4788 help
4789 CPU idle is a generic framework for supporting software-controlled
4790--- sle11-2009-10-16.orig/drivers/oprofile/cpu_buffer.c 2009-02-16 16:01:39.000000000 +0100
4791+++ sle11-2009-10-16/drivers/oprofile/cpu_buffer.c 2009-03-12 16:15:32.000000000 +0100
4792@@ -308,6 +308,37 @@ void oprofile_add_trace(unsigned long pc
4793 }
4794
4795 #ifdef CONFIG_XEN
4796+/*
4797+ * This is basically log_sample(b, ESCAPE_CODE, cpu_mode, CPU_TRACE_BEGIN),
4798+ * as was previously accessible through oprofile_add_pc().
4799+ */
4800+void oprofile_add_mode(int cpu_mode)
4801+{
4802+ struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
4803+ struct task_struct *task;
4804+
4805+ if (nr_available_slots(cpu_buf) < 3) {
4806+ cpu_buf->sample_lost_overflow++;
4807+ return;
4808+ }
4809+
4810+ task = current;
4811+
4812+ /* notice a switch from user->kernel or vice versa */
4813+ if (cpu_buf->last_cpu_mode != cpu_mode) {
4814+ cpu_buf->last_cpu_mode = cpu_mode;
4815+ add_code(cpu_buf, cpu_mode);
4816+ }
4817+
4818+ /* notice a task switch */
4819+ if (cpu_buf->last_task != task) {
4820+ cpu_buf->last_task = task;
4821+ add_code(cpu_buf, (unsigned long)task);
4822+ }
4823+
4824+ add_code(cpu_buf, CPU_TRACE_BEGIN);
4825+}
4826+
4827 int oprofile_add_domain_switch(int32_t domain_id)
4828 {
4829 struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
4830--- sle11-2009-10-16.orig/drivers/pci/msi-xen.c 2008-12-15 11:27:22.000000000 +0100
4831+++ sle11-2009-10-16/drivers/pci/msi-xen.c 2009-02-16 16:18:36.000000000 +0100
4832@@ -264,6 +264,12 @@ static int msi_map_vector(struct pci_dev
4833 return msi_map_pirq_to_vector(dev, -1, entry_nr, table_base);
4834 }
4835
4836+static void pci_intx_for_msi(struct pci_dev *dev, int enable)
4837+{
4838+ if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
4839+ pci_intx(dev, enable);
4840+}
4841+
4842 #ifdef CONFIG_PM
4843 static void __pci_restore_msi_state(struct pci_dev *dev)
4844 {
4845@@ -272,7 +278,7 @@ static void __pci_restore_msi_state(stru
4846 if (!dev->msi_enabled)
4847 return;
4848
4849- pci_intx(dev, 0); /* disable intx */
4850+ pci_intx_for_msi(dev, 0);
4851 msi_set_enable(dev, 0);
4852
4853 pirq = msi_map_pirq_to_vector(dev, dev->irq, 0, 0);
4854@@ -295,7 +301,7 @@ static void __pci_restore_msix_state(str
4855 if (!dev->msix_enabled)
4856 return;
4857
4858- pci_intx(dev, 0); /* disable intx */
4859+ pci_intx_for_msi(dev, 0);
4860 msix_set_enable(dev, 0);
4861
4862 msi_dev_entry = get_msi_dev_pirq_list(dev);
4863@@ -348,7 +354,7 @@ static int msi_capability_init(struct pc
4864 return -EBUSY;
4865
4866 /* Set MSI enabled bits */
4867- pci_intx(dev, 0); /* disable intx */
4868+ pci_intx_for_msi(dev, 0);
4869 msi_set_enable(dev, 1);
4870 dev->msi_enabled = 1;
4871
4872@@ -422,7 +428,7 @@ static int msix_capability_init(struct p
4873 return avail;
4874 }
4875
4876- pci_intx(dev, 0); /* disable intx */
4877+ pci_intx_for_msi(dev, 0);
4878 msix_set_enable(dev, 1);
4879 dev->msix_enabled = 1;
4880
4881@@ -557,7 +563,7 @@ void pci_disable_msi(struct pci_dev* dev
4882
4883 /* Disable MSI mode */
4884 msi_set_enable(dev, 0);
4885- pci_intx(dev, 1); /* enable intx */
4886+ pci_intx_for_msi(dev, 1);
4887 dev->msi_enabled = 0;
4888 }
4889 EXPORT_SYMBOL(pci_disable_msi);
4890@@ -696,7 +702,7 @@ void pci_disable_msix(struct pci_dev* de
4891
4892 /* Disable MSI mode */
4893 msix_set_enable(dev, 0);
4894- pci_intx(dev, 1); /* enable intx */
4895+ pci_intx_for_msi(dev, 1);
4896 dev->msix_enabled = 0;
4897 }
4898 EXPORT_SYMBOL(pci_disable_msix);
4899--- sle11-2009-10-16.orig/drivers/xen/blkback/blkback.c 2009-02-16 16:17:21.000000000 +0100
4900+++ sle11-2009-10-16/drivers/xen/blkback/blkback.c 2009-02-16 16:18:36.000000000 +0100
4901@@ -269,13 +269,10 @@ static void __end_block_io_op(pending_re
4902 }
4903 }
4904
4905-static int end_block_io_op(struct bio *bio, unsigned int done, int error)
4906+static void end_block_io_op(struct bio *bio, int error)
4907 {
4908- if (bio->bi_size != 0)
4909- return 1;
4910 __end_block_io_op(bio->bi_private, error);
4911 bio_put(bio);
4912- return error;
4913 }
4914
4915
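
The blkback change above follows the 2.6.24 bio completion rework: ->bi_end_io is now void, takes no bytes_done argument, and is called exactly once when the whole bio has completed, so the old partial-completion guard disappears. A minimal callback in the new style (names illustrative):

#include <linux/bio.h>
#include <linux/completion.h>

/* 2.6.24 type: void (bio_end_io_t)(struct bio *, int error) */
static void my_end_io(struct bio *bio, int error)
{
	struct completion *done = bio->bi_private;

	/* the old "if (bio->bi_size != 0) return 1;" dance is gone */
	if (error)
		printk(KERN_ERR "bio failed: %d\n", error);
	complete(done);
	bio_put(bio);
}

/* wired up before submission: bio->bi_end_io = my_end_io; */
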
4916--- sle11-2009-10-16.orig/drivers/xen/blkfront/blkfront.c 2009-03-24 10:12:03.000000000 +0100
4917+++ sle11-2009-10-16/drivers/xen/blkfront/blkfront.c 2009-02-16 16:18:36.000000000 +0100
4918@@ -233,7 +233,7 @@ static int setup_blkring(struct xenbus_d
4919 SHARED_RING_INIT(sring);
4920 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
4921
4922- memset(info->sg, 0, sizeof(info->sg));
4923+ sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
4924
4925 err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
4926 if (err < 0) {
4927@@ -625,9 +625,8 @@ static int blkif_queue_request(struct re
4928
4929 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
4930 BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
4931- for (i = 0; i < ring_req->nr_segments; ++i) {
4932- sg = info->sg + i;
4933- buffer_mfn = page_to_phys(sg->page) >> PAGE_SHIFT;
4934+ for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
4935+ buffer_mfn = page_to_phys(sg_page(sg)) >> PAGE_SHIFT;
4936 fsect = sg->offset >> 9;
4937 lsect = fsect + (sg->length >> 9) - 1;
4938 /* install a grant reference. */
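
blkfront above adopts the 2.6.24 chained-scatterlist API: tables must be initialised with sg_init_table(), since a plain memset no longer suffices once the last entry can carry an end/chain marker, entries are walked with for_each_sg(), and the backing page is read through sg_page() instead of sg->page. In outline, with illustrative names:

#include <linux/scatterlist.h>
#include <linux/mm.h>

#define MY_MAX_SEGS 11

static void my_init_table(struct scatterlist *table)
{
	sg_init_table(table, MY_MAX_SEGS);	/* zeroes and sets end marker */
}

static void my_walk_segments(struct scatterlist *table, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(table, sg, nents, i) {	/* chain-aware iterator */
		unsigned long pfn = page_to_pfn(sg_page(sg));
		unsigned int fsect = sg->offset >> 9;
		unsigned int lsect = fsect + (sg->length >> 9) - 1;

		printk(KERN_DEBUG "seg %d: pfn %lx sectors %u-%u\n",
		       i, pfn, fsect, lsect);
	}
}
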
4939--- sle11-2009-10-16.orig/drivers/xen/core/firmware.c 2009-10-28 14:55:06.000000000 +0100
4940+++ sle11-2009-10-16/drivers/xen/core/firmware.c 2009-03-25 18:10:23.000000000 +0100
4941@@ -1,4 +1,5 @@
4942 #include <linux/kernel.h>
4943+#include <linux/string.h>
4944 #include <linux/errno.h>
4945 #include <linux/init.h>
4946 #include <linux/edd.h>
4947--- sle11-2009-10-16.orig/drivers/xen/core/machine_kexec.c 2009-10-28 14:55:06.000000000 +0100
4948+++ sle11-2009-10-16/drivers/xen/core/machine_kexec.c 2009-02-17 11:46:41.000000000 +0100
4949@@ -29,6 +29,10 @@ void __init xen_machine_kexec_setup_reso
4950 int k = 0;
4951 int rc;
4952
4953+ if (strstr(boot_command_line, "crashkernel="))
4954+ printk(KERN_WARNING "Ignoring crashkernel command line, "
4955+ "parameter will be supplied by xen\n");
4956+
4957 if (!is_initial_xendomain())
4958 return;
4959
4960@@ -130,6 +134,13 @@ void __init xen_machine_kexec_setup_reso
4961 xen_max_nr_phys_cpus))
4962 goto err;
4963
4964+#ifdef CONFIG_X86
4965+ if (xen_create_contiguous_region((unsigned long)&vmcoreinfo_note,
4966+ get_order(sizeof(vmcoreinfo_note)),
4967+ BITS_PER_LONG))
4968+ goto err;
4969+#endif
4970+
4971 return;
4972
4973 err:
4974@@ -205,6 +216,13 @@ NORET_TYPE void machine_kexec(struct kim
4975 panic("KEXEC_CMD_kexec hypercall should not return\n");
4976 }
4977
4978+#ifdef CONFIG_X86
4979+unsigned long paddr_vmcoreinfo_note(void)
4980+{
4981+ return virt_to_machine(&vmcoreinfo_note);
4982+}
4983+#endif
4984+
4985 void machine_shutdown(void)
4986 {
4987 /* do nothing */
4988--- sle11-2009-10-16.orig/drivers/xen/core/smpboot.c 2008-12-15 11:27:22.000000000 +0100
4989+++ sle11-2009-10-16/drivers/xen/core/smpboot.c 2009-02-16 16:18:36.000000000 +0100
4990@@ -45,8 +45,8 @@ cpumask_t cpu_possible_map;
4991 EXPORT_SYMBOL(cpu_possible_map);
4992 cpumask_t cpu_initialized_map;
4993
4994-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
4995-EXPORT_SYMBOL(cpu_data);
4996+DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info);
4997+EXPORT_PER_CPU_SYMBOL(cpu_info);
4998
4999 static DEFINE_PER_CPU(int, resched_irq);
5000 static DEFINE_PER_CPU(int, callfunc_irq);
5001@@ -55,13 +55,13 @@ static char callfunc_name[NR_CPUS][15];
5002
5003 u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
5004
5005-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
5006-cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
5007-EXPORT_SYMBOL(cpu_core_map);
5008+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
5009+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
5010+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
5011
5012 #if defined(__i386__)
5013-u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
5014-EXPORT_SYMBOL(x86_cpu_to_apicid);
5015+DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
5016+EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
5017 #endif
5018
5019 void __init prefill_possible_map(void)
5020@@ -86,25 +86,25 @@ void __init smp_alloc_memory(void)
5021 static inline void
5022 set_cpu_sibling_map(unsigned int cpu)
5023 {
5024- cpu_data[cpu].phys_proc_id = cpu;
5025- cpu_data[cpu].cpu_core_id = 0;
5026+ cpu_data(cpu).phys_proc_id = cpu;
5027+ cpu_data(cpu).cpu_core_id = 0;
5028
5029- cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
5030- cpu_core_map[cpu] = cpumask_of_cpu(cpu);
5031+ per_cpu(cpu_sibling_map, cpu) = cpumask_of_cpu(cpu);
5032+ per_cpu(cpu_core_map, cpu) = cpumask_of_cpu(cpu);
5033
5034- cpu_data[cpu].booted_cores = 1;
5035+ cpu_data(cpu).booted_cores = 1;
5036 }
5037
5038 static void
5039 remove_siblinginfo(unsigned int cpu)
5040 {
5041- cpu_data[cpu].phys_proc_id = BAD_APICID;
5042- cpu_data[cpu].cpu_core_id = BAD_APICID;
5043+ cpu_data(cpu).phys_proc_id = BAD_APICID;
5044+ cpu_data(cpu).cpu_core_id = BAD_APICID;
5045
5046- cpus_clear(cpu_sibling_map[cpu]);
5047- cpus_clear(cpu_core_map[cpu]);
5048+ cpus_clear(per_cpu(cpu_sibling_map, cpu));
5049+ cpus_clear(per_cpu(cpu_core_map, cpu));
5050
5051- cpu_data[cpu].booted_cores = 0;
5052+ cpu_data(cpu).booted_cores = 0;
5053 }
5054
5055 static int __cpuinit xen_smp_intr_init(unsigned int cpu)
5056@@ -163,9 +163,9 @@ void __cpuinit cpu_bringup(void)
5057 {
5058 cpu_init();
5059 #ifdef __i386__
5060- identify_secondary_cpu(cpu_data + smp_processor_id());
5061+ identify_secondary_cpu(&current_cpu_data);
5062 #else
5063- identify_cpu(cpu_data + smp_processor_id());
5064+ identify_cpu(&current_cpu_data);
5065 #endif
5066 touch_softlockup_watchdog();
5067 preempt_disable();
5068@@ -266,16 +266,16 @@ void __init smp_prepare_cpus(unsigned in
5069 if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, 0, &cpu_id) == 0)
5070 apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
5071 boot_cpu_data.apicid = apicid;
5072- cpu_data[0] = boot_cpu_data;
5073+ cpu_data(0) = boot_cpu_data;
5074
5075 cpu_2_logical_apicid[0] = apicid;
5076- x86_cpu_to_apicid[0] = apicid;
5077+ per_cpu(x86_cpu_to_apicid, 0) = apicid;
5078
5079 current_thread_info()->cpu = 0;
5080
5081 for (cpu = 0; cpu < NR_CPUS; cpu++) {
5082- cpus_clear(cpu_sibling_map[cpu]);
5083- cpus_clear(cpu_core_map[cpu]);
5084+ cpus_clear(per_cpu(cpu_sibling_map, cpu));
5085+ cpus_clear(per_cpu(cpu_core_map, cpu));
5086 }
5087
5088 set_cpu_sibling_map(0);
5089@@ -320,11 +320,12 @@ void __init smp_prepare_cpus(unsigned in
5090 apicid = cpu;
5091 if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, cpu, &cpu_id) == 0)
5092 apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
5093- cpu_data[cpu] = boot_cpu_data;
5094- cpu_data[cpu].apicid = apicid;
5095+ cpu_data(cpu) = boot_cpu_data;
5096+ cpu_data(cpu).cpu_index = cpu;
5097+ cpu_data(cpu).apicid = apicid;
5098
5099 cpu_2_logical_apicid[cpu] = apicid;
5100- x86_cpu_to_apicid[cpu] = apicid;
5101+ per_cpu(x86_cpu_to_apicid, cpu) = apicid;
5102
5103 #ifdef __x86_64__
5104 cpu_pda(cpu)->pcurrent = idle;
5105--- sle11-2009-10-16.orig/drivers/xen/netback/loopback.c 2008-12-15 11:26:44.000000000 +0100
5106+++ sle11-2009-10-16/drivers/xen/netback/loopback.c 2009-02-16 16:18:36.000000000 +0100
5107@@ -285,9 +285,9 @@ static void __exit clean_loopback(int i)
5108 char dev_name[IFNAMSIZ];
5109
5110 sprintf(dev_name, "vif0.%d", i);
5111- dev1 = dev_get_by_name(dev_name);
5112+ dev1 = dev_get_by_name(&init_net, dev_name);
5113 sprintf(dev_name, "veth%d", i);
5114- dev2 = dev_get_by_name(dev_name);
5115+ dev2 = dev_get_by_name(&init_net, dev_name);
5116 if (dev1 && dev2) {
5117 unregister_netdev(dev2);
5118 unregister_netdev(dev1);
5119--- sle11-2009-10-16.orig/drivers/xen/netback/netback.c 2008-12-23 09:33:22.000000000 +0100
5120+++ sle11-2009-10-16/drivers/xen/netback/netback.c 2009-02-16 16:18:36.000000000 +0100
5121@@ -350,8 +350,8 @@ static void xen_network_done_notify(void
5122 {
5123 static struct net_device *eth0_dev = NULL;
5124 if (unlikely(eth0_dev == NULL))
5125- eth0_dev = __dev_get_by_name("eth0");
5126- netif_rx_schedule(eth0_dev);
5127+ eth0_dev = __dev_get_by_name(&init_net, "eth0");
5128+ netif_rx_schedule(eth0_dev, ???);
5129 }
5130 /*
5131 * Add following to poll() function in NAPI driver (Tigon3 is example):
5132--- sle11-2009-10-16.orig/drivers/xen/netback/xenbus.c 2009-03-04 11:25:55.000000000 +0100
5133+++ sle11-2009-10-16/drivers/xen/netback/xenbus.c 2009-02-16 16:18:36.000000000 +0100
5134@@ -149,12 +149,10 @@ fail:
5135 * and vif variables to the environment, for the benefit of the vif-* hotplug
5136 * scripts.
5137 */
5138-static int netback_uevent(struct xenbus_device *xdev, char **envp,
5139- int num_envp, char *buffer, int buffer_size)
5140+static int netback_uevent(struct xenbus_device *xdev, struct kobj_uevent_env *env)
5141 {
5142 struct backend_info *be = xdev->dev.driver_data;
5143 netif_t *netif = be->netif;
5144- int i = 0, length = 0;
5145 char *val;
5146
5147 DPRINTK("netback_uevent");
5148@@ -166,15 +164,11 @@ static int netback_uevent(struct xenbus_
5149 return err;
5150 }
5151 else {
5152- add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
5153- &length, "script=%s", val);
5154+ add_uevent_var(env, "script=%s", val);
5155 kfree(val);
5156 }
5157
5158- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
5159- "vif=%s", netif->dev->name);
5160-
5161- envp[i] = NULL;
5162+ add_uevent_var(env, "vif=%s", netif->dev->name);
5163
5164 return 0;
5165 }
5166--- sle11-2009-10-16.orig/drivers/xen/netfront/accel.c 2009-04-09 14:43:45.000000000 +0200
5167+++ sle11-2009-10-16/drivers/xen/netfront/accel.c 2009-03-30 16:39:19.000000000 +0200
5168@@ -313,7 +313,7 @@ accelerator_set_vif_state_hooks(struct n
5169 DPRINTK("%p\n",vif_state);
5170
5171 /* Make sure there are no data path operations going on */
5172- netif_poll_disable(vif_state->np->netdev);
5173+ napi_disable(&vif_state->np->napi);
5174 netif_tx_lock_bh(vif_state->np->netdev);
5175
5176 accelerator = vif_state->np->accelerator;
5177@@ -322,7 +322,7 @@ accelerator_set_vif_state_hooks(struct n
5178 spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
5179
5180 netif_tx_unlock_bh(vif_state->np->netdev);
5181- netif_poll_enable(vif_state->np->netdev);
5182+ napi_enable(&vif_state->np->napi);
5183 }
5184
5185
5186@@ -496,7 +496,7 @@ accelerator_remove_single_hook(struct ne
5187 unsigned long flags;
5188
5189 /* Make sure there are no data path operations going on */
5190- netif_poll_disable(vif_state->np->netdev);
5191+ napi_disable(&vif_state->np->napi);
5192 netif_tx_lock_bh(vif_state->np->netdev);
5193
5194 spin_lock_irqsave(&accelerator->vif_states_lock, flags);
5195@@ -512,7 +512,7 @@ accelerator_remove_single_hook(struct ne
5196 spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
5197
5198 netif_tx_unlock_bh(vif_state->np->netdev);
5199- netif_poll_enable(vif_state->np->netdev);
5200+ napi_enable(&vif_state->np->napi);
5201 }
5202
5203
5204--- sle11-2009-10-16.orig/drivers/xen/netfront/netfront.c 2009-03-30 16:36:30.000000000 +0200
5205+++ sle11-2009-10-16/drivers/xen/netfront/netfront.c 2009-03-30 16:39:44.000000000 +0200
5206@@ -626,6 +626,7 @@ static int network_open(struct net_devic
5207 struct netfront_info *np = netdev_priv(dev);
5208
5209 memset(&np->stats, 0, sizeof(np->stats));
5210+ napi_enable(&np->napi);
5211
5212 spin_lock_bh(&np->rx_lock);
5213 if (netfront_carrier_ok(np)) {
5214@@ -634,7 +635,7 @@ static int network_open(struct net_devic
5215 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){
5216 netfront_accelerator_call_stop_napi_irq(np, dev);
5217
5218- netif_rx_schedule(dev);
5219+ netif_rx_schedule(dev, &np->napi);
5220 }
5221 }
5222 spin_unlock_bh(&np->rx_lock);
5223@@ -706,7 +707,7 @@ static void rx_refill_timeout(unsigned l
5224
5225 netfront_accelerator_call_stop_napi_irq(np, dev);
5226
5227- netif_rx_schedule(dev);
5228+ netif_rx_schedule(dev, &np->napi);
5229 }
5230
5231 static void network_alloc_rx_buffers(struct net_device *dev)
5232@@ -1063,7 +1064,7 @@ static irqreturn_t netif_int(int irq, vo
5233 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) {
5234 netfront_accelerator_call_stop_napi_irq(np, dev);
5235
5236- netif_rx_schedule(dev);
5237+ netif_rx_schedule(dev, &np->napi);
5238 dev->last_rx = jiffies;
5239 }
5240 }
5241@@ -1316,16 +1317,17 @@ static int xennet_set_skb_gso(struct sk_
5242 #endif
5243 }
5244
5245-static int netif_poll(struct net_device *dev, int *pbudget)
5246+static int netif_poll(struct napi_struct *napi, int budget)
5247 {
5248- struct netfront_info *np = netdev_priv(dev);
5249+ struct netfront_info *np = container_of(napi, struct netfront_info, napi);
5250+ struct net_device *dev = np->netdev;
5251 struct sk_buff *skb;
5252 struct netfront_rx_info rinfo;
5253 struct netif_rx_response *rx = &rinfo.rx;
5254 struct netif_extra_info *extras = rinfo.extras;
5255 RING_IDX i, rp;
5256 struct multicall_entry *mcl;
5257- int work_done, budget, more_to_do = 1, accel_more_to_do = 1;
5258+ int work_done, more_to_do = 1, accel_more_to_do = 1;
5259 struct sk_buff_head rxq;
5260 struct sk_buff_head errq;
5261 struct sk_buff_head tmpq;
5262@@ -1345,8 +1347,6 @@ static int netif_poll(struct net_device
5263 skb_queue_head_init(&errq);
5264 skb_queue_head_init(&tmpq);
5265
5266- if ((budget = *pbudget) > dev->quota)
5267- budget = dev->quota;
5268 rp = np->rx.sring->rsp_prod;
5269 rmb(); /* Ensure we see queued responses up to 'rp'. */
5270
5271@@ -1508,9 +1508,6 @@ err:
5272 accel_more_to_do = 0;
5273 }
5274
5275- *pbudget -= work_done;
5276- dev->quota -= work_done;
5277-
5278 if (work_done < budget) {
5279 local_irq_save(flags);
5280
5281@@ -1527,14 +1524,14 @@ err:
5282 }
5283
5284 if (!more_to_do && !accel_more_to_do)
5285- __netif_rx_complete(dev);
5286+ __netif_rx_complete(dev, napi);
5287
5288 local_irq_restore(flags);
5289 }
5290
5291 spin_unlock(&np->rx_lock);
5292
5293- return more_to_do | accel_more_to_do;
5294+ return work_done;
5295 }
5296
5297 static void netif_release_tx_bufs(struct netfront_info *np)
5298@@ -1681,6 +1678,7 @@ static int network_close(struct net_devi
5299 {
5300 struct netfront_info *np = netdev_priv(dev);
5301 netif_stop_queue(np->netdev);
5302+ napi_disable(&np->napi);
5303 return 0;
5304 }
5305
5306@@ -2088,16 +2086,14 @@ static struct net_device * __devinit cre
5307 netdev->hard_start_xmit = network_start_xmit;
5308 netdev->stop = network_close;
5309 netdev->get_stats = network_get_stats;
5310- netdev->poll = netif_poll;
5311+ netif_napi_add(netdev, &np->napi, netif_poll, 64);
5312 netdev->set_multicast_list = network_set_multicast_list;
5313 netdev->uninit = netif_uninit;
5314 netdev->set_mac_address = xennet_set_mac_address;
5315 netdev->change_mtu = xennet_change_mtu;
5316- netdev->weight = 64;
5317 netdev->features = NETIF_F_IP_CSUM;
5318
5319 SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
5320- SET_MODULE_OWNER(netdev);
5321 SET_NETDEV_DEV(netdev, &dev->dev);
5322
5323 np->netdev = netdev;
5324--- sle11-2009-10-16.orig/drivers/xen/netfront/netfront.h 2009-10-28 14:55:06.000000000 +0100
5325+++ sle11-2009-10-16/drivers/xen/netfront/netfront.h 2009-02-16 16:18:36.000000000 +0100
5326@@ -157,6 +157,8 @@ struct netfront_info {
5327 spinlock_t tx_lock;
5328 spinlock_t rx_lock;
5329
5330+ struct napi_struct napi;
5331+
5332 unsigned int irq;
5333 unsigned int copying_receiver;
5334 unsigned int carrier;
5335--- sle11-2009-10-16.orig/drivers/xen/pciback/Makefile 2009-10-28 14:55:06.000000000 +0100
5336+++ sle11-2009-10-16/drivers/xen/pciback/Makefile 2009-02-16 16:18:36.000000000 +0100
5337@@ -12,6 +12,4 @@ pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT
5338 pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
5339 pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o
5340
5341-ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
5342-EXTRA_CFLAGS += -DDEBUG
5343-endif
5344+ccflags-$(CONFIG_XEN_PCIDEV_BE_DEBUG) += -DDEBUG
5345--- sle11-2009-10-16.orig/drivers/xen/pcifront/Makefile 2009-10-28 14:55:06.000000000 +0100
5346+++ sle11-2009-10-16/drivers/xen/pcifront/Makefile 2009-02-16 16:18:36.000000000 +0100
5347@@ -2,6 +2,4 @@ obj-y += pcifront.o
5348
5349 pcifront-y := pci_op.o xenbus.o pci.o
5350
5351-ifeq ($(CONFIG_XEN_PCIDEV_FE_DEBUG),y)
5352-EXTRA_CFLAGS += -DDEBUG
5353-endif
5354+ccflags-$(CONFIG_XEN_PCIDEV_FE_DEBUG) += -DDEBUG
5355--- sle11-2009-10-16.orig/drivers/xen/scsiback/emulate.c 2009-10-28 14:55:06.000000000 +0100
5356+++ sle11-2009-10-16/drivers/xen/scsiback/emulate.c 2009-02-16 16:18:36.000000000 +0100
5357@@ -104,9 +104,10 @@ static void resp_not_supported_cmd(pendi
5358 }
5359
5360
5361-static int __copy_to_sg(struct scatterlist *sg, unsigned int nr_sg,
5362+static int __copy_to_sg(struct scatterlist *sgl, unsigned int nr_sg,
5363 void *buf, unsigned int buflen)
5364 {
5365+ struct scatterlist *sg;
5366 void *from = buf;
5367 void *to;
5368 unsigned int from_rest = buflen;
5369@@ -115,8 +116,8 @@ static int __copy_to_sg(struct scatterli
5370 unsigned int i;
5371 unsigned long pfn;
5372
5373- for (i = 0; i < nr_sg; i++) {
5374- if (sg->page == NULL) {
5375+ for_each_sg (sgl, sg, nr_sg, i) {
5376+ if (sg_page(sg) == NULL) {
5377 printk(KERN_WARNING "%s: inconsistent length field in "
5378 "scatterlist\n", __FUNCTION__);
5379 return -ENOMEM;
5380@@ -125,7 +126,7 @@ static int __copy_to_sg(struct scatterli
5381 to_capa = sg->length;
5382 copy_size = min_t(unsigned int, to_capa, from_rest);
5383
5384- pfn = page_to_pfn(sg->page);
5385+ pfn = page_to_pfn(sg_page(sg));
5386 to = pfn_to_kaddr(pfn) + (sg->offset);
5387 memcpy(to, from, copy_size);
5388
5389@@ -134,7 +135,6 @@ static int __copy_to_sg(struct scatterli
5390 return 0;
5391 }
5392
5393- sg++;
5394 from += copy_size;
5395 }
5396
5397@@ -143,9 +143,10 @@ static int __copy_to_sg(struct scatterli
5398 return -ENOMEM;
5399 }
5400
5401-static int __copy_from_sg(struct scatterlist *sg, unsigned int nr_sg,
5402+static int __copy_from_sg(struct scatterlist *sgl, unsigned int nr_sg,
5403 void *buf, unsigned int buflen)
5404 {
5405+ struct scatterlist *sg;
5406 void *from;
5407 void *to = buf;
5408 unsigned int from_rest;
5409@@ -154,8 +155,8 @@ static int __copy_from_sg(struct scatter
5410 unsigned int i;
5411 unsigned long pfn;
5412
5413- for (i = 0; i < nr_sg; i++) {
5414- if (sg->page == NULL) {
5415+ for_each_sg (sgl, sg, nr_sg, i) {
5416+ if (sg_page(sg) == NULL) {
5417 printk(KERN_WARNING "%s: inconsistent length field in "
5418 "scatterlist\n", __FUNCTION__);
5419 return -ENOMEM;
5420@@ -170,13 +171,11 @@ static int __copy_from_sg(struct scatter
5421 }
5422 copy_size = from_rest;
5423
5424- pfn = page_to_pfn(sg->page);
5425+ pfn = page_to_pfn(sg_page(sg));
5426 from = pfn_to_kaddr(pfn) + (sg->offset);
5427 memcpy(to, from, copy_size);
5428
5429 to_capa -= copy_size;
5430-
5431- sg++;
5432 to += copy_size;
5433 }
5434
5435--- sle11-2009-10-16.orig/drivers/xen/scsiback/scsiback.c 2008-12-15 11:26:44.000000000 +0100
5436+++ sle11-2009-10-16/drivers/xen/scsiback/scsiback.c 2009-02-16 16:18:36.000000000 +0100
5437@@ -247,6 +247,8 @@ static int scsiback_gnttab_data_map(vscs
5438 write = (data_dir == DMA_TO_DEVICE);
5439
5440 if (nr_segments) {
5441+ struct scatterlist *sg;
5442+
5443 /* free of (sgl) in fast_flush_area()*/
5444 pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
5445 GFP_KERNEL);
5446@@ -255,6 +257,8 @@ static int scsiback_gnttab_data_map(vscs
5447 return -ENOMEM;
5448 }
5449
5450+ sg_init_table(pending_req->sgl, nr_segments);
5451+
5452 for (i = 0; i < nr_segments; i++) {
5453 flags = GNTMAP_host_map;
5454 if (write)
5455@@ -267,7 +271,7 @@ static int scsiback_gnttab_data_map(vscs
5456 err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
5457 BUG_ON(err);
5458
5459- for (i = 0; i < nr_segments; i++) {
5460+ for_each_sg (pending_req->sgl, sg, nr_segments, i) {
5461 if (unlikely(map[i].status != 0)) {
5462 printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
5463 map[i].handle = SCSIBACK_INVALID_HANDLE;
5464@@ -283,15 +287,15 @@ static int scsiback_gnttab_data_map(vscs
5465 pending_req, i)) >> PAGE_SHIFT,
5466 FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
5467
5468- pending_req->sgl[i].page = virt_to_page(vaddr(pending_req, i));
5469- pending_req->sgl[i].offset = ring_req->seg[i].offset;
5470- pending_req->sgl[i].length = ring_req->seg[i].length;
5471- data_len += pending_req->sgl[i].length;
5472+ sg_set_page(sg, virt_to_page(vaddr(pending_req, i)),
5473+ ring_req->seg[i].length,
5474+ ring_req->seg[i].offset);
5475+ data_len += sg->length;
5476
5477 barrier();
5478- if (pending_req->sgl[i].offset >= PAGE_SIZE ||
5479- pending_req->sgl[i].length > PAGE_SIZE ||
5480- pending_req->sgl[i].offset + pending_req->sgl[i].length > PAGE_SIZE)
5481+ if (sg->offset >= PAGE_SIZE ||
5482+ sg->length > PAGE_SIZE ||
5483+ sg->offset + sg->length > PAGE_SIZE)
5484 err |= 1;
5485
5486 }
5487@@ -320,27 +324,14 @@ static int scsiback_merge_bio(struct req
5488
5489 blk_queue_bounce(q, &bio);
5490
5491- if (!rq->bio)
5492- blk_rq_bio_prep(q, rq, bio);
5493- else if (!ll_back_merge_fn(q, rq, bio))
5494- return -EINVAL;
5495- else {
5496- rq->biotail->bi_next = bio;
5497- rq->biotail = bio;
5498- }
5499-
5500- return 0;
5501+ return blk_rq_append_bio(q, rq, bio);
5502 }
5503
5504
5505 /* quoted scsi_lib.c/scsi_bi_endio */
5506-static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
5507+static void scsiback_bi_endio(struct bio *bio, int error)
5508 {
5509- if (bio->bi_size)
5510- return 1;
5511-
5512 bio_put(bio);
5513- return 0;
5514 }
5515
5516
5517@@ -351,16 +342,16 @@ static int request_map_sg(struct request
5518 struct request_queue *q = rq->q;
5519 int nr_pages;
5520 unsigned int nsegs = count;
5521-
5522 unsigned int data_len = 0, len, bytes, off;
5523+ struct scatterlist *sg;
5524 struct page *page;
5525 struct bio *bio = NULL;
5526 int i, err, nr_vecs = 0;
5527
5528- for (i = 0; i < nsegs; i++) {
5529- page = pending_req->sgl[i].page;
5530- off = (unsigned int)pending_req->sgl[i].offset;
5531- len = (unsigned int)pending_req->sgl[i].length;
5532+ for_each_sg (pending_req->sgl, sg, nsegs, i) {
5533+ page = sg_page(sg);
5534+ off = sg->offset;
5535+ len = sg->length;
5536 data_len += len;
5537
5538 nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
5539@@ -388,7 +379,7 @@ static int request_map_sg(struct request
5540 if (bio->bi_vcnt >= nr_vecs) {
5541 err = scsiback_merge_bio(rq, bio);
5542 if (err) {
5543- bio_endio(bio, bio->bi_size, 0);
5544+ bio_endio(bio, 0);
5545 goto free_bios;
5546 }
5547 bio = NULL;
5548@@ -411,7 +402,7 @@ free_bios:
5549 /*
5550 * call endio instead of bio_put incase it was bounced
5551 */
5552- bio_endio(bio, bio->bi_size, 0);
5553+ bio_endio(bio, 0);
5554 }
5555
5556 return err;
5557--- sle11-2009-10-16.orig/drivers/xen/scsifront/scsifront.c 2009-02-16 16:17:21.000000000 +0100
5558+++ sle11-2009-10-16/drivers/xen/scsifront/scsifront.c 2009-02-16 16:18:36.000000000 +0100
5559@@ -246,11 +246,10 @@ static int map_data_for_request(struct v
5560 {
5561 grant_ref_t gref_head;
5562 struct page *page;
5563- int err, i, ref, ref_cnt = 0;
5564+ int err, ref, ref_cnt = 0;
5565 int write = (sc->sc_data_direction == DMA_TO_DEVICE);
5566- int nr_pages, off, len, bytes;
5567+ unsigned int i, nr_pages, off, len, bytes;
5568 unsigned long buffer_pfn;
5569- unsigned int data_len = 0;
5570
5571 if (sc->sc_data_direction == DMA_NONE)
5572 return 0;
5573@@ -263,25 +262,31 @@ static int map_data_for_request(struct v
5574
5575 if (sc->use_sg) {
5576 /* quoted scsi_lib.c/scsi_req_map_sg . */
5577- struct scatterlist *sg = (struct scatterlist *)sc->request_buffer;
5578- nr_pages = (sc->request_bufflen + sg[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
5579+ struct scatterlist *sg, *sgl = (struct scatterlist *)sc->request_buffer;
5580+ unsigned int data_len = sc->request_bufflen;
5581
5582+ nr_pages = (sc->request_bufflen + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
5583 if (nr_pages > VSCSIIF_SG_TABLESIZE) {
5584 printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n");
5585 ref_cnt = (-E2BIG);
5586 goto big_to_sg;
5587 }
5588
5589- for (i = 0; i < sc->use_sg; i++) {
5590- page = sg[i].page;
5591- off = sg[i].offset;
5592- len = sg[i].length;
5593- data_len += len;
5594+ for_each_sg (sgl, sg, sc->use_sg, i) {
5595+ page = sg_page(sg);
5596+ off = sg->offset;
5597+ len = sg->length;
5598
5599 buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;
5600
5601- while (len > 0) {
5602+ while (len > 0 && data_len > 0) {
5603+ /*
5604+ * sg sends a scatterlist that is larger than
5605+ * the data_len it wants transferred for certain
5606+ * IO sizes
5607+ */
5608 bytes = min_t(unsigned int, len, PAGE_SIZE - off);
5609+ bytes = min(bytes, data_len);
5610
5611 ref = gnttab_claim_grant_reference(&gref_head);
5612 BUG_ON(ref == -ENOSPC);
5613@@ -296,6 +301,7 @@ static int map_data_for_request(struct v
5614
5615 buffer_pfn++;
5616 len -= bytes;
5617+ data_len -= bytes;
5618 off = 0;
5619 ref_cnt++;
5620 }
5621--- sle11-2009-10-16.orig/drivers/xen/sfc_netback/accel_fwd.c 2008-12-15 11:27:22.000000000 +0100
5622+++ sle11-2009-10-16/drivers/xen/sfc_netback/accel_fwd.c 2009-02-16 16:18:36.000000000 +0100
5623@@ -181,10 +181,11 @@ int netback_accel_fwd_add(const __u8 *ma
5624 unsigned long flags;
5625 cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac);
5626 struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
5627+ DECLARE_MAC_BUF(buf);
5628
5629 BUG_ON(fwd_priv == NULL);
5630
5631- DPRINTK("Adding mac " MAC_FMT "\n", MAC_ARG(mac));
5632+ DPRINTK("Adding mac %s\n", print_mac(buf, mac));
5633
5634 spin_lock_irqsave(&fwd_set->fwd_lock, flags);
5635
5636@@ -199,8 +200,8 @@ int netback_accel_fwd_add(const __u8 *ma
5637 if (cuckoo_hash_lookup(&fwd_set->fwd_hash_table,
5638 (cuckoo_hash_key *)(&key), &rc) != 0) {
5639 spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
5640- EPRINTK("MAC address " MAC_FMT " already accelerated.\n",
5641- MAC_ARG(mac));
5642+ EPRINTK("MAC address %s already accelerated.\n",
5643+ print_mac(buf, mac));
5644 return -EEXIST;
5645 }
5646
5647@@ -235,8 +236,9 @@ void netback_accel_fwd_remove(const __u8
5648 unsigned long flags;
5649 cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac);
5650 struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
5651+ DECLARE_MAC_BUF(buf);
5652
5653- DPRINTK("Removing mac " MAC_FMT "\n", MAC_ARG(mac));
5654+ DPRINTK("Removing mac %s\n", print_mac(buf, mac));
5655
5656 BUG_ON(fwd_priv == NULL);
5657
5658@@ -394,14 +396,16 @@ void netback_accel_tx_packet(struct sk_b
5659
5660 if (is_broadcast_ether_addr(skb_mac_header(skb))
5661 && packet_is_arp_reply(skb)) {
5662+ DECLARE_MAC_BUF(buf);
5663+
5664 /*
5665 * update our fast path forwarding to reflect this
5666 * gratuitous ARP
5667 */
5668 mac = skb_mac_header(skb)+ETH_ALEN;
5669
5670- DPRINTK("%s: found gratuitous ARP for " MAC_FMT "\n",
5671- __FUNCTION__, MAC_ARG(mac));
5672+ DPRINTK("%s: found gratuitous ARP for %s\n",
5673+ __FUNCTION__, print_mac(buf, mac));
5674
5675 spin_lock_irqsave(&fwd_set->fwd_lock, flags);
5676 /*
5677--- sle11-2009-10-16.orig/drivers/xen/sfc_netback/accel_msg.c 2009-10-28 14:55:06.000000000 +0100
5678+++ sle11-2009-10-16/drivers/xen/sfc_netback/accel_msg.c 2009-02-16 16:18:36.000000000 +0100
5679@@ -57,11 +57,11 @@ static void netback_accel_msg_tx_localma
5680 {
5681 unsigned long lock_state;
5682 struct net_accel_msg *msg;
5683+ DECLARE_MAC_BUF(buf);
5684
5685 BUG_ON(bend == NULL || mac == NULL);
5686
5687- VPRINTK("Sending local mac message: " MAC_FMT "\n",
5688- MAC_ARG((const char *)mac));
5689+ VPRINTK("Sending local mac message: %s\n", print_mac(buf, mac));
5690
5691 msg = net_accel_msg_start_send(bend->shared_page, &bend->to_domU,
5692 &lock_state);
5693--- sle11-2009-10-16.orig/drivers/xen/sfc_netfront/accel_msg.c 2009-03-04 11:28:34.000000000 +0100
5694+++ sle11-2009-10-16/drivers/xen/sfc_netfront/accel_msg.c 2009-02-16 16:18:36.000000000 +0100
5695@@ -41,11 +41,13 @@ static void vnic_start_interrupts(netfro
5696 /* Prime our interrupt */
5697 spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
5698 if (!netfront_accel_vi_enable_interrupts(vnic)) {
5699+ struct netfront_info *np = netdev_priv(vnic->net_dev);
5700+
5701 /* Cripes, that was quick, better pass it up */
5702 netfront_accel_disable_net_interrupts(vnic);
5703 vnic->irq_enabled = 0;
5704 NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_schedule_count++);
5705- netif_rx_schedule(vnic->net_dev);
5706+ netif_rx_schedule(vnic->net_dev, &np->napi);
5707 } else {
5708 /*
5709 * Nothing yet, make sure we get interrupts through
5710@@ -72,6 +74,7 @@ static void vnic_stop_interrupts(netfron
5711 static void vnic_start_fastpath(netfront_accel_vnic *vnic)
5712 {
5713 struct net_device *net_dev = vnic->net_dev;
5714+ struct netfront_info *np = netdev_priv(net_dev);
5715 unsigned long flags;
5716
5717 DPRINTK("%s\n", __FUNCTION__);
5718@@ -80,9 +83,9 @@ static void vnic_start_fastpath(netfront
5719 vnic->tx_enabled = 1;
5720 spin_unlock_irqrestore(&vnic->tx_lock, flags);
5721
5722- netif_poll_disable(net_dev);
5723+ napi_disable(&np->napi);
5724 vnic->poll_enabled = 1;
5725- netif_poll_enable(net_dev);
5726+ napi_enable(&np->napi);
5727
5728 vnic_start_interrupts(vnic);
5729 }
5730@@ -114,11 +117,11 @@ void vnic_stop_fastpath(netfront_accel_v
5731 spin_unlock_irqrestore(&vnic->tx_lock, flags1);
5732
5733 /* Must prevent polls and hold lock to modify poll_enabled */
5734- netif_poll_disable(net_dev);
5735+ napi_disable(&np->napi);
5736 spin_lock_irqsave(&vnic->irq_enabled_lock, flags1);
5737 vnic->poll_enabled = 0;
5738 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags1);
5739- netif_poll_enable(net_dev);
5740+ napi_enable(&np->napi);
5741 }
5742
5743
5744@@ -326,8 +329,10 @@ static int vnic_process_localmac_msg(net
5745 cuckoo_hash_mac_key key;
5746
5747 if (msg->u.localmac.flags & NET_ACCEL_MSG_ADD) {
5748- DPRINTK("MAC has moved, could be local: " MAC_FMT "\n",
5749- MAC_ARG(msg->u.localmac.mac));
5750+ DECLARE_MAC_BUF(buf);
5751+
5752+ DPRINTK("MAC has moved, could be local: %s\n",
5753+ print_mac(buf, msg->u.localmac.mac));
5754 key = cuckoo_mac_to_key(msg->u.localmac.mac);
5755 spin_lock_irqsave(&vnic->table_lock, flags);
5756 /* Try to remove it, not a big deal if not there */
5757@@ -515,6 +520,8 @@ irqreturn_t netfront_accel_net_channel_i
5758
5759 spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
5760 if (vnic->irq_enabled) {
5761+ struct netfront_info *np = netdev_priv(net_dev);
5762+
5763 netfront_accel_disable_net_interrupts(vnic);
5764 vnic->irq_enabled = 0;
5765 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
5766@@ -527,7 +534,7 @@ irqreturn_t netfront_accel_net_channel_i
5767 vnic->stats.event_count_since_irq;
5768 vnic->stats.event_count_since_irq = 0;
5769 #endif
5770- netif_rx_schedule(net_dev);
5771+ netif_rx_schedule(net_dev, &np->napi);
5772 }
5773 else {
5774 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
5775--- sle11-2009-10-16.orig/drivers/xen/sfc_netfront/accel_vi.c 2009-03-30 16:36:26.000000000 +0200
5776+++ sle11-2009-10-16/drivers/xen/sfc_netfront/accel_vi.c 2009-03-30 16:39:38.000000000 +0200
5777@@ -641,8 +641,10 @@ netfront_accel_vi_tx_post(netfront_accel
5778 (cuckoo_hash_key *)(&key), &value);
5779
5780 if (!try_fastpath) {
5781- VPRINTK("try fast path false for mac: " MAC_FMT "\n",
5782- MAC_ARG(skb->data));
5783+ DECLARE_MAC_BUF(buf);
5784+
5785+ VPRINTK("try fast path false for mac: %s\n",
5786+ print_mac(buf, skb->data));
5787
5788 return NETFRONT_ACCEL_STATUS_CANT;
5789 }
5790@@ -768,9 +770,10 @@ static void netfront_accel_vi_rx_comple
5791 if (compare_ether_addr(skb->data, vnic->mac)) {
5792 struct iphdr *ip = (struct iphdr *)(skb->data + ETH_HLEN);
5793 u16 port;
5794+ DECLARE_MAC_BUF(buf);
5795
5796- DPRINTK("%s: saw wrong MAC address " MAC_FMT "\n",
5797- __FUNCTION__, MAC_ARG(skb->data));
5798+ DPRINTK("%s: saw wrong MAC address %s\n",
5799+ __FUNCTION__, print_mac(buf, skb->data));
5800
5801 if (ip->protocol == IPPROTO_TCP) {
5802 struct tcphdr *tcp = (struct tcphdr *)
5803--- sle11-2009-10-16.orig/drivers/xen/sfc_netutil/accel_util.h 2009-10-28 14:55:06.000000000 +0100
5804+++ sle11-2009-10-16/drivers/xen/sfc_netutil/accel_util.h 2009-02-16 16:18:36.000000000 +0100
5805@@ -63,9 +63,6 @@
5806 DPRINTK("%s at %s:%d\n", #exp, __FILE__, __LINE__); \
5807 } while(0)
5808
5809-#define MAC_FMT "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x"
5810-#define MAC_ARG(_mac) (_mac)[0], (_mac)[1], (_mac)[2], (_mac)[3], (_mac)[4], (_mac)[5]
5811-
5812 #include <xen/xenbus.h>
5813
5814 /*! Map a set of pages from another domain
5815--- sle11-2009-10-16.orig/drivers/xen/xenbus/xenbus_probe.c 2009-02-16 16:17:21.000000000 +0100
5816+++ sle11-2009-10-16/drivers/xen/xenbus/xenbus_probe.c 2009-02-16 16:18:36.000000000 +0100
5817@@ -174,11 +174,9 @@ static int read_backend_details(struct x
5818 }
5819
5820 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16) && (defined(CONFIG_XEN) || defined(MODULE))
5821-static int xenbus_uevent_frontend(struct device *dev, char **envp,
5822- int num_envp, char *buffer, int buffer_size)
5823+static int xenbus_uevent_frontend(struct device *dev, struct kobj_uevent_env *env)
5824 {
5825 struct xenbus_device *xdev;
5826- int length = 0, i = 0;
5827
5828 if (dev == NULL)
5829 return -ENODEV;
5830@@ -187,12 +185,9 @@ static int xenbus_uevent_frontend(struct
5831 return -ENODEV;
5832
5833 /* stuff we want to pass to /sbin/hotplug */
5834- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
5835- "XENBUS_TYPE=%s", xdev->devicetype);
5836- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
5837- "XENBUS_PATH=%s", xdev->nodename);
5838- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
5839- "MODALIAS=xen:%s", xdev->devicetype);
5840+ add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype);
5841+ add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename);
5842+ add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype);
5843
5844 return 0;
5845 }
5846--- sle11-2009-10-16.orig/drivers/xen/xenbus/xenbus_probe_backend.c 2009-02-16 16:17:21.000000000 +0100
5847+++ sle11-2009-10-16/drivers/xen/xenbus/xenbus_probe_backend.c 2009-02-16 16:18:36.000000000 +0100
5848@@ -60,8 +60,7 @@
5849 #include <xen/platform-compat.h>
5850 #endif
5851
5852-static int xenbus_uevent_backend(struct device *dev, char **envp,
5853- int num_envp, char *buffer, int buffer_size);
5854+static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env);
5855 static int xenbus_probe_backend(const char *type, const char *domid);
5856
5857 extern int read_otherend_details(struct xenbus_device *xendev,
5858@@ -128,13 +127,10 @@ static struct xen_bus_type xenbus_backen
5859 },
5860 };
5861
5862-static int xenbus_uevent_backend(struct device *dev, char **envp,
5863- int num_envp, char *buffer, int buffer_size)
5864+static int xenbus_uevent_backend(struct device *dev, struct kobj_uevent_env *env)
5865 {
5866 struct xenbus_device *xdev;
5867 struct xenbus_driver *drv;
5868- int i = 0;
5869- int length = 0;
5870
5871 DPRINTK("");
5872
5873@@ -146,27 +142,16 @@ static int xenbus_uevent_backend(struct
5874 return -ENODEV;
5875
5876 /* stuff we want to pass to /sbin/hotplug */
5877- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
5878- "XENBUS_TYPE=%s", xdev->devicetype);
5879+ add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype);
5880
5881- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
5882- "XENBUS_PATH=%s", xdev->nodename);
5883+ add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename);
5884
5885- add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
5886- "XENBUS_BASE_PATH=%s", xenbus_backend.root);
5887-
5888- /* terminate, set to next free slot, shrink available space */
5889- envp[i] = NULL;
5890- envp = &envp[i];
5891- num_envp -= i;
5892- buffer = &buffer[length];
5893- buffer_size -= length;
5894+ add_uevent_var(env, "XENBUS_BASE_PATH=%s", xenbus_backend.root);
5895
5896 if (dev->driver) {
5897 drv = to_xenbus_driver(dev->driver);
5898 if (drv && drv->uevent)
5899- return drv->uevent(xdev, envp, num_envp, buffer,
5900- buffer_size);
5901+ return drv->uevent(xdev, env);
5902 }
5903
5904 return 0;
5905--- sle11-2009-10-16.orig/drivers/xen/xenoprof/xenoprofile.c 2009-03-04 11:25:55.000000000 +0100
5906+++ sle11-2009-10-16/drivers/xen/xenoprof/xenoprofile.c 2009-03-11 15:39:38.000000000 +0100
5907@@ -29,7 +29,6 @@
5908 #include <xen/driver_util.h>
5909 #include <xen/interface/xen.h>
5910 #include <xen/interface/xenoprof.h>
5911-#include "../../../drivers/oprofile/cpu_buffer.h"
5912 #include "../../../drivers/oprofile/event_buffer.h"
5913
5914 #define MAX_XENOPROF_SAMPLES 16
5915@@ -142,8 +141,7 @@ static void xenoprof_add_pc(xenoprof_buf
5916 if (xenoprof_is_escape(buf, tail) &&
5917 xenoprof_get_event(buf, tail) == XENOPROF_TRACE_BEGIN) {
5918 tracing=1;
5919- oprofile_add_pc(ESCAPE_CODE, buf->event_log[tail].mode,
5920- CPU_TRACE_BEGIN);
5921+ oprofile_add_mode(buf->event_log[tail].mode);
5922 if (!is_passive)
5923 oprofile_samples++;
5924 else
5925--- sle11-2009-10-16.orig/fs/xfs/linux-2.6/xfs_buf.c 2009-10-28 14:55:06.000000000 +0100
5926+++ sle11-2009-10-16/fs/xfs/linux-2.6/xfs_buf.c 2009-02-16 16:18:36.000000000 +0100
5927@@ -187,7 +187,7 @@ free_address(
5928 {
5929 a_list_t *aentry;
5930
5931-#ifdef CONFIG_XEN
5932+#if defined(CONFIG_XEN) || defined(CONFIG_PARAVIRT_XEN)
5933 /*
5934 * Xen needs to be able to make sure it can get an exclusive
5935 * RO mapping of pages it wants to turn into a pagetable. If
5936--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/agp.h 2009-10-28 14:55:06.000000000 +0100
5937+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/agp.h 2009-02-16 16:18:36.000000000 +0100
5938@@ -1,20 +1,22 @@
5939-#ifndef AGP_H
5940-#define AGP_H 1
5941+#ifndef _ASM_X86_AGP_H
5942+#define _ASM_X86_AGP_H
5943
5944 #include <asm/pgtable.h>
5945 #include <asm/cacheflush.h>
5946 #include <asm/system.h>
5947
5948-/*
5949- * Functions to keep the agpgart mappings coherent with the MMU.
5950- * The GART gives the CPU a physical alias of pages in memory. The alias region is
5951- * mapped uncacheable. Make sure there are no conflicting mappings
5952- * with different cachability attributes for the same page. This avoids
5953- * data corruption on some CPUs.
5954+/*
5955+ * Functions to keep the agpgart mappings coherent with the MMU. The
5956+ * GART gives the CPU a physical alias of pages in memory. The alias
5957+ * region is mapped uncacheable. Make sure there are no conflicting
5958+ * mappings with different cachability attributes for the same
5959+ * page. This avoids data corruption on some CPUs.
5960 */
5961
5962-/* Caller's responsibility to call global_flush_tlb() for
5963- * performance reasons */
5964+/*
5965+ * Caller's responsibility to call global_flush_tlb() for performance
5966+ * reasons
5967+ */
5968 #define map_page_into_agp(page) ( \
5969 xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \
5970 ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
5971@@ -24,9 +26,11 @@
5972 change_page_attr(page, 1, PAGE_KERNEL))
5973 #define flush_agp_mappings() global_flush_tlb()
5974
5975-/* Could use CLFLUSH here if the cpu supports it. But then it would
5976- need to be called for each cacheline of the whole page so it may not be
5977- worth it. Would need a page for it. */
5978+/*
5979+ * Could use CLFLUSH here if the cpu supports it. But then it would
5980+ * need to be called for each cacheline of the whole page so it may
5981+ * not be worth it. Would need a page for it.
5982+ */
5983 #define flush_agp_cache() wbinvd()
5984
5985 /* Convert a physical address to an address suitable for the GART. */
5986--- /dev/null 1970-01-01 00:00:00.000000000 +0000
5987+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/desc.h	2009-02-16 16:18:36.000000000 +0100
5988@@ -0,0 +1,5 @@
5989+#ifdef CONFIG_X86_32
5990+# include "desc_32.h"
5991+#else
5992+# include "desc_64.h"
5993+#endif
5994--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/desc_64.h 2008-12-15 11:27:22.000000000 +0100
5995+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/desc_64.h 2009-02-16 16:18:36.000000000 +0100
5996@@ -34,6 +34,18 @@ static inline void clear_LDT(void)
5997 put_cpu();
5998 }
5999
6000+#ifndef CONFIG_X86_NO_TSS
6001+static inline unsigned long __store_tr(void)
6002+{
6003+ unsigned long tr;
6004+
6005+ asm volatile ("str %w0":"=r" (tr));
6006+ return tr;
6007+}
6008+
6009+#define store_tr(tr) (tr) = __store_tr()
6010+#endif
6011+
6012 /*
6013 * This is the ldt that every process will get unless we need
6014 * something other than this.
6015@@ -47,6 +59,18 @@ extern struct desc_ptr cpu_gdt_descr[];
6016 /* the cpu gdt accessor */
6017 #define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
6018
6019+#ifndef CONFIG_XEN
6020+static inline void load_gdt(const struct desc_ptr *ptr)
6021+{
6022+ asm volatile("lgdt %w0"::"m" (*ptr));
6023+}
6024+
6025+static inline void store_gdt(struct desc_ptr *ptr)
6026+{
6027+ asm("sgdt %w0":"=m" (*ptr));
6028+}
6029+#endif
6030+
6031 static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
6032 {
6033 struct gate_struct s;
6034@@ -87,6 +111,16 @@ static inline void set_system_gate_ist(i
6035 {
6036 _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
6037 }
6038+
6039+static inline void load_idt(const struct desc_ptr *ptr)
6040+{
6041+ asm volatile("lidt %w0"::"m" (*ptr));
6042+}
6043+
6044+static inline void store_idt(struct desc_ptr *dtr)
6045+{
6046+ asm("sidt %w0":"=m" (*dtr));
6047+}
6048 #endif
6049
6050 static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
6051--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6052+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/dma-mapping.h	2009-02-16 16:18:36.000000000 +0100
6053@@ -0,0 +1,5 @@
6054+#ifdef CONFIG_X86_32
6055+# include "dma-mapping_32.h"
6056+#else
6057+# include "dma-mapping_64.h"
6058+#endif
6059--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/dma-mapping_32.h 2008-12-15 11:26:44.000000000 +0100
6060+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/dma-mapping_32.h 2009-02-16 16:18:36.000000000 +0100
6061@@ -7,9 +7,9 @@
6062 */
6063
6064 #include <linux/mm.h>
6065+#include <linux/scatterlist.h>
6066 #include <asm/cache.h>
6067 #include <asm/io.h>
6068-#include <asm/scatterlist.h>
6069 #include <asm/swiotlb.h>
6070
6071 static inline int
6072--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/dma-mapping_64.h 2008-12-15 11:27:22.000000000 +0100
6073+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/dma-mapping_64.h 2009-02-16 16:18:36.000000000 +0100
6074@@ -6,8 +6,7 @@
6075 * documentation.
6076 */
6077
6078-
6079-#include <asm/scatterlist.h>
6080+#include <linux/scatterlist.h>
6081 #include <asm/io.h>
6082
6083 struct dma_mapping_ops {
6084@@ -203,4 +202,4 @@ extern int panic_on_overflow;
6085
6086 #endif /* _X8664_DMA_MAPPING_H */
6087
6088-#include <asm-i386/mach-xen/asm/dma-mapping.h>
6089+#include "dma-mapping_32.h"
6090--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6091+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/fixmap.h	2009-02-16 16:18:36.000000000 +0100
6092@@ -0,0 +1,5 @@
6093+#ifdef CONFIG_X86_32
6094+# include "fixmap_32.h"
6095+#else
6096+# include "fixmap_64.h"
6097+#endif
6098--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6099+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/hypercall.h	2009-02-16 16:18:36.000000000 +0100
6100@@ -0,0 +1,404 @@
6101+/******************************************************************************
6102+ * hypercall.h
6103+ *
6104+ * Linux-specific hypervisor handling.
6105+ *
6106+ * Copyright (c) 2002-2004, K A Fraser
6107+ *
6108+ * 64-bit updates:
6109+ * Benjamin Liu <benjamin.liu@intel.com>
6110+ * Jun Nakajima <jun.nakajima@intel.com>
6111+ *
6112+ * This program is free software; you can redistribute it and/or
6113+ * modify it under the terms of the GNU General Public License version 2
6114+ * as published by the Free Software Foundation; or, when distributed
6115+ * separately from the Linux kernel or incorporated into other
6116+ * software packages, subject to the following license:
6117+ *
6118+ * Permission is hereby granted, free of charge, to any person obtaining a copy
6119+ * of this source file (the "Software"), to deal in the Software without
6120+ * restriction, including without limitation the rights to use, copy, modify,
6121+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
6122+ * and to permit persons to whom the Software is furnished to do so, subject to
6123+ * the following conditions:
6124+ *
6125+ * The above copyright notice and this permission notice shall be included in
6126+ * all copies or substantial portions of the Software.
6127+ *
6128+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
6129+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
6130+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
6131+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
6132+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
6133+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
6134+ * IN THE SOFTWARE.
6135+ */
6136+
6137+#ifndef __HYPERCALL_H__
6138+#define __HYPERCALL_H__
6139+
6140+#ifndef __HYPERVISOR_H__
6141+# error "please don't include this file directly"
6142+#endif
6143+
6144+#if CONFIG_XEN_COMPAT <= 0x030002
6145+# include <linux/string.h> /* memcpy() */
6146+#endif
6147+
6148+#ifdef CONFIG_XEN
6149+#define HYPERCALL_ASM_OPERAND "%c"
6150+#define HYPERCALL_LOCATION(op) (hypercall_page + (op) * 32)
6151+#define HYPERCALL_C_OPERAND(name) "i" (HYPERCALL_LOCATION(__HYPERVISOR_##name))
6152+#else
6153+#define HYPERCALL_ASM_OPERAND "*%"
6154+#define HYPERCALL_LOCATION(op) (hypercall_stubs + (op) * 32)
6155+#define HYPERCALL_C_OPERAND(name) "g" (HYPERCALL_LOCATION(__HYPERVISOR_##name))
6156+#endif
6157+
6158+#define HYPERCALL_ARG(arg, n) \
6159+ register typeof((arg)+0) __arg##n asm(HYPERCALL_arg##n) = (arg)
6160+
6161+#define _hypercall0(type, name) \
6162+({ \
6163+ type __res; \
6164+ asm volatile ( \
6165+ "call " HYPERCALL_ASM_OPERAND "1" \
6166+ : "=a" (__res) \
6167+ : HYPERCALL_C_OPERAND(name) \
6168+ : "memory" ); \
6169+ __res; \
6170+})
6171+
6172+#define _hypercall1(type, name, arg) \
6173+({ \
6174+ type __res; \
6175+ HYPERCALL_ARG(arg, 1); \
6176+ asm volatile ( \
6177+ "call " HYPERCALL_ASM_OPERAND "2" \
6178+ : "=a" (__res), "+r" (__arg1) \
6179+ : HYPERCALL_C_OPERAND(name) \
6180+ : "memory" ); \
6181+ __res; \
6182+})
6183+
6184+#define _hypercall2(type, name, a1, a2) \
6185+({ \
6186+ type __res; \
6187+ HYPERCALL_ARG(a1, 1); \
6188+ HYPERCALL_ARG(a2, 2); \
6189+ asm volatile ( \
6190+ "call " HYPERCALL_ASM_OPERAND "3" \
6191+ : "=a" (__res), "+r" (__arg1), "+r" (__arg2) \
6192+ : HYPERCALL_C_OPERAND(name) \
6193+ : "memory" ); \
6194+ __res; \
6195+})
6196+
6197+#define _hypercall3(type, name, a1, a2, a3) \
6198+({ \
6199+ type __res; \
6200+ HYPERCALL_ARG(a1, 1); \
6201+ HYPERCALL_ARG(a2, 2); \
6202+ HYPERCALL_ARG(a3, 3); \
6203+ asm volatile ( \
6204+ "call " HYPERCALL_ASM_OPERAND "4" \
6205+ : "=a" (__res), "+r" (__arg1), \
6206+ "+r" (__arg2), "+r" (__arg3) \
6207+ : HYPERCALL_C_OPERAND(name) \
6208+ : "memory" ); \
6209+ __res; \
6210+})
6211+
6212+#define _hypercall4(type, name, a1, a2, a3, a4) \
6213+({ \
6214+ type __res; \
6215+ HYPERCALL_ARG(a1, 1); \
6216+ HYPERCALL_ARG(a2, 2); \
6217+ HYPERCALL_ARG(a3, 3); \
6218+ HYPERCALL_ARG(a4, 4); \
6219+ asm volatile ( \
6220+ "call " HYPERCALL_ASM_OPERAND "5" \
6221+ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \
6222+ "+r" (__arg3), "+r" (__arg4) \
6223+ : HYPERCALL_C_OPERAND(name) \
6224+ : "memory" ); \
6225+ __res; \
6226+})
6227+
6228+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
6229+({ \
6230+ type __res; \
6231+ HYPERCALL_ARG(a1, 1); \
6232+ HYPERCALL_ARG(a2, 2); \
6233+ HYPERCALL_ARG(a3, 3); \
6234+ HYPERCALL_ARG(a4, 4); \
6235+ HYPERCALL_ARG(a5, 5); \
6236+ asm volatile ( \
6237+ "call " HYPERCALL_ASM_OPERAND "6" \
6238+ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \
6239+ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \
6240+ : HYPERCALL_C_OPERAND(name) \
6241+ : "memory" ); \
6242+ __res; \
6243+})
6244+
6245+#define _hypercall(type, op, a1, a2, a3, a4, a5) \
6246+({ \
6247+ type __res; \
6248+ HYPERCALL_ARG(a1, 1); \
6249+ HYPERCALL_ARG(a2, 2); \
6250+ HYPERCALL_ARG(a3, 3); \
6251+ HYPERCALL_ARG(a4, 4); \
6252+ HYPERCALL_ARG(a5, 5); \
6253+ asm volatile ( \
6254+ "call *%6" \
6255+ : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \
6256+ "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \
6257+ : "g" (HYPERCALL_LOCATION(op)) \
6258+ : "memory" ); \
6259+ __res; \
6260+})
6261+
6262+#ifdef CONFIG_X86_32
6263+# include "hypercall_32.h"
6264+#else
6265+# include "hypercall_64.h"
6266+#endif
6267+
6268+static inline int __must_check
6269+HYPERVISOR_set_trap_table(
6270+ const trap_info_t *table)
6271+{
6272+ return _hypercall1(int, set_trap_table, table);
6273+}
6274+
6275+static inline int __must_check
6276+HYPERVISOR_mmu_update(
6277+ mmu_update_t *req, unsigned int count, unsigned int *success_count,
6278+ domid_t domid)
6279+{
6280+ if (arch_use_lazy_mmu_mode())
6281+ return xen_multi_mmu_update(req, count, success_count, domid);
6282+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
6283+}
6284+
6285+static inline int __must_check
6286+HYPERVISOR_mmuext_op(
6287+ struct mmuext_op *op, unsigned int count, unsigned int *success_count,
6288+ domid_t domid)
6289+{
6290+ if (arch_use_lazy_mmu_mode())
6291+ return xen_multi_mmuext_op(op, count, success_count, domid);
6292+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
6293+}
6294+
6295+static inline int __must_check
6296+HYPERVISOR_set_gdt(
6297+ unsigned long *frame_list, unsigned int entries)
6298+{
6299+ return _hypercall2(int, set_gdt, frame_list, entries);
6300+}
6301+
6302+static inline int __must_check
6303+HYPERVISOR_stack_switch(
6304+ unsigned long ss, unsigned long esp)
6305+{
6306+ return _hypercall2(int, stack_switch, ss, esp);
6307+}
6308+
6309+static inline int
6310+HYPERVISOR_fpu_taskswitch(
6311+ int set)
6312+{
6313+ return _hypercall1(int, fpu_taskswitch, set);
6314+}
6315+
6316+#if CONFIG_XEN_COMPAT <= 0x030002
6317+static inline int __must_check
6318+HYPERVISOR_sched_op_compat(
6319+ int cmd, unsigned long arg)
6320+{
6321+ return _hypercall2(int, sched_op_compat, cmd, arg);
6322+}
6323+#endif
6324+
6325+static inline int __must_check
6326+HYPERVISOR_sched_op(
6327+ int cmd, void *arg)
6328+{
6329+ return _hypercall2(int, sched_op, cmd, arg);
6330+}
6331+
6332+static inline int __must_check
6333+HYPERVISOR_platform_op(
6334+ struct xen_platform_op *platform_op)
6335+{
6336+ platform_op->interface_version = XENPF_INTERFACE_VERSION;
6337+ return _hypercall1(int, platform_op, platform_op);
6338+}
6339+
6340+static inline int __must_check
6341+HYPERVISOR_set_debugreg(
6342+ unsigned int reg, unsigned long value)
6343+{
6344+ return _hypercall2(int, set_debugreg, reg, value);
6345+}
6346+
6347+static inline unsigned long __must_check
6348+HYPERVISOR_get_debugreg(
6349+ unsigned int reg)
6350+{
6351+ return _hypercall1(unsigned long, get_debugreg, reg);
6352+}
6353+
6354+static inline int __must_check
6355+HYPERVISOR_memory_op(
6356+ unsigned int cmd, void *arg)
6357+{
6358+ if (arch_use_lazy_mmu_mode())
6359+ xen_multicall_flush(false);
6360+ return _hypercall2(int, memory_op, cmd, arg);
6361+}
6362+
6363+static inline int __must_check
6364+HYPERVISOR_multicall(
6365+ multicall_entry_t *call_list, unsigned int nr_calls)
6366+{
6367+ return _hypercall2(int, multicall, call_list, nr_calls);
6368+}
6369+
6370+static inline int __must_check
6371+HYPERVISOR_event_channel_op(
6372+ int cmd, void *arg)
6373+{
6374+ int rc = _hypercall2(int, event_channel_op, cmd, arg);
6375+
6376+#if CONFIG_XEN_COMPAT <= 0x030002
6377+ if (unlikely(rc == -ENOSYS)) {
6378+ struct evtchn_op op;
6379+ op.cmd = cmd;
6380+ memcpy(&op.u, arg, sizeof(op.u));
6381+ rc = _hypercall1(int, event_channel_op_compat, &op);
6382+ memcpy(arg, &op.u, sizeof(op.u));
6383+ }
6384+#endif
6385+
6386+ return rc;
6387+}
6388+
6389+static inline int __must_check
6390+HYPERVISOR_xen_version(
6391+ int cmd, void *arg)
6392+{
6393+ return _hypercall2(int, xen_version, cmd, arg);
6394+}
6395+
6396+static inline int __must_check
6397+HYPERVISOR_console_io(
6398+ int cmd, unsigned int count, char *str)
6399+{
6400+ return _hypercall3(int, console_io, cmd, count, str);
6401+}
6402+
6403+static inline int __must_check
6404+HYPERVISOR_physdev_op(
6405+ int cmd, void *arg)
6406+{
6407+ int rc = _hypercall2(int, physdev_op, cmd, arg);
6408+
6409+#if CONFIG_XEN_COMPAT <= 0x030002
6410+ if (unlikely(rc == -ENOSYS)) {
6411+ struct physdev_op op;
6412+ op.cmd = cmd;
6413+ memcpy(&op.u, arg, sizeof(op.u));
6414+ rc = _hypercall1(int, physdev_op_compat, &op);
6415+ memcpy(arg, &op.u, sizeof(op.u));
6416+ }
6417+#endif
6418+
6419+ return rc;
6420+}
6421+
6422+static inline int __must_check
6423+HYPERVISOR_grant_table_op(
6424+ unsigned int cmd, void *uop, unsigned int count)
6425+{
6426+ if (arch_use_lazy_mmu_mode())
6427+ xen_multicall_flush(false);
6428+ return _hypercall3(int, grant_table_op, cmd, uop, count);
6429+}
6430+
6431+static inline int __must_check
6432+HYPERVISOR_vm_assist(
6433+ unsigned int cmd, unsigned int type)
6434+{
6435+ return _hypercall2(int, vm_assist, cmd, type);
6436+}
6437+
6438+static inline int __must_check
6439+HYPERVISOR_vcpu_op(
6440+ int cmd, unsigned int vcpuid, void *extra_args)
6441+{
6442+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
6443+}
6444+
6445+static inline int __must_check
6446+HYPERVISOR_suspend(
6447+ unsigned long srec)
6448+{
6449+ struct sched_shutdown sched_shutdown = {
6450+ .reason = SHUTDOWN_suspend
6451+ };
6452+
6453+ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
6454+ &sched_shutdown, srec);
6455+
6456+#if CONFIG_XEN_COMPAT <= 0x030002
6457+ if (rc == -ENOSYS)
6458+ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
6459+ SHUTDOWN_suspend, srec);
6460+#endif
6461+
6462+ return rc;
6463+}
6464+
6465+#if CONFIG_XEN_COMPAT <= 0x030002
6466+static inline int
6467+HYPERVISOR_nmi_op(
6468+ unsigned long op, void *arg)
6469+{
6470+ return _hypercall2(int, nmi_op, op, arg);
6471+}
6472+#endif
6473+
6474+#ifndef CONFIG_XEN
6475+static inline unsigned long __must_check
6476+HYPERVISOR_hvm_op(
6477+ int op, void *arg)
6478+{
6479+ return _hypercall2(unsigned long, hvm_op, op, arg);
6480+}
6481+#endif
6482+
6483+static inline int __must_check
6484+HYPERVISOR_callback_op(
6485+ int cmd, const void *arg)
6486+{
6487+ return _hypercall2(int, callback_op, cmd, arg);
6488+}
6489+
6490+static inline int __must_check
6491+HYPERVISOR_xenoprof_op(
6492+ int op, void *arg)
6493+{
6494+ return _hypercall2(int, xenoprof_op, op, arg);
6495+}
6496+
6497+static inline int __must_check
6498+HYPERVISOR_kexec_op(
6499+ unsigned long op, void *args)
6500+{
6501+ return _hypercall2(int, kexec_op, op, args);
6502+}
6503+
6504+#endif /* __HYPERCALL_H__ */
6505--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/hypercall_32.h 2009-03-04 11:28:34.000000000 +0100
6506+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/hypercall_32.h 2009-02-16 16:18:36.000000000 +0100
6507@@ -1,191 +1,10 @@
6508-/******************************************************************************
6509- * hypercall.h
6510- *
6511- * Linux-specific hypervisor handling.
6512- *
6513- * Copyright (c) 2002-2004, K A Fraser
6514- *
6515- * This program is free software; you can redistribute it and/or
6516- * modify it under the terms of the GNU General Public License version 2
6517- * as published by the Free Software Foundation; or, when distributed
6518- * separately from the Linux kernel or incorporated into other
6519- * software packages, subject to the following license:
6520- *
6521- * Permission is hereby granted, free of charge, to any person obtaining a copy
6522- * of this source file (the "Software"), to deal in the Software without
6523- * restriction, including without limitation the rights to use, copy, modify,
6524- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
6525- * and to permit persons to whom the Software is furnished to do so, subject to
6526- * the following conditions:
6527- *
6528- * The above copyright notice and this permission notice shall be included in
6529- * all copies or substantial portions of the Software.
6530- *
6531- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
6532- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
6533- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
6534- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
6535- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
6536- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
6537- * IN THE SOFTWARE.
6538- */
6539-
6540-#ifndef __HYPERCALL_H__
6541-#define __HYPERCALL_H__
6542-
6543-#include <linux/string.h> /* memcpy() */
6544-#include <linux/stringify.h>
6545-
6546-#ifndef __HYPERVISOR_H__
6547-# error "please don't include this file directly"
6548-#endif
6549-
6550-#ifdef CONFIG_XEN
6551-#define HYPERCALL_STR(name) \
6552- "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)"
6553-#else
6554-#define HYPERCALL_STR(name) \
6555- "mov hypercall_stubs,%%eax; " \
6556- "add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\
6557- "call *%%eax"
6558-#endif
6559-
6560-#define _hypercall0(type, name) \
6561-({ \
6562- type __res; \
6563- asm volatile ( \
6564- HYPERCALL_STR(name) \
6565- : "=a" (__res) \
6566- : \
6567- : "memory" ); \
6568- __res; \
6569-})
6570-
6571-#define _hypercall1(type, name, a1) \
6572-({ \
6573- type __res; \
6574- long __ign1; \
6575- asm volatile ( \
6576- HYPERCALL_STR(name) \
6577- : "=a" (__res), "=b" (__ign1) \
6578- : "1" ((long)(a1)) \
6579- : "memory" ); \
6580- __res; \
6581-})
6582-
6583-#define _hypercall2(type, name, a1, a2) \
6584-({ \
6585- type __res; \
6586- long __ign1, __ign2; \
6587- asm volatile ( \
6588- HYPERCALL_STR(name) \
6589- : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
6590- : "1" ((long)(a1)), "2" ((long)(a2)) \
6591- : "memory" ); \
6592- __res; \
6593-})
6594-
6595-#define _hypercall3(type, name, a1, a2, a3) \
6596-({ \
6597- type __res; \
6598- long __ign1, __ign2, __ign3; \
6599- asm volatile ( \
6600- HYPERCALL_STR(name) \
6601- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
6602- "=d" (__ign3) \
6603- : "1" ((long)(a1)), "2" ((long)(a2)), \
6604- "3" ((long)(a3)) \
6605- : "memory" ); \
6606- __res; \
6607-})
6608-
6609-#define _hypercall4(type, name, a1, a2, a3, a4) \
6610-({ \
6611- type __res; \
6612- long __ign1, __ign2, __ign3, __ign4; \
6613- asm volatile ( \
6614- HYPERCALL_STR(name) \
6615- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
6616- "=d" (__ign3), "=S" (__ign4) \
6617- : "1" ((long)(a1)), "2" ((long)(a2)), \
6618- "3" ((long)(a3)), "4" ((long)(a4)) \
6619- : "memory" ); \
6620- __res; \
6621-})
6622-
6623-#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
6624-({ \
6625- type __res; \
6626- long __ign1, __ign2, __ign3, __ign4, __ign5; \
6627- asm volatile ( \
6628- HYPERCALL_STR(name) \
6629- : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
6630- "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
6631- : "1" ((long)(a1)), "2" ((long)(a2)), \
6632- "3" ((long)(a3)), "4" ((long)(a4)), \
6633- "5" ((long)(a5)) \
6634- : "memory" ); \
6635- __res; \
6636-})
6637-
6638-#define _hypercall(type, op, a1, a2, a3, a4, a5) \
6639-({ \
6640- type __res; \
6641- register typeof((a1)+0) __arg1 asm("ebx") = (a1); \
6642- register typeof((a2)+0) __arg2 asm("ecx") = (a2); \
6643- register typeof((a3)+0) __arg3 asm("edx") = (a3); \
6644- register typeof((a4)+0) __arg4 asm("esi") = (a4); \
6645- register typeof((a5)+0) __arg5 asm("edi") = (a5); \
6646- asm volatile ( \
6647- "call *%6" \
6648- : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \
6649- "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \
6650- : "0" (hypercall_page + (op) * 32) \
6651- : "memory" ); \
6652- __res; \
6653-})
6654-
6655-static inline int __must_check
6656-HYPERVISOR_set_trap_table(
6657- const trap_info_t *table)
6658-{
6659- return _hypercall1(int, set_trap_table, table);
6660-}
6661-
6662-static inline int __must_check
6663-HYPERVISOR_mmu_update(
6664- mmu_update_t *req, unsigned int count, unsigned int *success_count,
6665- domid_t domid)
6666-{
6667- if (arch_use_lazy_mmu_mode())
6668- return xen_multi_mmu_update(req, count, success_count, domid);
6669- return _hypercall4(int, mmu_update, req, count, success_count, domid);
6670-}
6671-
6672-static inline int __must_check
6673-HYPERVISOR_mmuext_op(
6674- struct mmuext_op *op, unsigned int count, unsigned int *success_count,
6675- domid_t domid)
6676-{
6677- if (arch_use_lazy_mmu_mode())
6678- return xen_multi_mmuext_op(op, count, success_count, domid);
6679- return _hypercall4(int, mmuext_op, op, count, success_count, domid);
6680-}
6681-
6682-static inline int __must_check
6683-HYPERVISOR_set_gdt(
6684- unsigned long *frame_list, unsigned int entries)
6685-{
6686- return _hypercall2(int, set_gdt, frame_list, entries);
6687-}
6688-
6689-static inline int __must_check
6690-HYPERVISOR_stack_switch(
6691- unsigned long ss, unsigned long esp)
6692-{
6693- return _hypercall2(int, stack_switch, ss, esp);
6694-}
6695+#define HYPERCALL_arg1 "ebx"
6696+#define HYPERCALL_arg2 "ecx"
6697+#define HYPERCALL_arg3 "edx"
6698+#define HYPERCALL_arg4 "esi"
6699+#define HYPERCALL_arg5 "edi"
6700
6701+#if CONFIG_XEN_COMPAT <= 0x030002
6702 static inline int __must_check
6703 HYPERVISOR_set_callbacks(
6704 unsigned long event_selector, unsigned long event_address,
6705@@ -195,80 +14,24 @@ HYPERVISOR_set_callbacks(
6706 event_selector, event_address,
6707 failsafe_selector, failsafe_address);
6708 }
6709-
6710-static inline int
6711-HYPERVISOR_fpu_taskswitch(
6712- int set)
6713-{
6714- return _hypercall1(int, fpu_taskswitch, set);
6715-}
6716-
6717-static inline int __must_check
6718-HYPERVISOR_sched_op_compat(
6719- int cmd, unsigned long arg)
6720-{
6721- return _hypercall2(int, sched_op_compat, cmd, arg);
6722-}
6723-
6724-static inline int __must_check
6725-HYPERVISOR_sched_op(
6726- int cmd, void *arg)
6727-{
6728- return _hypercall2(int, sched_op, cmd, arg);
6729-}
6730+#endif
6731
6732 static inline long __must_check
6733 HYPERVISOR_set_timer_op(
6734 u64 timeout)
6735 {
6736- unsigned long timeout_hi = (unsigned long)(timeout>>32);
6737- unsigned long timeout_lo = (unsigned long)timeout;
6738- return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
6739-}
6740-
6741-static inline int __must_check
6742-HYPERVISOR_platform_op(
6743- struct xen_platform_op *platform_op)
6744-{
6745- platform_op->interface_version = XENPF_INTERFACE_VERSION;
6746- return _hypercall1(int, platform_op, platform_op);
6747-}
6748-
6749-static inline int __must_check
6750-HYPERVISOR_set_debugreg(
6751- unsigned int reg, unsigned long value)
6752-{
6753- return _hypercall2(int, set_debugreg, reg, value);
6754-}
6755-
6756-static inline unsigned long __must_check
6757-HYPERVISOR_get_debugreg(
6758- unsigned int reg)
6759-{
6760- return _hypercall1(unsigned long, get_debugreg, reg);
6761+ return _hypercall2(long, set_timer_op,
6762+ (unsigned long)timeout,
6763+ (unsigned long)(timeout>>32));
6764 }
6765
6766 static inline int __must_check
6767 HYPERVISOR_update_descriptor(
6768 u64 ma, u64 desc)
6769 {
6770- return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
6771-}
6772-
6773-static inline int __must_check
6774-HYPERVISOR_memory_op(
6775- unsigned int cmd, void *arg)
6776-{
6777- if (arch_use_lazy_mmu_mode())
6778- xen_multicall_flush(false);
6779- return _hypercall2(int, memory_op, cmd, arg);
6780-}
6781-
6782-static inline int __must_check
6783-HYPERVISOR_multicall(
6784- multicall_entry_t *call_list, unsigned int nr_calls)
6785-{
6786- return _hypercall2(int, multicall, call_list, nr_calls);
6787+ return _hypercall4(int, update_descriptor,
6788+ (unsigned long)ma, (unsigned long)(ma>>32),
6789+ (unsigned long)desc, (unsigned long)(desc>>32));
6790 }
6791
6792 static inline int __must_check
6793@@ -287,67 +50,6 @@ HYPERVISOR_update_va_mapping(
6794 }
6795
6796 static inline int __must_check
6797-HYPERVISOR_event_channel_op(
6798- int cmd, void *arg)
6799-{
6800- int rc = _hypercall2(int, event_channel_op, cmd, arg);
6801-
6802-#if CONFIG_XEN_COMPAT <= 0x030002
6803- if (unlikely(rc == -ENOSYS)) {
6804- struct evtchn_op op;
6805- op.cmd = cmd;
6806- memcpy(&op.u, arg, sizeof(op.u));
6807- rc = _hypercall1(int, event_channel_op_compat, &op);
6808- memcpy(arg, &op.u, sizeof(op.u));
6809- }
6810-#endif
6811-
6812- return rc;
6813-}
6814-
6815-static inline int __must_check
6816-HYPERVISOR_xen_version(
6817- int cmd, void *arg)
6818-{
6819- return _hypercall2(int, xen_version, cmd, arg);
6820-}
6821-
6822-static inline int __must_check
6823-HYPERVISOR_console_io(
6824- int cmd, unsigned int count, char *str)
6825-{
6826- return _hypercall3(int, console_io, cmd, count, str);
6827-}
6828-
6829-static inline int __must_check
6830-HYPERVISOR_physdev_op(
6831- int cmd, void *arg)
6832-{
6833- int rc = _hypercall2(int, physdev_op, cmd, arg);
6834-
6835-#if CONFIG_XEN_COMPAT <= 0x030002
6836- if (unlikely(rc == -ENOSYS)) {
6837- struct physdev_op op;
6838- op.cmd = cmd;
6839- memcpy(&op.u, arg, sizeof(op.u));
6840- rc = _hypercall1(int, physdev_op_compat, &op);
6841- memcpy(arg, &op.u, sizeof(op.u));
6842- }
6843-#endif
6844-
6845- return rc;
6846-}
6847-
6848-static inline int __must_check
6849-HYPERVISOR_grant_table_op(
6850- unsigned int cmd, void *uop, unsigned int count)
6851-{
6852- if (arch_use_lazy_mmu_mode())
6853- xen_multicall_flush(false);
6854- return _hypercall3(int, grant_table_op, cmd, uop, count);
6855-}
6856-
6857-static inline int __must_check
6858 HYPERVISOR_update_va_mapping_otherdomain(
6859 unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
6860 {
6861@@ -358,80 +60,3 @@ HYPERVISOR_update_va_mapping_otherdomain
6862 return _hypercall5(int, update_va_mapping_otherdomain, va,
6863 new_val.pte_low, pte_hi, flags, domid);
6864 }
6865-
6866-static inline int __must_check
6867-HYPERVISOR_vm_assist(
6868- unsigned int cmd, unsigned int type)
6869-{
6870- return _hypercall2(int, vm_assist, cmd, type);
6871-}
6872-
6873-static inline int __must_check
6874-HYPERVISOR_vcpu_op(
6875- int cmd, unsigned int vcpuid, void *extra_args)
6876-{
6877- return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
6878-}
6879-
6880-static inline int __must_check
6881-HYPERVISOR_suspend(
6882- unsigned long srec)
6883-{
6884- struct sched_shutdown sched_shutdown = {
6885- .reason = SHUTDOWN_suspend
6886- };
6887-
6888- int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
6889- &sched_shutdown, srec);
6890-
6891-#if CONFIG_XEN_COMPAT <= 0x030002
6892- if (rc == -ENOSYS)
6893- rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
6894- SHUTDOWN_suspend, srec);
6895-#endif
6896-
6897- return rc;
6898-}
6899-
6900-#if CONFIG_XEN_COMPAT <= 0x030002
6901-static inline int
6902-HYPERVISOR_nmi_op(
6903- unsigned long op, void *arg)
6904-{
6905- return _hypercall2(int, nmi_op, op, arg);
6906-}
6907-#endif
6908-
6909-#ifndef CONFIG_XEN
6910-static inline unsigned long __must_check
6911-HYPERVISOR_hvm_op(
6912- int op, void *arg)
6913-{
6914- return _hypercall2(unsigned long, hvm_op, op, arg);
6915-}
6916-#endif
6917-
6918-static inline int __must_check
6919-HYPERVISOR_callback_op(
6920- int cmd, const void *arg)
6921-{
6922- return _hypercall2(int, callback_op, cmd, arg);
6923-}
6924-
6925-static inline int __must_check
6926-HYPERVISOR_xenoprof_op(
6927- int op, void *arg)
6928-{
6929- return _hypercall2(int, xenoprof_op, op, arg);
6930-}
6931-
6932-static inline int __must_check
6933-HYPERVISOR_kexec_op(
6934- unsigned long op, void *args)
6935-{
6936- return _hypercall2(int, kexec_op, op, args);
6937-}
6938-
6939-
6940-
6941-#endif /* __HYPERCALL_H__ */
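/*
 * A minimal sketch, not part of this patch: with the 32- and 64-bit
 * headers reduced to the HYPERCALL_argN register names introduced
 * below, the _hypercallN macros can be written once in an assumed
 * shared header (HYPERCALL_STR() is taken to move there as well).
 * Two-argument form shown; the others follow the same pattern.
 */
#define _hypercall2(type, name, a1, a2)                                 \
({                                                                      \
        type __res;                                                     \
        register typeof((a1)+0) __arg1 asm(HYPERCALL_arg1) = (a1);     \
        register typeof((a2)+0) __arg2 asm(HYPERCALL_arg2) = (a2);     \
        asm volatile (                                                  \
                HYPERCALL_STR(name)                                     \
                : "=a" (__res), "+r" (__arg1), "+r" (__arg2)            \
                :                                                       \
                : "memory" );                                           \
        __res;                                                          \
})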
6942--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/hypercall_64.h 2009-03-04 11:28:34.000000000 +0100
6943+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/hypercall_64.h 2009-02-16 16:18:36.000000000 +0100
6944@@ -1,197 +1,10 @@
6945-/******************************************************************************
6946- * hypercall.h
6947- *
6948- * Linux-specific hypervisor handling.
6949- *
6950- * Copyright (c) 2002-2004, K A Fraser
6951- *
6952- * 64-bit updates:
6953- * Benjamin Liu <benjamin.liu@intel.com>
6954- * Jun Nakajima <jun.nakajima@intel.com>
6955- *
6956- * This program is free software; you can redistribute it and/or
6957- * modify it under the terms of the GNU General Public License version 2
6958- * as published by the Free Software Foundation; or, when distributed
6959- * separately from the Linux kernel or incorporated into other
6960- * software packages, subject to the following license:
6961- *
6962- * Permission is hereby granted, free of charge, to any person obtaining a copy
6963- * of this source file (the "Software"), to deal in the Software without
6964- * restriction, including without limitation the rights to use, copy, modify,
6965- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
6966- * and to permit persons to whom the Software is furnished to do so, subject to
6967- * the following conditions:
6968- *
6969- * The above copyright notice and this permission notice shall be included in
6970- * all copies or substantial portions of the Software.
6971- *
6972- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
6973- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
6974- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
6975- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
6976- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
6977- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
6978- * IN THE SOFTWARE.
6979- */
6980-
6981-#ifndef __HYPERCALL_H__
6982-#define __HYPERCALL_H__
6983-
6984-#include <linux/string.h> /* memcpy() */
6985-#include <linux/stringify.h>
6986-
6987-#ifndef __HYPERVISOR_H__
6988-# error "please don't include this file directly"
6989-#endif
6990-
6991-#ifdef CONFIG_XEN
6992-#define HYPERCALL_STR(name) \
6993- "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)"
6994-#else
6995-#define HYPERCALL_STR(name) \
6996- "mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\
6997- "add hypercall_stubs(%%rip),%%rax; " \
6998- "call *%%rax"
6999-#endif
7000-
7001-#define _hypercall0(type, name) \
7002-({ \
7003- type __res; \
7004- asm volatile ( \
7005- HYPERCALL_STR(name) \
7006- : "=a" (__res) \
7007- : \
7008- : "memory" ); \
7009- __res; \
7010-})
7011-
7012-#define _hypercall1(type, name, a1) \
7013-({ \
7014- type __res; \
7015- long __ign1; \
7016- asm volatile ( \
7017- HYPERCALL_STR(name) \
7018- : "=a" (__res), "=D" (__ign1) \
7019- : "1" ((long)(a1)) \
7020- : "memory" ); \
7021- __res; \
7022-})
7023-
7024-#define _hypercall2(type, name, a1, a2) \
7025-({ \
7026- type __res; \
7027- long __ign1, __ign2; \
7028- asm volatile ( \
7029- HYPERCALL_STR(name) \
7030- : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \
7031- : "1" ((long)(a1)), "2" ((long)(a2)) \
7032- : "memory" ); \
7033- __res; \
7034-})
7035-
7036-#define _hypercall3(type, name, a1, a2, a3) \
7037-({ \
7038- type __res; \
7039- long __ign1, __ign2, __ign3; \
7040- asm volatile ( \
7041- HYPERCALL_STR(name) \
7042- : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
7043- "=d" (__ign3) \
7044- : "1" ((long)(a1)), "2" ((long)(a2)), \
7045- "3" ((long)(a3)) \
7046- : "memory" ); \
7047- __res; \
7048-})
7049-
7050-#define _hypercall4(type, name, a1, a2, a3, a4) \
7051-({ \
7052- type __res; \
7053- long __ign1, __ign2, __ign3; \
7054- register long __arg4 asm("r10") = (long)(a4); \
7055- asm volatile ( \
7056- HYPERCALL_STR(name) \
7057- : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
7058- "=d" (__ign3), "+r" (__arg4) \
7059- : "1" ((long)(a1)), "2" ((long)(a2)), \
7060- "3" ((long)(a3)) \
7061- : "memory" ); \
7062- __res; \
7063-})
7064-
7065-#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
7066-({ \
7067- type __res; \
7068- long __ign1, __ign2, __ign3; \
7069- register long __arg4 asm("r10") = (long)(a4); \
7070- register long __arg5 asm("r8") = (long)(a5); \
7071- asm volatile ( \
7072- HYPERCALL_STR(name) \
7073- : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
7074- "=d" (__ign3), "+r" (__arg4), "+r" (__arg5) \
7075- : "1" ((long)(a1)), "2" ((long)(a2)), \
7076- "3" ((long)(a3)) \
7077- : "memory" ); \
7078- __res; \
7079-})
7080-
7081-#define _hypercall(type, op, a1, a2, a3, a4, a5) \
7082-({ \
7083- type __res; \
7084- register typeof((a1)+0) __arg1 asm("rdi") = (a1); \
7085- register typeof((a2)+0) __arg2 asm("rsi") = (a2); \
7086- register typeof((a3)+0) __arg3 asm("rdx") = (a3); \
7087- register typeof((a4)+0) __arg4 asm("r10") = (a4); \
7088- register typeof((a5)+0) __arg5 asm("r8") = (a5); \
7089- asm volatile ( \
7090- "call *%6" \
7091- : "=a" (__res), "+r" (__arg1), "+r" (__arg2), \
7092- "+r" (__arg3), "+r" (__arg4), "+r" (__arg5) \
7093- : "0" (hypercall_page + (op) * 32) \
7094- : "memory" ); \
7095- __res; \
7096-})
7097-
7098-static inline int __must_check
7099-HYPERVISOR_set_trap_table(
7100- const trap_info_t *table)
7101-{
7102- return _hypercall1(int, set_trap_table, table);
7103-}
7104-
7105-static inline int __must_check
7106-HYPERVISOR_mmu_update(
7107- mmu_update_t *req, unsigned int count, unsigned int *success_count,
7108- domid_t domid)
7109-{
7110- if (arch_use_lazy_mmu_mode())
7111- return xen_multi_mmu_update(req, count, success_count, domid);
7112- return _hypercall4(int, mmu_update, req, count, success_count, domid);
7113-}
7114-
7115-static inline int __must_check
7116-HYPERVISOR_mmuext_op(
7117- struct mmuext_op *op, unsigned int count, unsigned int *success_count,
7118- domid_t domid)
7119-{
7120- if (arch_use_lazy_mmu_mode())
7121- return xen_multi_mmuext_op(op, count, success_count, domid);
7122- return _hypercall4(int, mmuext_op, op, count, success_count, domid);
7123-}
7124-
7125-static inline int __must_check
7126-HYPERVISOR_set_gdt(
7127- unsigned long *frame_list, unsigned int entries)
7128-{
7129- return _hypercall2(int, set_gdt, frame_list, entries);
7130-}
7131-
7132-static inline int __must_check
7133-HYPERVISOR_stack_switch(
7134- unsigned long ss, unsigned long esp)
7135-{
7136- return _hypercall2(int, stack_switch, ss, esp);
7137-}
7138+#define HYPERCALL_arg1 "rdi"
7139+#define HYPERCALL_arg2 "rsi"
7140+#define HYPERCALL_arg3 "rdx"
7141+#define HYPERCALL_arg4 "r10"
7142+#define HYPERCALL_arg5 "r8"
7143
7144+#if CONFIG_XEN_COMPAT <= 0x030002
7145 static inline int __must_check
7146 HYPERVISOR_set_callbacks(
7147 unsigned long event_address, unsigned long failsafe_address,
7148@@ -200,27 +13,7 @@ HYPERVISOR_set_callbacks(
7149 return _hypercall3(int, set_callbacks,
7150 event_address, failsafe_address, syscall_address);
7151 }
7152-
7153-static inline int
7154-HYPERVISOR_fpu_taskswitch(
7155- int set)
7156-{
7157- return _hypercall1(int, fpu_taskswitch, set);
7158-}
7159-
7160-static inline int __must_check
7161-HYPERVISOR_sched_op_compat(
7162- int cmd, unsigned long arg)
7163-{
7164- return _hypercall2(int, sched_op_compat, cmd, arg);
7165-}
7166-
7167-static inline int __must_check
7168-HYPERVISOR_sched_op(
7169- int cmd, void *arg)
7170-{
7171- return _hypercall2(int, sched_op, cmd, arg);
7172-}
7173+#endif
7174
7175 static inline long __must_check
7176 HYPERVISOR_set_timer_op(
7177@@ -230,28 +23,6 @@ HYPERVISOR_set_timer_op(
7178 }
7179
7180 static inline int __must_check
7181-HYPERVISOR_platform_op(
7182- struct xen_platform_op *platform_op)
7183-{
7184- platform_op->interface_version = XENPF_INTERFACE_VERSION;
7185- return _hypercall1(int, platform_op, platform_op);
7186-}
7187-
7188-static inline int __must_check
7189-HYPERVISOR_set_debugreg(
7190- unsigned int reg, unsigned long value)
7191-{
7192- return _hypercall2(int, set_debugreg, reg, value);
7193-}
7194-
7195-static inline unsigned long __must_check
7196-HYPERVISOR_get_debugreg(
7197- unsigned int reg)
7198-{
7199- return _hypercall1(unsigned long, get_debugreg, reg);
7200-}
7201-
7202-static inline int __must_check
7203 HYPERVISOR_update_descriptor(
7204 unsigned long ma, unsigned long word)
7205 {
7206@@ -259,22 +30,6 @@ HYPERVISOR_update_descriptor(
7207 }
7208
7209 static inline int __must_check
7210-HYPERVISOR_memory_op(
7211- unsigned int cmd, void *arg)
7212-{
7213- if (arch_use_lazy_mmu_mode())
7214- xen_multicall_flush(false);
7215- return _hypercall2(int, memory_op, cmd, arg);
7216-}
7217-
7218-static inline int __must_check
7219-HYPERVISOR_multicall(
7220- multicall_entry_t *call_list, unsigned int nr_calls)
7221-{
7222- return _hypercall2(int, multicall, call_list, nr_calls);
7223-}
7224-
7225-static inline int __must_check
7226 HYPERVISOR_update_va_mapping(
7227 unsigned long va, pte_t new_val, unsigned long flags)
7228 {
7229@@ -284,67 +39,6 @@ HYPERVISOR_update_va_mapping(
7230 }
7231
7232 static inline int __must_check
7233-HYPERVISOR_event_channel_op(
7234- int cmd, void *arg)
7235-{
7236- int rc = _hypercall2(int, event_channel_op, cmd, arg);
7237-
7238-#if CONFIG_XEN_COMPAT <= 0x030002
7239- if (unlikely(rc == -ENOSYS)) {
7240- struct evtchn_op op;
7241- op.cmd = cmd;
7242- memcpy(&op.u, arg, sizeof(op.u));
7243- rc = _hypercall1(int, event_channel_op_compat, &op);
7244- memcpy(arg, &op.u, sizeof(op.u));
7245- }
7246-#endif
7247-
7248- return rc;
7249-}
7250-
7251-static inline int __must_check
7252-HYPERVISOR_xen_version(
7253- int cmd, void *arg)
7254-{
7255- return _hypercall2(int, xen_version, cmd, arg);
7256-}
7257-
7258-static inline int __must_check
7259-HYPERVISOR_console_io(
7260- int cmd, unsigned int count, char *str)
7261-{
7262- return _hypercall3(int, console_io, cmd, count, str);
7263-}
7264-
7265-static inline int __must_check
7266-HYPERVISOR_physdev_op(
7267- int cmd, void *arg)
7268-{
7269- int rc = _hypercall2(int, physdev_op, cmd, arg);
7270-
7271-#if CONFIG_XEN_COMPAT <= 0x030002
7272- if (unlikely(rc == -ENOSYS)) {
7273- struct physdev_op op;
7274- op.cmd = cmd;
7275- memcpy(&op.u, arg, sizeof(op.u));
7276- rc = _hypercall1(int, physdev_op_compat, &op);
7277- memcpy(arg, &op.u, sizeof(op.u));
7278- }
7279-#endif
7280-
7281- return rc;
7282-}
7283-
7284-static inline int __must_check
7285-HYPERVISOR_grant_table_op(
7286- unsigned int cmd, void *uop, unsigned int count)
7287-{
7288- if (arch_use_lazy_mmu_mode())
7289- xen_multicall_flush(false);
7290- return _hypercall3(int, grant_table_op, cmd, uop, count);
7291-}
7292-
7293-static inline int __must_check
7294 HYPERVISOR_update_va_mapping_otherdomain(
7295 unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
7296 {
7297@@ -353,83 +47,8 @@ HYPERVISOR_update_va_mapping_otherdomain
7298 }
7299
7300 static inline int __must_check
7301-HYPERVISOR_vm_assist(
7302- unsigned int cmd, unsigned int type)
7303-{
7304- return _hypercall2(int, vm_assist, cmd, type);
7305-}
7306-
7307-static inline int __must_check
7308-HYPERVISOR_vcpu_op(
7309- int cmd, unsigned int vcpuid, void *extra_args)
7310-{
7311- return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
7312-}
7313-
7314-static inline int __must_check
7315 HYPERVISOR_set_segment_base(
7316 int reg, unsigned long value)
7317 {
7318 return _hypercall2(int, set_segment_base, reg, value);
7319 }
7320-
7321-static inline int __must_check
7322-HYPERVISOR_suspend(
7323- unsigned long srec)
7324-{
7325- struct sched_shutdown sched_shutdown = {
7326- .reason = SHUTDOWN_suspend
7327- };
7328-
7329- int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
7330- &sched_shutdown, srec);
7331-
7332-#if CONFIG_XEN_COMPAT <= 0x030002
7333- if (rc == -ENOSYS)
7334- rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
7335- SHUTDOWN_suspend, srec);
7336-#endif
7337-
7338- return rc;
7339-}
7340-
7341-#if CONFIG_XEN_COMPAT <= 0x030002
7342-static inline int
7343-HYPERVISOR_nmi_op(
7344- unsigned long op, void *arg)
7345-{
7346- return _hypercall2(int, nmi_op, op, arg);
7347-}
7348-#endif
7349-
7350-#ifndef CONFIG_XEN
7351-static inline unsigned long __must_check
7352-HYPERVISOR_hvm_op(
7353- int op, void *arg)
7354-{
7355- return _hypercall2(unsigned long, hvm_op, op, arg);
7356-}
7357-#endif
7358-
7359-static inline int __must_check
7360-HYPERVISOR_callback_op(
7361- int cmd, const void *arg)
7362-{
7363- return _hypercall2(int, callback_op, cmd, arg);
7364-}
7365-
7366-static inline int __must_check
7367-HYPERVISOR_xenoprof_op(
7368- int op, void *arg)
7369-{
7370- return _hypercall2(int, xenoprof_op, op, arg);
7371-}
7372-
7373-static inline int __must_check
7374-HYPERVISOR_kexec_op(
7375- unsigned long op, void *args)
7376-{
7377- return _hypercall2(int, kexec_op, op, args);
7378-}
7379-
7380-#endif /* __HYPERCALL_H__ */
7381--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/hypervisor.h 2009-03-04 11:28:11.000000000 +0100
7382+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/hypervisor.h 2009-02-16 16:18:36.000000000 +0100
7383@@ -194,7 +194,6 @@ static inline void xen_multicall_flush(b
7384 extern char hypercall_page[PAGE_SIZE];
7385 #else
7386 extern char *hypercall_stubs;
7387-#define hypercall_page hypercall_stubs
7388 #define is_running_on_xen() (!!hypercall_stubs)
7389 #endif
7390
7391--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7392+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/io.h 2009-02-16 16:18:36.000000000 +0100
7393@@ -0,0 +1,5 @@
7394+#ifdef CONFIG_X86_32
7395+# include "io_32.h"
7396+#else
7397+# include "io_64.h"
7398+#endif
7399--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7400+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/irqflags.h 2009-02-16 16:18:36.000000000 +0100
7401@@ -0,0 +1,5 @@
7402+#ifdef CONFIG_X86_32
7403+# include "irqflags_32.h"
7404+#else
7405+# include "irqflags_64.h"
7406+#endif
7407--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/irqflags_32.h 2008-12-15 11:27:22.000000000 +0100
7408+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/irqflags_32.h 2009-02-16 16:18:36.000000000 +0100
7409@@ -148,6 +148,23 @@ static inline int raw_irqs_disabled_flag
7410 \
7411 raw_irqs_disabled_flags(flags); \
7412 })
7413+
7414+/*
7415+ * makes the traced hardirq state match the machine state
7416+ *
7417+ * should be a rarely used function, only in places where it's
7418+ * otherwise impossible to know the irq state, like in traps.
7419+ */
7420+static inline void trace_hardirqs_fixup_flags(unsigned long flags)
7421+{
7422+ if (raw_irqs_disabled_flags(flags))
7423+ trace_hardirqs_off();
7424+ else
7425+ trace_hardirqs_on();
7426+}
7427+
7428+#define trace_hardirqs_fixup() \
7429+ trace_hardirqs_fixup_flags(__raw_local_save_flags())
7430 #endif /* __ASSEMBLY__ */
7431
7432 /*
7433@@ -179,4 +196,17 @@ static inline int raw_irqs_disabled_flag
7434 # define TRACE_IRQS_OFF
7435 #endif
7436
7437+#ifdef CONFIG_DEBUG_LOCK_ALLOC
7438+# define LOCKDEP_SYS_EXIT \
7439+ pushl %eax; \
7440+ pushl %ecx; \
7441+ pushl %edx; \
7442+ call lockdep_sys_exit; \
7443+ popl %edx; \
7444+ popl %ecx; \
7445+ popl %eax;
7446+#else
7447+# define LOCKDEP_SYS_EXIT
7448+#endif
7449+
7450 #endif
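/*
 * Illustrative sketch (an assumed call site, not from this patch) of
 * how trace_hardirqs_fixup() is meant to be used: a trap can interrupt
 * code with irqs in either state, so the handler resynchronizes
 * lockdep's traced state from the live flags before doing anything
 * else that is traced.  The handler name is hypothetical.
 */
static void example_trap_handler(struct pt_regs *regs)
{
        trace_hardirqs_fixup(); /* traced irq state now matches reality */
        /* ... actual trap handling using regs ... */
}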
7451--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/irqflags_64.h 2008-12-15 11:27:22.000000000 +0100
7452+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/irqflags_64.h 2009-02-16 16:18:36.000000000 +0100
7453@@ -116,6 +116,22 @@ static inline int raw_irqs_disabled_flag
7454 })
7455
7456 /*
7457+ * makes the traced hardirq state match the machine state
7458+ *
7459+ * should be a rarely used function, only in places where it's
7460+ * otherwise impossible to know the irq state, like in traps.
7461+ */
7462+static inline void trace_hardirqs_fixup_flags(unsigned long flags)
7463+{
7464+ if (raw_irqs_disabled_flags(flags))
7465+ trace_hardirqs_off();
7466+ else
7467+ trace_hardirqs_on();
7468+}
7469+
7470+#define trace_hardirqs_fixup() \
7471+ trace_hardirqs_fixup_flags(__raw_local_save_flags())
7472+/*
7473 * Used in the idle loop; sti takes one instruction cycle
7474 * to complete:
7475 */
7476@@ -143,6 +159,20 @@ static inline void halt(void)
7477 # define TRACE_IRQS_ON
7478 # define TRACE_IRQS_OFF
7479 # endif
7480+# ifdef CONFIG_DEBUG_LOCK_ALLOC
7481+# define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
7482+# define LOCKDEP_SYS_EXIT_IRQ \
7483+ TRACE_IRQS_ON; \
7484+ sti; \
7485+ SAVE_REST; \
7486+ LOCKDEP_SYS_EXIT; \
7487+ RESTORE_REST; \
7488+ cli; \
7489+ TRACE_IRQS_OFF;
7490+# else
7491+# define LOCKDEP_SYS_EXIT
7492+# define LOCKDEP_SYS_EXIT_IRQ
7493+# endif
7494 #endif
7495
7496 #endif
7497--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7498+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/maddr.h 2009-02-16 16:18:36.000000000 +0100
7499@@ -0,0 +1,5 @@
7500+#ifdef CONFIG_X86_32
7501+# include "maddr_32.h"
7502+#else
7503+# include "maddr_64.h"
7504+#endif
7505--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7506+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/mmu_context.h 2009-02-16 16:18:36.000000000 +0100
7507@@ -0,0 +1,5 @@
7508+#ifdef CONFIG_X86_32
7509+# include "mmu_context_32.h"
7510+#else
7511+# include "mmu_context_64.h"
7512+#endif
7513--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7514+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/page.h 2009-02-16 16:18:36.000000000 +0100
7515@@ -0,0 +1,13 @@
7516+#ifdef __KERNEL__
7517+# ifdef CONFIG_X86_32
7518+# include "page_32.h"
7519+# else
7520+# include "page_64.h"
7521+# endif
7522+#else
7523+# ifdef __i386__
7524+# include "page_32.h"
7525+# else
7526+# include "page_64.h"
7527+# endif
7528+#endif
7529--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/page_64.h 2009-02-16 16:17:21.000000000 +0100
7530+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/page_64.h 2009-02-16 16:18:36.000000000 +0100
7531@@ -207,6 +207,7 @@ static inline unsigned long __phys_addr(
7532 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7533
7534 #define __HAVE_ARCH_GATE_AREA 1
7535+#define vmemmap ((struct page *)VMEMMAP_START)
7536
7537 #include <asm-generic/memory_model.h>
7538 #include <asm-generic/page.h>
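/*
 * The vmemmap definition added above is what lets the generic memory
 * model (asm-generic/memory_model.h, SPARSEMEM_VMEMMAP flavour) turn
 * pfn <-> struct page translation into plain pointer arithmetic,
 * roughly as follows (illustrative; the real definitions live in
 * asm-generic/memory_model.h):
 */
#define __pfn_to_page(pfn)      (vmemmap + (pfn))
#define __page_to_pfn(page)     (unsigned long)((page) - vmemmap)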
7539--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7540+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/pci.h 2009-02-16 16:18:36.000000000 +0100
7541@@ -0,0 +1,100 @@
7542+#ifndef __x86_PCI_H
7543+#define __x86_PCI_H
7544+
7545+#include <linux/mm.h> /* for struct page */
7546+#include <linux/types.h>
7547+#include <linux/slab.h>
7548+#include <linux/string.h>
7549+#include <asm/scatterlist.h>
7550+#include <asm/io.h>
7551+
7552+
7553+#ifdef __KERNEL__
7554+
7555+struct pci_sysdata {
7556+ int domain; /* PCI domain */
7557+ int node; /* NUMA node */
7558+#ifdef CONFIG_X86_64
7559+ void* iommu; /* IOMMU private data */
7560+#endif
7561+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
7562+ struct pcifront_device *pdev;
7563+#endif
7564+};
7565+
7566+/* scan a bus after allocating a pci_sysdata for it */
7567+extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
7568+
7569+static inline int pci_domain_nr(struct pci_bus *bus)
7570+{
7571+ struct pci_sysdata *sd = bus->sysdata;
7572+ return sd->domain;
7573+}
7574+
7575+static inline int pci_proc_domain(struct pci_bus *bus)
7576+{
7577+ return pci_domain_nr(bus);
7578+}
7579+
7580+
7581+/* Can be used to override the logic in pci_scan_bus for skipping
7582+ already-configured bus numbers - to be used for buggy BIOSes
7583+ or architectures with incomplete PCI setup by the loader */
7584+
7585+#ifdef CONFIG_PCI
7586+extern unsigned int pcibios_assign_all_busses(void);
7587+#else
7588+#define pcibios_assign_all_busses() 0
7589+#endif
7590+
7591+#include <asm/hypervisor.h>
7592+#define pcibios_scan_all_fns(a, b) (!is_initial_xendomain())
7593+
7594+extern unsigned long pci_mem_start;
7595+#define PCIBIOS_MIN_IO 0x1000
7596+#define PCIBIOS_MIN_MEM (pci_mem_start)
7597+
7598+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
7599+
7600+void pcibios_config_init(void);
7601+struct pci_bus * pcibios_scan_root(int bus);
7602+
7603+void pcibios_set_master(struct pci_dev *dev);
7604+void pcibios_penalize_isa_irq(int irq, int active);
7605+struct irq_routing_table *pcibios_get_irq_routing_table(void);
7606+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
7607+
7608+
7609+#define HAVE_PCI_MMAP
7610+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
7611+ enum pci_mmap_state mmap_state, int write_combine);
7612+
7613+
7614+#ifdef CONFIG_PCI
7615+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
7616+ enum pci_dma_burst_strategy *strat,
7617+ unsigned long *strategy_parameter)
7618+{
7619+ *strat = PCI_DMA_BURST_INFINITY;
7620+ *strategy_parameter = ~0UL;
7621+}
7622+#endif
7623+
7624+
7625+#endif /* __KERNEL__ */
7626+
7627+#ifdef CONFIG_X86_32
7628+# include "pci_32.h"
7629+#else
7630+# include "pci_64.h"
7631+#endif
7632+
7633+/* implement the pci_ DMA API in terms of the generic device dma_ one */
7634+#include <asm-generic/pci-dma-compat.h>
7635+
7636+/* generic pci stuff */
7637+#include <asm-generic/pci.h>
7638+
7639+
7640+
7641+#endif
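/*
 * Hypothetical usage sketch: pci_domain_nr() and pci_proc_domain()
 * above dereference bus->sysdata unconditionally, so whoever scans a
 * bus must supply a struct pci_sysdata.  The function name and field
 * values here are illustrative, and pci_root_ops is assumed visible
 * from the arch PCI code.
 */
#include <linux/pci.h>
#include <linux/slab.h>

static struct pci_bus *example_scan_bus(int busno, int node)
{
        struct pci_sysdata *sd = kzalloc(sizeof(*sd), GFP_KERNEL);

        if (!sd)
                return NULL;
        sd->domain = 0;         /* single PCI domain assumed */
        sd->node = node;        /* NUMA node owning this bus */
        return pci_scan_bus(busno, &pci_root_ops, sd);
}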
7642--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/pci_32.h 2009-02-16 16:17:21.000000000 +0100
7643+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/pci_32.h 2009-02-16 16:18:36.000000000 +0100
7644@@ -4,52 +4,10 @@
7645
7646 #ifdef __KERNEL__
7647
7648-struct pci_sysdata {
7649- int node; /* NUMA node */
7650-};
7651-
7652-/* scan a bus after allocating a pci_sysdata for it */
7653-extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
7654-
7655-#include <linux/mm.h> /* for struct page */
7656-
7657-/* Can be used to override the logic in pci_scan_bus for skipping
7658- already-configured bus numbers - to be used for buggy BIOSes
7659- or architectures with incomplete PCI setup by the loader */
7660-
7661-#ifdef CONFIG_PCI
7662-extern unsigned int pcibios_assign_all_busses(void);
7663-#else
7664-#define pcibios_assign_all_busses() 0
7665-#endif
7666-
7667-#include <asm/hypervisor.h>
7668-#define pcibios_scan_all_fns(a, b) (!is_initial_xendomain())
7669-
7670-extern unsigned long pci_mem_start;
7671-#define PCIBIOS_MIN_IO 0x1000
7672-#define PCIBIOS_MIN_MEM (pci_mem_start)
7673-
7674-#define PCIBIOS_MIN_CARDBUS_IO 0x4000
7675-
7676-void pcibios_config_init(void);
7677-struct pci_bus * pcibios_scan_root(int bus);
7678-
7679-void pcibios_set_master(struct pci_dev *dev);
7680-void pcibios_penalize_isa_irq(int irq, int active);
7681-struct irq_routing_table *pcibios_get_irq_routing_table(void);
7682-int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
7683-
7684 /* Dynamic DMA mapping stuff.
7685 * i386 has everything mapped statically.
7686 */
7687
7688-#include <linux/types.h>
7689-#include <linux/slab.h>
7690-#include <asm/scatterlist.h>
7691-#include <linux/string.h>
7692-#include <asm/io.h>
7693-
7694 struct pci_dev;
7695
7696 #ifdef CONFIG_SWIOTLB
7697@@ -89,31 +47,8 @@ struct pci_dev;
7698
7699 #endif
7700
7701-#define HAVE_PCI_MMAP
7702-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
7703- enum pci_mmap_state mmap_state, int write_combine);
7704-
7705-
7706-#ifdef CONFIG_PCI
7707-static inline void pci_dma_burst_advice(struct pci_dev *pdev,
7708- enum pci_dma_burst_strategy *strat,
7709- unsigned long *strategy_parameter)
7710-{
7711- *strat = PCI_DMA_BURST_INFINITY;
7712- *strategy_parameter = ~0UL;
7713-}
7714-#endif
7715
7716 #endif /* __KERNEL__ */
7717
7718-#ifdef CONFIG_XEN_PCIDEV_FRONTEND
7719-#include <xen/pcifront.h>
7720-#endif /* CONFIG_XEN_PCIDEV_FRONTEND */
7721-
7722-/* implement the pci_ DMA API in terms of the generic device dma_ one */
7723-#include <asm-generic/pci-dma-compat.h>
7724-
7725-/* generic pci stuff */
7726-#include <asm-generic/pci.h>
7727
7728 #endif /* __i386_PCI_H */
7729--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7730+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/pgalloc.h 2009-02-16 16:18:36.000000000 +0100
7731@@ -0,0 +1,5 @@
7732+#ifdef CONFIG_X86_32
7733+# include "pgalloc_32.h"
7734+#else
7735+# include "pgalloc_64.h"
7736+#endif
7737--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/pgalloc_64.h 2009-02-16 16:17:21.000000000 +0100
7738+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/pgalloc_64.h 2009-02-16 16:18:36.000000000 +0100
7739@@ -112,6 +112,8 @@ static inline void pgd_list_del(pgd_t *p
7740 spin_unlock(&pgd_lock);
7741 }
7742
7743+extern void pgd_test_and_unpin(pgd_t *);
7744+
7745 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
7746 {
7747 /*
7748@@ -122,6 +124,7 @@ static inline pgd_t *pgd_alloc(struct mm
7749 if (!pgd)
7750 return NULL;
7751 pgd_list_add(pgd);
7752+ pgd_test_and_unpin(pgd);
7753 /*
7754 * Copy kernel pointers in from init.
7755 * Could keep a freelist or slab cache of those because the kernel
7756@@ -144,27 +147,7 @@ static inline pgd_t *pgd_alloc(struct mm
7757
7758 static inline void pgd_free(pgd_t *pgd)
7759 {
7760- pte_t *ptep = virt_to_ptep(pgd);
7761-
7762- if (!pte_write(*ptep)) {
7763- xen_pgd_unpin(__pa(pgd));
7764- BUG_ON(HYPERVISOR_update_va_mapping(
7765- (unsigned long)pgd,
7766- pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
7767- 0));
7768- }
7769-
7770- ptep = virt_to_ptep(__user_pgd(pgd));
7771-
7772- if (!pte_write(*ptep)) {
7773- xen_pgd_unpin(__pa(__user_pgd(pgd)));
7774- BUG_ON(HYPERVISOR_update_va_mapping(
7775- (unsigned long)__user_pgd(pgd),
7776- pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT,
7777- PAGE_KERNEL),
7778- 0));
7779- }
7780-
7781+ pgd_test_and_unpin(pgd);
7782 pgd_list_del(pgd);
7783 free_pages((unsigned long)pgd, 1);
7784 }
7785--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7786+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/pgtable.h 2009-02-16 16:18:36.000000000 +0100
7787@@ -0,0 +1,5 @@
7788+#ifdef CONFIG_X86_32
7789+# include "pgtable_32.h"
7790+#else
7791+# include "pgtable_64.h"
7792+#endif
7793--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/pgtable_32.h 2009-02-16 16:17:21.000000000 +0100
7794+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/pgtable_32.h 2009-02-16 16:18:36.000000000 +0100
7795@@ -17,10 +17,7 @@
7796 #include <asm/fixmap.h>
7797 #include <linux/threads.h>
7798
7799-#ifndef _I386_BITOPS_H
7800-#include <asm/bitops.h>
7801-#endif
7802-
7803+#include <linux/bitops.h>
7804 #include <linux/slab.h>
7805 #include <linux/list.h>
7806 #include <linux/spinlock.h>
7807@@ -40,7 +37,7 @@ extern spinlock_t pgd_lock;
7808 extern struct page *pgd_list;
7809 void check_pgt_cache(void);
7810
7811-void pmd_ctor(void *, struct kmem_cache *, unsigned long);
7812+void pmd_ctor(struct kmem_cache *, void *);
7813 void pgtable_cache_init(void);
7814 void paging_init(void);
7815
7816--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/pgtable_64.h 2009-02-16 16:17:21.000000000 +0100
7817+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/pgtable_64.h 2009-02-16 16:18:36.000000000 +0100
7818@@ -9,7 +9,7 @@
7819 * the x86-64 page table tree.
7820 */
7821 #include <asm/processor.h>
7822-#include <asm/bitops.h>
7823+#include <linux/bitops.h>
7824 #include <linux/threads.h>
7825 #include <linux/sched.h>
7826 #include <asm/pda.h>
7827@@ -139,6 +139,7 @@ static inline void pgd_clear (pgd_t * pg
7828 #define MAXMEM _AC(0x3fffffffffff, UL)
7829 #define VMALLOC_START _AC(0xffffc20000000000, UL)
7830 #define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
7831+#define VMEMMAP_START _AC(0xffffe20000000000, UL)
7832 #define MODULES_VADDR _AC(0xffffffff88000000, UL)
7833 #define MODULES_END _AC(0xfffffffffff00000, UL)
7834 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
7835--- /dev/null 1970-01-01 00:00:00.000000000 +0000
7836+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/processor.h 2009-02-16 16:18:36.000000000 +0100
7837@@ -0,0 +1,5 @@
7838+#ifdef CONFIG_X86_32
7839+# include "processor_32.h"
7840+#else
7841+# include "processor_64.h"
7842+#endif
7843--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/processor_32.h 2009-02-16 16:17:21.000000000 +0100
7844+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/processor_32.h 2009-02-16 16:18:36.000000000 +0100
7845@@ -80,6 +80,7 @@ struct cpuinfo_x86 {
7846 unsigned char booted_cores; /* number of cores as seen by OS */
7847 __u8 phys_proc_id; /* Physical processor id. */
7848 __u8 cpu_core_id; /* Core id */
7849+ __u8 cpu_index; /* index into per_cpu list */
7850 #endif
7851 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
7852
7853@@ -106,14 +107,19 @@ DECLARE_PER_CPU(struct tss_struct, init_
7854 #endif
7855
7856 #ifdef CONFIG_SMP
7857-extern struct cpuinfo_x86 cpu_data[];
7858-#define current_cpu_data cpu_data[smp_processor_id()]
7859+DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
7860+#define cpu_data(cpu) per_cpu(cpu_info, cpu)
7861+#define current_cpu_data cpu_data(smp_processor_id())
7862 #else
7863-#define cpu_data (&boot_cpu_data)
7864-#define current_cpu_data boot_cpu_data
7865+#define cpu_data(cpu) boot_cpu_data
7866+#define current_cpu_data boot_cpu_data
7867 #endif
7868
7869-extern int cpu_llc_id[NR_CPUS];
7870+/*
7871+ * the following now lives in the per cpu area:
7872+ * extern int cpu_llc_id[NR_CPUS];
7873+ */
7874+DECLARE_PER_CPU(u8, cpu_llc_id);
7875 extern char ignore_fpu_irq;
7876
7877 void __init cpu_detect(struct cpuinfo_x86 *c);
7878@@ -560,7 +566,9 @@ static inline void xen_set_iopl_mask(uns
7879 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
7880 * resulting in stale register contents being returned.
7881 */
7882-static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
7883+static inline void cpuid(unsigned int op,
7884+ unsigned int *eax, unsigned int *ebx,
7885+ unsigned int *ecx, unsigned int *edx)
7886 {
7887 *eax = op;
7888 *ecx = 0;
7889@@ -568,8 +576,9 @@ static inline void cpuid(unsigned int op
7890 }
7891
7892 /* Some CPUID calls want 'count' to be placed in ecx */
7893-static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
7894- int *edx)
7895+static inline void cpuid_count(unsigned int op, int count,
7896+ unsigned int *eax, unsigned int *ebx,
7897+ unsigned int *ecx, unsigned int *edx)
7898 {
7899 *eax = op;
7900 *ecx = count;
7901@@ -639,6 +648,17 @@ static inline unsigned int cpuid_edx(uns
7902 #define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
7903 #define K7_NOP8 K7_NOP7 ASM_NOP1
7904
7905+/* P6 nops */
7906+/* uses eax dependencies (Intel-recommended choice) */
7907+#define P6_NOP1 GENERIC_NOP1
7908+#define P6_NOP2 ".byte 0x66,0x90\n"
7909+#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
7910+#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
7911+#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
7912+#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
7913+#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
7914+#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
7915+
7916 #ifdef CONFIG_MK8
7917 #define ASM_NOP1 K8_NOP1
7918 #define ASM_NOP2 K8_NOP2
7919@@ -657,6 +677,17 @@ static inline unsigned int cpuid_edx(uns
7920 #define ASM_NOP6 K7_NOP6
7921 #define ASM_NOP7 K7_NOP7
7922 #define ASM_NOP8 K7_NOP8
7923+#elif defined(CONFIG_M686) || defined(CONFIG_MPENTIUMII) || \
7924+ defined(CONFIG_MPENTIUMIII) || defined(CONFIG_MPENTIUMM) || \
7925+ defined(CONFIG_MCORE2) || defined(CONFIG_PENTIUM4)
7926+#define ASM_NOP1 P6_NOP1
7927+#define ASM_NOP2 P6_NOP2
7928+#define ASM_NOP3 P6_NOP3
7929+#define ASM_NOP4 P6_NOP4
7930+#define ASM_NOP5 P6_NOP5
7931+#define ASM_NOP6 P6_NOP6
7932+#define ASM_NOP7 P6_NOP7
7933+#define ASM_NOP8 P6_NOP8
7934 #else
7935 #define ASM_NOP1 GENERIC_NOP1
7936 #define ASM_NOP2 GENERIC_NOP2
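/*
 * Illustrative only: the conversion above turns cpu_data from a static
 * array into a per-CPU variable, so callers move from array indexing
 * to the cpu_data(cpu) accessor.  Old form shown in the comment.
 */
static unsigned int example_cpu_family(int cpu)
{
        return cpu_data(cpu).x86;       /* was: cpu_data[cpu].x86 */
}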
7937--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/processor_64.h 2009-02-16 16:17:21.000000000 +0100
7938+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/processor_64.h 2009-02-16 16:18:36.000000000 +0100
7939@@ -74,6 +74,7 @@ struct cpuinfo_x86 {
7940 __u8 booted_cores; /* number of cores as seen by OS */
7941 __u8 phys_proc_id; /* Physical Processor id. */
7942 __u8 cpu_core_id; /* Core id. */
7943+ __u8 cpu_index; /* index into per_cpu list */
7944 #endif
7945 } ____cacheline_aligned;
7946
7947@@ -88,11 +89,12 @@ struct cpuinfo_x86 {
7948 #define X86_VENDOR_UNKNOWN 0xff
7949
7950 #ifdef CONFIG_SMP
7951-extern struct cpuinfo_x86 cpu_data[];
7952-#define current_cpu_data cpu_data[smp_processor_id()]
7953+DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
7954+#define cpu_data(cpu) per_cpu(cpu_info, cpu)
7955+#define current_cpu_data cpu_data(smp_processor_id())
7956 #else
7957-#define cpu_data (&boot_cpu_data)
7958-#define current_cpu_data boot_cpu_data
7959+#define cpu_data(cpu) boot_cpu_data
7960+#define current_cpu_data boot_cpu_data
7961 #endif
7962
7963 extern char ignore_irq13;
7964@@ -343,6 +345,16 @@ struct extended_sigtable {
7965 };
7966
7967
7968+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2)
7969+#define ASM_NOP1 P6_NOP1
7970+#define ASM_NOP2 P6_NOP2
7971+#define ASM_NOP3 P6_NOP3
7972+#define ASM_NOP4 P6_NOP4
7973+#define ASM_NOP5 P6_NOP5
7974+#define ASM_NOP6 P6_NOP6
7975+#define ASM_NOP7 P6_NOP7
7976+#define ASM_NOP8 P6_NOP8
7977+#else
7978 #define ASM_NOP1 K8_NOP1
7979 #define ASM_NOP2 K8_NOP2
7980 #define ASM_NOP3 K8_NOP3
7981@@ -351,6 +363,7 @@ struct extended_sigtable {
7982 #define ASM_NOP6 K8_NOP6
7983 #define ASM_NOP7 K8_NOP7
7984 #define ASM_NOP8 K8_NOP8
7985+#endif
7986
7987 /* Opteron nops */
7988 #define K8_NOP1 ".byte 0x90\n"
7989@@ -362,6 +375,17 @@ struct extended_sigtable {
7990 #define K8_NOP7 K8_NOP4 K8_NOP3
7991 #define K8_NOP8 K8_NOP4 K8_NOP4
7992
7993+/* P6 nops */
7994+/* uses eax dependencies (Intel-recommended choice) */
7995+#define P6_NOP1 ".byte 0x90\n"
7996+#define P6_NOP2 ".byte 0x66,0x90\n"
7997+#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
7998+#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
7999+#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
8000+#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
8001+#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
8002+#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
8003+
8004 #define ASM_NOP_MAX 8
8005
8006 /* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
8007@@ -377,12 +401,6 @@ static inline void sync_core(void)
8008 asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
8009 }
8010
8011-#define ARCH_HAS_PREFETCH
8012-static inline void prefetch(void *x)
8013-{
8014- asm volatile("prefetcht0 (%0)" :: "r" (x));
8015-}
8016-
8017 #define ARCH_HAS_PREFETCHW 1
8018 static inline void prefetchw(void *x)
8019 {
8020@@ -398,11 +416,6 @@ static inline void prefetchw(void *x)
8021
8022 #define cpu_relax() rep_nop()
8023
8024-static inline void serialize_cpu(void)
8025-{
8026- __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
8027-}
8028-
8029 static inline void __monitor(const void *eax, unsigned long ecx,
8030 unsigned long edx)
8031 {
8032--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8033+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/segment.h 2009-02-16 16:18:36.000000000 +0100
8034@@ -0,0 +1,5 @@
8035+#ifdef CONFIG_X86_32
8036+# include "segment_32.h"
8037+#else
8038+# include "../../segment_64.h"
8039+#endif
8040--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8041+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/smp.h 2009-02-16 16:18:36.000000000 +0100
8042@@ -0,0 +1,5 @@
8043+#ifdef CONFIG_X86_32
8044+# include "smp_32.h"
8045+#else
8046+# include "smp_64.h"
8047+#endif
8048--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/smp_32.h 2008-12-15 11:27:22.000000000 +0100
8049+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/smp_32.h 2009-02-16 16:18:36.000000000 +0100
8050@@ -11,7 +11,7 @@
8051 #endif
8052
8053 #if defined(CONFIG_X86_LOCAL_APIC) && !defined(__ASSEMBLY__)
8054-#include <asm/bitops.h>
8055+#include <linux/bitops.h>
8056 #include <asm/mpspec.h>
8057 #include <asm/apic.h>
8058 #ifdef CONFIG_X86_IO_APIC
8059@@ -30,8 +30,8 @@
8060 extern void smp_alloc_memory(void);
8061 extern int pic_mode;
8062 extern int smp_num_siblings;
8063-extern cpumask_t cpu_sibling_map[];
8064-extern cpumask_t cpu_core_map[];
8065+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
8066+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
8067
8068 extern void (*mtrr_hook) (void);
8069 extern void zap_low_mappings (void);
8070@@ -39,9 +39,11 @@ extern void lock_ipi_call_lock(void);
8071 extern void unlock_ipi_call_lock(void);
8072
8073 #define MAX_APICID 256
8074-extern u8 x86_cpu_to_apicid[];
8075+extern u8 __initdata x86_cpu_to_apicid_init[];
8076+extern void *x86_cpu_to_apicid_ptr;
8077+DECLARE_PER_CPU(u8, x86_cpu_to_apicid);
8078
8079-#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
8080+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
8081
8082 #ifdef CONFIG_HOTPLUG_CPU
8083 extern void cpu_exit_clear(void);
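/*
 * Sketch of an assumed caller, not from this patch: walking a CPU's
 * siblings now that cpu_sibling_map lives in the per-cpu area.
 * example_sibling_note() is a placeholder.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>

static void example_sibling_note(int cpu)
{
        /* placeholder per-sibling work */
}

static void example_walk_siblings(int cpu)
{
        int sibling;

        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
                example_sibling_note(sibling);
}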
8084--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/smp_64.h 2008-12-15 11:27:22.000000000 +0100
8085+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/smp_64.h 2009-02-16 16:18:36.000000000 +0100
8086@@ -40,10 +40,19 @@ extern void lock_ipi_call_lock(void);
8087 extern void unlock_ipi_call_lock(void);
8088 extern int smp_num_siblings;
8089 extern void smp_send_reschedule(int cpu);
8090+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
8091+ void *info, int wait);
8092
8093-extern cpumask_t cpu_sibling_map[NR_CPUS];
8094-extern cpumask_t cpu_core_map[NR_CPUS];
8095-extern u8 cpu_llc_id[NR_CPUS];
8096+/*
8097+ * cpu_sibling_map and cpu_core_map now live
8098+ * in the per cpu area
8099+ *
8100+ * extern cpumask_t cpu_sibling_map[NR_CPUS];
8101+ * extern cpumask_t cpu_core_map[NR_CPUS];
8102+ */
8103+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
8104+DECLARE_PER_CPU(cpumask_t, cpu_core_map);
8105+DECLARE_PER_CPU(u8, cpu_llc_id);
8106
8107 #define SMP_TRAMPOLINE_BASE 0x6000
8108
8109@@ -70,6 +79,8 @@ extern unsigned __cpuinitdata disabled_c
8110
8111 #endif /* CONFIG_SMP */
8112
8113+#define safe_smp_processor_id() smp_processor_id()
8114+
8115 #ifdef CONFIG_X86_LOCAL_APIC
8116 static inline int hard_smp_processor_id(void)
8117 {
8118@@ -82,8 +93,9 @@ static inline int hard_smp_processor_id(
8119 * Some lowlevel functions might want to know about
8120 * the real APIC ID <-> CPU # mapping.
8121 */
8122-extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
8123-extern u8 x86_cpu_to_log_apicid[NR_CPUS];
8124+extern u8 __initdata x86_cpu_to_apicid_init[];
8125+extern void *x86_cpu_to_apicid_ptr;
8126+DECLARE_PER_CPU(u8, x86_cpu_to_apicid); /* physical ID */
8127 extern u8 bios_cpu_apicid[];
8128
8129 #ifdef CONFIG_X86_LOCAL_APIC
8130@@ -118,8 +130,9 @@ static __inline int logical_smp_processo
8131 #endif
8132
8133 #ifdef CONFIG_SMP
8134-#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
8135+#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
8136 #else
8137+extern unsigned int boot_cpu_id;
8138 #define cpu_physical_id(cpu) boot_cpu_id
8139 #endif /* !CONFIG_SMP */
8140 #endif
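/*
 * Hypothetical sketch of the smp_call_function_mask() interface
 * declared above: run a function on the online CPUs in a mask.  The
 * current CPU is cleared from the mask here, and a real caller needs
 * preemption disabled around smp_processor_id(); example_ipi is a
 * placeholder.
 */
static void example_ipi(void *info)
{
        /* runs on every CPU left in the mask */
}

static void example_kick_others(void)
{
        cpumask_t mask = cpu_online_map;

        cpu_clear(smp_processor_id(), mask);    /* do not IPI ourselves */
        smp_call_function_mask(mask, example_ipi, NULL, 1 /* wait */);
}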
8141--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8142+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/swiotlb.h 2009-02-16 16:18:36.000000000 +0100
8143@@ -0,0 +1,5 @@
8144+#ifdef CONFIG_X86_32
8145+# include "swiotlb_32.h"
8146+#else
8147+# include "../../swiotlb.h"
8148+#endif
8149--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8150+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/system.h 2009-02-16 16:18:36.000000000 +0100
8151@@ -0,0 +1,5 @@
8152+#ifdef CONFIG_X86_32
8153+# include "system_32.h"
8154+#else
8155+# include "system_64.h"
8156+#endif
8157--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/system_32.h 2009-02-16 16:17:21.000000000 +0100
8158+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/system_32.h 2009-02-16 16:18:36.000000000 +0100
8159@@ -9,6 +9,7 @@
8160 #include <asm/hypervisor.h>
8161
8162 #ifdef __KERNEL__
8163+#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
8164
8165 struct task_struct; /* one of the stranger aspects of C forward declarations.. */
8166 extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
8167@@ -138,7 +139,7 @@ static inline unsigned long xen_read_cr4
8168 {
8169 unsigned long val;
8170 /* This could fault if %cr4 does not exist */
8171- asm("1: movl %%cr4, %0 \n"
8172+ asm volatile("1: movl %%cr4, %0 \n"
8173 "2: \n"
8174 ".section __ex_table,\"a\" \n"
8175 ".long 1b,2b \n"
8176@@ -157,6 +158,11 @@ static inline void xen_wbinvd(void)
8177 asm volatile("wbinvd": : :"memory");
8178 }
8179
8180+static inline void clflush(volatile void *__p)
8181+{
8182+ asm volatile("clflush %0" : "+m" (*(char __force *)__p));
8183+}
8184+
8185 #define read_cr0() (xen_read_cr0())
8186 #define write_cr0(x) (xen_write_cr0(x))
8187 #define read_cr2() (xen_read_cr2())
8188@@ -207,6 +213,7 @@ static inline unsigned long get_limit(un
8189
8190 #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
8191 #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
8192+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
8193
8194 /**
8195 * read_barrier_depends - Flush all pending reads that subsequents reads
8196@@ -262,18 +269,18 @@ static inline unsigned long get_limit(un
8197
8198 #define read_barrier_depends() do { } while(0)
8199
8200+#ifdef CONFIG_SMP
8201+#define smp_mb() mb()
8202+#ifdef CONFIG_X86_PPRO_FENCE
8203+# define smp_rmb() rmb()
8204+#else
8205+# define smp_rmb() barrier()
8206+#endif
8207 #ifdef CONFIG_X86_OOSTORE
8208-/* Actually there are no OOO store capable CPUs for now that do SSE,
8209- but make it already an possibility. */
8210-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
8211+# define smp_wmb() wmb()
8212 #else
8213-#define wmb() __asm__ __volatile__ ("": : :"memory")
8214+# define smp_wmb() barrier()
8215 #endif
8216-
8217-#ifdef CONFIG_SMP
8218-#define smp_mb() mb()
8219-#define smp_rmb() rmb()
8220-#define smp_wmb() wmb()
8221 #define smp_read_barrier_depends() read_barrier_depends()
8222 #define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
8223 #else
8224@@ -300,5 +307,6 @@ extern unsigned long arch_align_stack(un
8225 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8226
8227 void default_idle(void);
8228+void __show_registers(struct pt_regs *, int all);
8229
8230 #endif
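/*
 * Minimal usage sketch, assumed rather than taken from this patch:
 * the new clflush() flushes one cache line, and the mb() fences order
 * it against surrounding accesses, e.g. before handing a buffer line
 * to a device that reads memory directly.
 */
static void example_flush_line(volatile void *p)
{
        mb();           /* order earlier stores before the flush */
        clflush(p);
        mb();           /* complete the flush before continuing */
}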
8231--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/system_64.h 2009-02-16 16:17:21.000000000 +0100
8232+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/system_64.h 2009-02-16 16:18:36.000000000 +0100
8233@@ -11,8 +11,12 @@
8234
8235 #ifdef __KERNEL__
8236
8237-#define __STR(x) #x
8238-#define STR(x) __STR(x)
8239+/* entries in ARCH_DLINFO: */
8240+#ifdef CONFIG_IA32_EMULATION
8241+# define AT_VECTOR_SIZE_ARCH 2
8242+#else
8243+# define AT_VECTOR_SIZE_ARCH 1
8244+#endif
8245
8246 #define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
8247 #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
8248@@ -92,7 +96,7 @@ static inline void write_cr0(unsigned lo
8249
8250 #define read_cr3() ({ \
8251 unsigned long __dummy; \
8252- asm("movq %%cr3,%0" : "=r" (__dummy)); \
8253+ asm volatile("movq %%cr3,%0" : "=r" (__dummy)); \
8254 machine_to_phys(__dummy); \
8255 })
8256
8257@@ -105,7 +109,7 @@ static inline void write_cr3(unsigned lo
8258 static inline unsigned long read_cr4(void)
8259 {
8260 unsigned long cr4;
8261- asm("movq %%cr4,%0" : "=r" (cr4));
8262+ asm volatile("movq %%cr4,%0" : "=r" (cr4));
8263 return cr4;
8264 }
8265
8266@@ -131,12 +135,17 @@ static inline void write_cr8(unsigned lo
8267
8268 #endif /* __KERNEL__ */
8269
8270+static inline void clflush(volatile void *__p)
8271+{
8272+ asm volatile("clflush %0" : "+m" (*(char __force *)__p));
8273+}
8274+
8275 #define nop() __asm__ __volatile__ ("nop")
8276
8277 #ifdef CONFIG_SMP
8278 #define smp_mb() mb()
8279-#define smp_rmb() rmb()
8280-#define smp_wmb() wmb()
8281+#define smp_rmb() barrier()
8282+#define smp_wmb() barrier()
8283 #define smp_read_barrier_depends() do {} while(0)
8284 #else
8285 #define smp_mb() barrier()
8286@@ -153,12 +162,8 @@ static inline void write_cr8(unsigned lo
8287 */
8288 #define mb() asm volatile("mfence":::"memory")
8289 #define rmb() asm volatile("lfence":::"memory")
8290-
8291-#ifdef CONFIG_UNORDERED_IO
8292 #define wmb() asm volatile("sfence" ::: "memory")
8293-#else
8294-#define wmb() asm volatile("" ::: "memory")
8295-#endif
8296+
8297 #define read_barrier_depends() do {} while(0)
8298 #define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
8299
8300--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8301+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/tlbflush.h 2009-02-16 16:18:36.000000000 +0100
8302@@ -0,0 +1,5 @@
8303+#ifdef CONFIG_X86_32
8304+# include "tlbflush_32.h"
8305+#else
8306+# include "tlbflush_64.h"
8307+#endif
8308--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/tlbflush_32.h 2009-02-16 16:17:21.000000000 +0100
8309+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/tlbflush_32.h 2009-02-16 16:18:36.000000000 +0100
8310@@ -23,7 +23,6 @@
8311 * - flush_tlb_page(vma, vmaddr) flushes one page
8312 * - flush_tlb_range(vma, start, end) flushes a range of pages
8313 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
8314- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
8315 *
8316 * ..but the i386 has somewhat limited tlb flushing capabilities,
8317 * and page-granular flushes are available only on i486 and up.
8318@@ -97,10 +96,4 @@ static inline void flush_tlb_kernel_rang
8319 flush_tlb_all();
8320 }
8321
8322-static inline void flush_tlb_pgtables(struct mm_struct *mm,
8323- unsigned long start, unsigned long end)
8324-{
8325- /* i386 does not keep any page table caches in TLB */
8326-}
8327-
8328 #endif /* _I386_TLBFLUSH_H */
8329--- sle11-2009-10-16.orig/include/asm-x86/mach-xen/asm/tlbflush_64.h 2009-02-16 16:17:21.000000000 +0100
8330+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/tlbflush_64.h 2009-02-16 16:18:36.000000000 +0100
8331@@ -28,7 +28,6 @@
8332 * - flush_tlb_page(vma, vmaddr) flushes one page
8333 * - flush_tlb_range(vma, start, end) flushes a range of pages
8334 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
8335- * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
8336 *
8337 * x86-64 can only flush individual pages or full VMs. For a range flush
8338 * we always do the full VM. Might be worth trying if for a small
8339@@ -95,12 +94,4 @@ static inline void flush_tlb_kernel_rang
8340 flush_tlb_all();
8341 }
8342
8343-static inline void flush_tlb_pgtables(struct mm_struct *mm,
8344- unsigned long start, unsigned long end)
8345-{
8346- /* x86_64 does not keep any page table caches in a software TLB.
8347- The CPUs do in their hardware TLBs, but they are handled
8348- by the normal TLB flushing algorithms. */
8349-}
8350-
8351 #endif /* _X8664_TLBFLUSH_H */
8352--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8353+++ sle11-2009-10-16/include/asm-x86/mach-xen/asm/xor.h 2009-02-16 16:18:36.000000000 +0100
8354@@ -0,0 +1,5 @@
8355+#ifdef CONFIG_X86_32
8356+# include "../../xor_32.h"
8357+#else
8358+# include "xor_64.h"
8359+#endif
8360--- sle11-2009-10-16.orig/include/asm-x86/mmu.h 2009-10-28 14:55:06.000000000 +0100
8361+++ sle11-2009-10-16/include/asm-x86/mmu.h 2009-02-16 16:18:36.000000000 +0100
8362@@ -16,6 +16,9 @@ typedef struct {
8363 rwlock_t ldtlock;
8364 #endif
8365 int size;
8366+#ifdef CONFIG_XEN
8367+ unsigned has_foreign_mappings:1;
8368+#endif
8369 struct mutex lock;
8370 void *vdso;
8371 } mm_context_t;
8372--- sle11-2009-10-16.orig/include/linux/kexec.h 2009-08-26 11:52:33.000000000 +0200
8373+++ sle11-2009-10-16/include/linux/kexec.h 2009-02-17 12:43:57.000000000 +0100
8374@@ -202,8 +202,15 @@ extern int dump_after_notifier;
8375 #define VMCOREINFO_BYTES (4096)
8376 #define VMCOREINFO_NOTE_NAME "VMCOREINFO"
8377 #define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
8378+#if !defined(CONFIG_XEN) || !defined(CONFIG_X86)
8379 #define VMCOREINFO_NOTE_SIZE (KEXEC_NOTE_HEAD_BYTES*2 + VMCOREINFO_BYTES \
8380 + VMCOREINFO_NOTE_NAME_BYTES)
8381+#else
8382+#define VMCOREINFO_NOTE_SIZE ALIGN(KEXEC_NOTE_HEAD_BYTES*2 \
8383+ + VMCOREINFO_BYTES \
8384+ + VMCOREINFO_NOTE_NAME_BYTES, \
8385+ PAGE_SIZE)
8386+#endif
8387
8388 /* Location of a reserved region to hold the crash kernel.
8389 */
8390--- sle11-2009-10-16.orig/include/linux/oprofile.h 2009-06-29 15:28:01.000000000 +0200
8391+++ sle11-2009-10-16/include/linux/oprofile.h 2009-06-29 15:28:57.000000000 +0200
8392@@ -119,6 +119,8 @@ void oprofile_add_pc(unsigned long pc, i
8393 /* add a backtrace entry, to be called from the ->backtrace callback */
8394 void oprofile_add_trace(unsigned long eip);
8395
8396+void oprofile_add_mode(int cpu_mode);
8397+
8398 /* add a domain switch entry */
8399 int oprofile_add_domain_switch(int32_t domain_id);
8400
8401--- sle11-2009-10-16.orig/include/linux/sysctl.h 2009-10-28 14:55:06.000000000 +0100
8402+++ sle11-2009-10-16/include/linux/sysctl.h 2009-02-16 16:18:36.000000000 +0100
8403@@ -69,6 +69,7 @@ enum
8404 CTL_BUS=8, /* Busses */
8405 CTL_ABI=9, /* Binary emulation */
8406 CTL_CPU=10, /* CPU stuff (speed scaling, etc) */
8407+ CTL_XEN=123, /* Xen info and control */
8408 CTL_ARLAN=254, /* arlan wireless driver */
8409 CTL_S390DBF=5677, /* s390 debug */
8410 CTL_SUNRPC=7249, /* sunrpc debug */
8411--- sle11-2009-10-16.orig/include/xen/pcifront.h 2009-10-28 14:55:06.000000000 +0100
8412+++ sle11-2009-10-16/include/xen/pcifront.h 2009-02-16 16:18:36.000000000 +0100
8413@@ -12,13 +12,11 @@
8414
8415 #ifndef __ia64__
8416
8417+#include <asm/pci.h>
8418+
8419 struct pcifront_device;
8420 struct pci_bus;
8421-
8422-struct pcifront_sd {
8423- int domain;
8424- struct pcifront_device *pdev;
8425-};
8426+#define pcifront_sd pci_sysdata
8427
8428 static inline struct pcifront_device *
8429 pcifront_get_pdev(struct pcifront_sd *sd)
8430@@ -34,18 +32,6 @@ static inline void pcifront_init_sd(stru
8431 sd->pdev = pdev;
8432 }
8433
8434-#if defined(CONFIG_PCI_DOMAINS)
8435-static inline int pci_domain_nr(struct pci_bus *bus)
8436-{
8437- struct pcifront_sd *sd = bus->sysdata;
8438- return sd->domain;
8439-}
8440-static inline int pci_proc_domain(struct pci_bus *bus)
8441-{
8442- return pci_domain_nr(bus);
8443-}
8444-#endif /* CONFIG_PCI_DOMAINS */
8445-
8446 static inline void pcifront_setup_root_resources(struct pci_bus *bus,
8447 struct pcifront_sd *sd)
8448 {
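
The pcifront-private sysdata type and its pci_domain_nr()/pci_proc_domain() copies disappear because the 2.6.24 x86 <asm/pci.h> (now included above) provides a generic struct pci_sysdata carrying the domain, plus the matching accessors; pcifront_sd becomes an alias for it. Roughly, the generic side looks like this (abridged sketch of that era's header, shown for context only):

	/* include/asm-x86/pci.h, abridged */
	struct pci_sysdata {
		int	domain;		/* PCI domain */
		int	node;		/* NUMA node */
		/* (Xen kernels extend this with the pcifront_device link) */
	};

	#ifdef CONFIG_PCI_DOMAINS
	static inline int pci_domain_nr(struct pci_bus *bus)
	{
		struct pci_sysdata *sd = bus->sysdata;
		return sd->domain;
	}
	#endif
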
8449--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8450+++ sle11-2009-10-16/include/xen/sysctl.h	2009-02-16 16:18:36.000000000 +0100
8451@@ -0,0 +1,11 @@
8452+#ifndef _XEN_SYSCTL_H
8453+#define _XEN_SYSCTL_H
8454+
8455+/* CTL_XEN names: */
8456+enum
8457+{
8458+ CTL_XEN_INDEPENDENT_WALLCLOCK=1,
8459+ CTL_XEN_PERMITTED_CLOCK_JITTER=2,
8460+};
8461+
8462+#endif /* _XEN_SYSCTL_H */
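
These binary sysctl numbers pair with the proc names registered by the kernel/sysctl_check.c hunk below, so CTL_XEN_INDEPENDENT_WALLCLOCK surfaces as /proc/sys/xen/independent_wallclock. A small userspace sketch reading it through procfs (path as registered below):

	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		FILE *f = fopen("/proc/sys/xen/independent_wallclock", "r");

		if (f) {
			if (fgets(buf, sizeof(buf), f))
				printf("independent_wallclock = %s", buf);
			fclose(f);
		}
		return 0;
	}
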
8463--- sle11-2009-10-16.orig/include/xen/xenbus.h 2009-02-16 16:17:21.000000000 +0100
8464+++ sle11-2009-10-16/include/xen/xenbus.h 2009-02-16 16:18:36.000000000 +0100
8465@@ -107,7 +107,7 @@ struct xenbus_driver {
8466 int (*suspend)(struct xenbus_device *dev);
8467 int (*suspend_cancel)(struct xenbus_device *dev);
8468 int (*resume)(struct xenbus_device *dev);
8469- int (*uevent)(struct xenbus_device *, char **, int, char *, int);
8470+ int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
8471 struct device_driver driver;
8472 int (*read_otherend_details)(struct xenbus_device *dev);
8473 int (*is_ready)(struct xenbus_device *dev);
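
The ->uevent() change tracks the 2.6.24 driver-core switch from a caller-supplied buffer plus length bookkeeping to a single struct kobj_uevent_env. A hedged sketch of a xenbus driver adapting (driver name hypothetical; add_uevent_var() is the stock kobject helper):

	#include <linux/kobject.h>
	#include <linux/errno.h>
	#include <xen/xenbus.h>

	static int example_uevent(struct xenbus_device *xdev,
				  struct kobj_uevent_env *env)
	{
		/* Each variable is appended to the env instead of being
		 * snprintf'ed into a shared buffer. */
		if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype))
			return -ENOMEM;
		return 0;
	}
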
8474--- sle11-2009-10-16.orig/kernel/kexec.c 2009-02-17 11:34:22.000000000 +0100
8475+++ sle11-2009-10-16/kernel/kexec.c 2009-02-17 12:38:20.000000000 +0100
8476@@ -52,7 +52,11 @@ int dump_after_notifier;
8477
8478 /* vmcoreinfo stuff */
8479 unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
8480-u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
8481+u32
8482+#if defined(CONFIG_XEN) && defined(CONFIG_X86)
8483+__attribute__((__section__(".bss.page_aligned"), __aligned__(PAGE_SIZE)))
8484+#endif
8485+vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
8486 size_t vmcoreinfo_size;
8487 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
8488
8489@@ -1240,6 +1244,7 @@ static int __init crash_notes_memory_ini
8490 module_init(crash_notes_memory_init)
8491
8492
8493+#ifndef CONFIG_XEN
8494 /*
8495 * parsing the "crashkernel" commandline
8496 *
8497@@ -1402,7 +1407,7 @@ int __init parse_crashkernel(char *cm
8498
8499 return 0;
8500 }
8501-
8502+#endif
8503
8504
8505 void crash_save_vmcoreinfo(void)
8506@@ -1459,7 +1464,18 @@ static int __init crash_save_vmcoreinfo_
8507
8508 VMCOREINFO_SYMBOL(init_uts_ns);
8509 VMCOREINFO_SYMBOL(node_online_map);
8510+#ifndef CONFIG_X86_XEN
8511 VMCOREINFO_SYMBOL(swapper_pg_dir);
8512+#else
8513+/*
8514+ * Since for x86-32 Xen swapper_pg_dir is a pointer rather than an array,
8515+ * make the value stored consistent with native (i.e. the base address of
8516+ * the page directory).
8517+ */
8518+# define swapper_pg_dir *swapper_pg_dir
8519+ VMCOREINFO_SYMBOL(swapper_pg_dir);
8520+# undef swapper_pg_dir
8521+#endif
8522 VMCOREINFO_SYMBOL(_stext);
8523
8524 #ifndef CONFIG_NEED_MULTIPLE_NODES
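
The #define trick above is worth unpacking: VMCOREINFO_SYMBOL() records the address of its argument, and with the x86-32 Xen pointer-typed swapper_pg_dir that would be the address of the pointer variable rather than of the page directory. Expansion sketch (macro body as in this kernel's include/linux/kexec.h):

	/* #define VMCOREINFO_SYMBOL(name) \
	 *	vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, \
	 *			      (unsigned long)&name)
	 *
	 * With "#define swapper_pg_dir *swapper_pg_dir" in scope:
	 *   #name -> "swapper_pg_dir"   (# suppresses macro expansion)
	 *   &name -> &*swapper_pg_dir   == the pointer's value, i.e. the
	 *            page-directory base, matching the &swapper_pg_dir[0]
	 *            that native kernels record for the array.
	 */
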
8525--- sle11-2009-10-16.orig/kernel/sysctl_check.c 2009-10-28 14:55:06.000000000 +0100
8526+++ sle11-2009-10-16/kernel/sysctl_check.c 2009-02-16 16:18:36.000000000 +0100
8527@@ -4,6 +4,7 @@
8528 #include <linux/sunrpc/debug.h>
8529 #include <linux/string.h>
8530 #include <net/ip_vs.h>
8531+#include <xen/sysctl.h>
8532
8533 struct trans_ctl_table {
8534 int ctl_name;
8535@@ -897,6 +898,14 @@ static const struct trans_ctl_table tran
8536 {}
8537 };
8538
8539+#ifdef CONFIG_XEN
8540+static struct trans_ctl_table trans_xen_table[] = {
8541+ { CTL_XEN_INDEPENDENT_WALLCLOCK, "independent_wallclock" },
8542+ { CTL_XEN_PERMITTED_CLOCK_JITTER, "permitted_clock_jitter" },
8543+ {}
8544+};
8545+#endif
8546+
8547 static const struct trans_ctl_table trans_arlan_conf_table0[] = {
8548 { 1, "spreadingCode" },
8549 { 2, "channelNumber" },
8550@@ -1232,6 +1241,9 @@ static const struct trans_ctl_table tran
8551 { CTL_BUS, "bus", trans_bus_table },
8552 { CTL_ABI, "abi" },
8553 /* CTL_CPU not used */
8554+#ifdef CONFIG_XEN
8555+ { CTL_XEN, "xen", trans_xen_table },
8556+#endif
8557 { CTL_ARLAN, "arlan", trans_arlan_table },
8558 { CTL_S390DBF, "s390dbf", trans_s390dbf_table },
8559 { CTL_SUNRPC, "sunrpc", trans_sunrpc_table },
8560--- sle11-2009-10-16.orig/lib/swiotlb-xen.c 2009-02-05 11:16:51.000000000 +0100
8561+++ sle11-2009-10-16/lib/swiotlb-xen.c 2009-02-16 16:18:36.000000000 +0100
8562@@ -27,7 +27,7 @@
8563 #include <asm/uaccess.h>
8564 #include <xen/gnttab.h>
8565 #include <xen/interface/memory.h>
8566-#include <asm-i386/mach-xen/asm/gnttab_dma.h>
8567+#include <asm/gnttab_dma.h>
8568
8569 int swiotlb;
8570 EXPORT_SYMBOL(swiotlb);
8571@@ -574,9 +574,10 @@ swiotlb_sync_single_for_device(struct de
8572 * same here.
8573 */
8574 int
8575-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
8576+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
8577 int dir)
8578 {
8579+ struct scatterlist *sg;
8580 struct phys_addr buffer;
8581 dma_addr_t dev_addr;
8582 char *map;
8583@@ -584,22 +585,22 @@ swiotlb_map_sg(struct device *hwdev, str
8584
8585 BUG_ON(dir == DMA_NONE);
8586
8587- for (i = 0; i < nelems; i++, sg++) {
8588- dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
8589+ for_each_sg(sgl, sg, nelems, i) {
8590+ dev_addr = gnttab_dma_map_page(sg_page(sg)) + sg->offset;
8591
8592- if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
8593+ if (range_straddles_page_boundary(page_to_pseudophys(sg_page(sg))
8594 + sg->offset, sg->length)
8595 || address_needs_mapping(hwdev, dev_addr)) {
8596 gnttab_dma_unmap_page(dev_addr);
8597- buffer.page = sg->page;
8598+ buffer.page = sg_page(sg);
8599 buffer.offset = sg->offset;
8600 map = map_single(hwdev, buffer, sg->length, dir);
8601 if (!map) {
8602 /* Don't panic here, we expect map_sg users
8603 to do proper error handling. */
8604 swiotlb_full(hwdev, sg->length, dir, 0);
8605- swiotlb_unmap_sg(hwdev, sg - i, i, dir);
8606- sg[0].dma_length = 0;
8607+ swiotlb_unmap_sg(hwdev, sgl, i, dir);
8608+ sgl[0].dma_length = 0;
8609 return 0;
8610 }
8611 sg->dma_address = virt_to_bus(map);
8612@@ -615,19 +616,21 @@ swiotlb_map_sg(struct device *hwdev, str
8613 * concerning calls here are the same as for swiotlb_unmap_single() above.
8614 */
8615 void
8616-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
8617+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
8618 int dir)
8619 {
8620+ struct scatterlist *sg;
8621 int i;
8622
8623 BUG_ON(dir == DMA_NONE);
8624
8625- for (i = 0; i < nelems; i++, sg++)
8626+ for_each_sg(sgl, sg, nelems, i) {
8627 if (in_swiotlb_aperture(sg->dma_address))
8628 unmap_single(hwdev, bus_to_virt(sg->dma_address),
8629 sg->dma_length, dir);
8630 else
8631 gnttab_dma_unmap_page(sg->dma_address);
8632+ }
8633 }
8634
8635 /*
8636@@ -638,31 +641,35 @@ swiotlb_unmap_sg(struct device *hwdev, s
8637 * and usage.
8638 */
8639 void
8640-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
8641+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sgl,
8642 int nelems, int dir)
8643 {
8644+ struct scatterlist *sg;
8645 int i;
8646
8647 BUG_ON(dir == DMA_NONE);
8648
8649- for (i = 0; i < nelems; i++, sg++)
8650+ for_each_sg(sgl, sg, nelems, i) {
8651 if (in_swiotlb_aperture(sg->dma_address))
8652 sync_single(hwdev, bus_to_virt(sg->dma_address),
8653 sg->dma_length, dir);
8654+ }
8655 }
8656
8657 void
8658-swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
8659+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sgl,
8660 int nelems, int dir)
8661 {
8662+ struct scatterlist *sg;
8663 int i;
8664
8665 BUG_ON(dir == DMA_NONE);
8666
8667- for (i = 0; i < nelems; i++, sg++)
8668+ for_each_sg(sgl, sg, nelems, i) {
8669 if (in_swiotlb_aperture(sg->dma_address))
8670 sync_single(hwdev, bus_to_virt(sg->dma_address),
8671 sg->dma_length, dir);
8672+ }
8673 }
8674
8675 #ifdef CONFIG_HIGHMEM
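
The swiotlb conversion above follows 2.6.24's scatterlist chaining: entries no longer necessarily form one flat array, so the old "sg++" walk can step off the end of a chunk, and direct sg->page access gives way to the sg_page() accessor. A self-contained sketch of the new iteration idiom (function hypothetical, not part of this patch):

	#include <linux/scatterlist.h>

	static unsigned long total_sg_bytes(struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		unsigned long total = 0;
		int i;

		/* for_each_sg() advances with sg_next(), which follows
		 * chain links instead of assuming adjacent array slots. */
		for_each_sg(sgl, sg, nents, i)
			total += sg->length;

		return total;
	}
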