1Subject: xen3 xen-arch
2From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
3Patch-mainline: obsolete
4Acked-by: jbeulich@novell.com
5
6List of files having Xen derivatives (perhaps created during the merging
7of newer kernel versions), for xen-port-patches.py to pick up (i.e. this
8list must be retained here until the XenSource tree has these files in the
9right places):
10+++ linux/arch/x86/kernel/acpi/sleep-xen.c
11+++ linux/arch/x86/kernel/cpu/common_64-xen.c
12+++ linux/arch/x86/kernel/e820-xen.c
13+++ linux/arch/x86/kernel/head-xen.c
14+++ linux/arch/x86/kernel/head32-xen.c
15+++ linux/arch/x86/kernel/ioport-xen.c
16+++ linux/arch/x86/kernel/ipi-xen.c
17+++ linux/arch/x86/kernel/ldt-xen.c
18+++ linux/arch/x86/kernel/mpparse-xen.c
19+++ linux/arch/x86/kernel/pci-nommu-xen.c
20+++ linux/arch/x86/kernel/process-xen.c
21+++ linux/arch/x86/kernel/setup-xen.c
22+++ linux/arch/x86/kernel/setup_percpu-xen.c
23+++ linux/arch/x86/kernel/smp-xen.c
24+++ linux/arch/x86/mm/fault-xen.c
25+++ linux/arch/x86/mm/ioremap-xen.c
26+++ linux/arch/x86/mm/pageattr-xen.c
27+++ linux/arch/x86/mm/pat-xen.c
28+++ linux/arch/x86/mm/pgtable-xen.c
29+++ linux/arch/x86/vdso/vdso32-setup-xen.c
30+++ linux/drivers/char/mem-xen.c
31+++ linux/include/asm-x86/mach-xen/asm/desc.h
32+++ linux/include/asm-x86/mach-xen/asm/dma-mapping.h
33+++ linux/include/asm-x86/mach-xen/asm/fixmap.h
34+++ linux/include/asm-x86/mach-xen/asm/io.h
35+++ linux/include/asm-x86/mach-xen/asm/irq_vectors.h
36+++ linux/include/asm-x86/mach-xen/asm/irqflags.h
37+++ linux/include/asm-x86/mach-xen/asm/mmu_context.h
38+++ linux/include/asm-x86/mach-xen/asm/page.h
39+++ linux/include/asm-x86/mach-xen/asm/pci.h
40+++ linux/include/asm-x86/mach-xen/asm/pgalloc.h
41+++ linux/include/asm-x86/mach-xen/asm/pgtable.h
42+++ linux/include/asm-x86/mach-xen/asm/processor.h
43+++ linux/include/asm-x86/mach-xen/asm/segment.h
44+++ linux/include/asm-x86/mach-xen/asm/smp.h
45+++ linux/include/asm-x86/mach-xen/asm/spinlock.h
46+++ linux/include/asm-x86/mach-xen/asm/swiotlb.h
47+++ linux/include/asm-x86/mach-xen/asm/system.h
48+++ linux/include/asm-x86/mach-xen/asm/tlbflush.h
49+++ linux/include/asm-x86/mach-xen/asm/xor.h
50
51List of files folded into their native counterparts (and hence removed
52from this patch so that xen-port-patches.py does not needlessly pick them up;
53for reference, each is prefixed with the version in which the removal occurred):
542.6.18/include/asm-x86/mach-xen/asm/pgtable-2level.h
552.6.18/include/asm-x86/mach-xen/asm/pgtable-2level-defs.h
562.6.19/include/asm-x86/mach-xen/asm/ptrace.h
572.6.23/arch/x86/kernel/vsyscall-note_32-xen.S
582.6.23/include/asm-x86/mach-xen/asm/ptrace_64.h
592.6.24/arch/x86/kernel/early_printk_32-xen.c
602.6.24/include/asm-x86/mach-xen/asm/arch_hooks_64.h
612.6.24/include/asm-x86/mach-xen/asm/bootsetup_64.h
622.6.24/include/asm-x86/mach-xen/asm/mmu_32.h
632.6.24/include/asm-x86/mach-xen/asm/mmu_64.h
642.6.24/include/asm-x86/mach-xen/asm/nmi_64.h
652.6.24/include/asm-x86/mach-xen/asm/setup.h
662.6.24/include/asm-x86/mach-xen/asm/time_64.h (added in 2.6.20)
672.6.24/include/asm-x86/mach-xen/mach_timer.h
682.6.25/arch/x86/ia32/syscall32-xen.c
692.6.25/arch/x86/ia32/syscall32_syscall-xen.S
702.6.25/arch/x86/ia32/vsyscall-int80.S
712.6.25/arch/x86/kernel/acpi/boot-xen.c
722.6.25/include/asm-x86/mach-xen/asm/msr.h
732.6.25/include/asm-x86/mach-xen/asm/page_32.h
742.6.25/include/asm-x86/mach-xen/asm/spinlock_32.h
752.6.25/include/asm-x86/mach-xen/asm/timer.h (added in 2.6.24)
762.6.25/include/asm-x86/mach-xen/asm/timer_64.h
772.6.25/include/asm-x86/mach-xen/mach_time.h
782.6.26/arch/x86/kernel/pci-dma_32-xen.c
792.6.26/arch/x86/kernel/pci-swiotlb_64-xen.c
802.6.26/include/asm-x86/mach-xen/asm/dma-mapping_32.h
812.6.26/include/asm-x86/mach-xen/asm/dma-mapping_64.h
822.6.26/include/asm-x86/mach-xen/asm/nmi.h (added in 2.6.24)
832.6.26/include/asm-x86/mach-xen/asm/scatterlist.h (added in 2.6.24)
842.6.26/include/asm-x86/mach-xen/asm/scatterlist_32.h
852.6.26/include/xen/xencomm.h
862.6.27/arch/x86/kernel/e820_32-xen.c
872.6.27/include/asm-x86/mach-xen/asm/e820.h (added in 2.6.24)
882.6.27/include/asm-x86/mach-xen/asm/e820_64.h
892.6.27/include/asm-x86/mach-xen/asm/hw_irq.h (added in 2.6.24)
902.6.27/include/asm-x86/mach-xen/asm/hw_irq_32.h
912.6.27/include/asm-x86/mach-xen/asm/hw_irq_64.h
922.6.27/include/asm-x86/mach-xen/asm/io_32.h
932.6.27/include/asm-x86/mach-xen/asm/io_64.h
942.6.27/include/asm-x86/mach-xen/asm/irq.h (added in 2.6.24)
952.6.27/include/asm-x86/mach-xen/asm/irq_64.h
962.6.27.8/include/asm-x86/mach-xen/asm/pci_64.h
97
98Index: head-2008-11-25/arch/x86/kernel/acpi/processor_extcntl_xen.c
99===================================================================
100--- /dev/null 1970-01-01 00:00:00.000000000 +0000
101+++ head-2008-11-25/arch/x86/kernel/acpi/processor_extcntl_xen.c 2008-10-01 15:43:24.000000000 +0200
102@@ -0,0 +1,209 @@
103+/*
104+ * processor_extcntl_xen.c - interface to notify Xen
105+ *
106+ * Copyright (C) 2008, Intel corporation
107+ *
108+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
109+ *
110+ * This program is free software; you can redistribute it and/or modify
111+ * it under the terms of the GNU General Public License as published by
112+ * the Free Software Foundation; either version 2 of the License, or (at
113+ * your option) any later version.
114+ *
115+ * This program is distributed in the hope that it will be useful, but
116+ * WITHOUT ANY WARRANTY; without even the implied warranty of
117+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
118+ * General Public License for more details.
119+ *
120+ * You should have received a copy of the GNU General Public License along
121+ * with this program; if not, write to the Free Software Foundation, Inc.,
122+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
123+ *
124+ */
125+
126+#include <linux/kernel.h>
127+#include <linux/init.h>
128+#include <linux/types.h>
129+#include <linux/acpi.h>
130+#include <linux/pm.h>
131+#include <linux/cpu.h>
132+
133+#include <linux/cpufreq.h>
134+#include <acpi/processor.h>
135+#include <asm/hypercall.h>
136+
137+static int xen_cx_notifier(struct acpi_processor *pr, int action)
138+{
139+ int ret, count = 0, i;
140+ xen_platform_op_t op = {
141+ .cmd = XENPF_set_processor_pminfo,
142+ .interface_version = XENPF_INTERFACE_VERSION,
143+ .u.set_pminfo.id = pr->acpi_id,
144+ .u.set_pminfo.type = XEN_PM_CX,
145+ };
146+ struct xen_processor_cx *data, *buf;
147+ struct acpi_processor_cx *cx;
148+
149+ if (action == PROCESSOR_PM_CHANGE)
150+ return -EINVAL;
151+
152+ /* Convert to Xen defined structure and hypercall */
153+ buf = kzalloc(pr->power.count * sizeof(struct xen_processor_cx),
154+ GFP_KERNEL);
155+ if (!buf)
156+ return -ENOMEM;
157+
158+ data = buf;
159+ for (i = 1; i <= pr->power.count; i++) {
160+ cx = &pr->power.states[i];
161+ /* Skip invalid cstate entry */
162+ if (!cx->valid)
163+ continue;
164+
165+ data->type = cx->type;
166+ data->latency = cx->latency;
167+ data->power = cx->power;
168+ data->reg.space_id = cx->reg.space_id;
169+ data->reg.bit_width = cx->reg.bit_width;
170+ data->reg.bit_offset = cx->reg.bit_offset;
171+ data->reg.access_size = cx->reg.reserved;
172+ data->reg.address = cx->reg.address;
173+
174+ /* Get dependency relationships */
175+ if (cx->csd_count) {
176+ printk("Wow! _CSD is found. Not support for now!\n");
177+ kfree(buf);
178+ return -EINVAL;
179+ } else {
180+ data->dpcnt = 0;
181+ set_xen_guest_handle(data->dp, NULL);
182+ }
183+
184+ data++;
185+ count++;
186+ }
187+
188+ if (!count) {
189+ printk("No available Cx info for cpu %d\n", pr->acpi_id);
190+ kfree(buf);
191+ return -EINVAL;
192+ }
193+
194+ op.u.set_pminfo.power.count = count;
195+ op.u.set_pminfo.power.flags.bm_control = pr->flags.bm_control;
196+ op.u.set_pminfo.power.flags.bm_check = pr->flags.bm_check;
197+ op.u.set_pminfo.power.flags.has_cst = pr->flags.has_cst;
198+ op.u.set_pminfo.power.flags.power_setup_done = pr->flags.power_setup_done;
199+
200+ set_xen_guest_handle(op.u.set_pminfo.power.states, buf);
201+ ret = HYPERVISOR_platform_op(&op);
202+ kfree(buf);
203+ return ret;
204+}
205+
206+static int xen_px_notifier(struct acpi_processor *pr, int action)
207+{
208+ int ret = -EINVAL;
209+ xen_platform_op_t op = {
210+ .cmd = XENPF_set_processor_pminfo,
211+ .interface_version = XENPF_INTERFACE_VERSION,
212+ .u.set_pminfo.id = pr->acpi_id,
213+ .u.set_pminfo.type = XEN_PM_PX,
214+ };
215+ struct xen_processor_performance *perf;
216+ struct xen_processor_px *states = NULL;
217+ struct acpi_processor_performance *px;
218+ struct acpi_psd_package *pdomain;
219+
220+ if (!pr)
221+ return -EINVAL;
222+
223+ perf = &op.u.set_pminfo.perf;
224+ px = pr->performance;
225+
226+ switch(action) {
227+ case PROCESSOR_PM_CHANGE:
228+ /* ppc dynamic handle */
229+ perf->flags = XEN_PX_PPC;
230+ perf->platform_limit = pr->performance_platform_limit;
231+
232+ ret = HYPERVISOR_platform_op(&op);
233+ break;
234+
235+ case PROCESSOR_PM_INIT:
236+ /* px normal init */
237+ perf->flags = XEN_PX_PPC |
238+ XEN_PX_PCT |
239+ XEN_PX_PSS |
240+ XEN_PX_PSD;
241+
242+ /* ppc */
243+ perf->platform_limit = pr->performance_platform_limit;
244+
245+ /* pct */
246+ xen_convert_pct_reg(&perf->control_register, &px->control_register);
247+ xen_convert_pct_reg(&perf->status_register, &px->status_register);
248+
249+ /* pss */
250+ perf->state_count = px->state_count;
251+ states = kzalloc(px->state_count*sizeof(xen_processor_px_t),GFP_KERNEL);
252+ if (!states)
253+ return -ENOMEM;
254+ xen_convert_pss_states(states, px->states, px->state_count);
255+ set_xen_guest_handle(perf->states, states);
256+
257+ /* psd */
258+ pdomain = &px->domain_info;
259+ xen_convert_psd_pack(&perf->domain_info, pdomain);
260+ if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
261+ perf->shared_type = CPUFREQ_SHARED_TYPE_ALL;
262+ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
263+ perf->shared_type = CPUFREQ_SHARED_TYPE_ANY;
264+ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
265+ perf->shared_type = CPUFREQ_SHARED_TYPE_HW;
266+ else {
267+ ret = -ENODEV;
268+ kfree(states);
269+ break;
270+ }
271+
272+ ret = HYPERVISOR_platform_op(&op);
273+ kfree(states);
274+ break;
275+
276+ default:
277+ break;
278+ }
279+
280+ return ret;
281+}
282+
283+static int xen_tx_notifier(struct acpi_processor *pr, int action)
284+{
285+ return -EINVAL;
286+}
287+static int xen_hotplug_notifier(struct acpi_processor *pr, int event)
288+{
289+ return -EINVAL;
290+}
291+
292+static struct processor_extcntl_ops xen_extcntl_ops = {
293+ .hotplug = xen_hotplug_notifier,
294+};
295+
296+void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops)
297+{
298+ unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8;
299+
300+ if (!pmbits)
301+ return;
302+ if (pmbits & XEN_PROCESSOR_PM_CX)
303+ xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier;
304+ if (pmbits & XEN_PROCESSOR_PM_PX)
305+ xen_extcntl_ops.pm_ops[PM_TYPE_PERF] = xen_px_notifier;
306+ if (pmbits & XEN_PROCESSOR_PM_TX)
307+ xen_extcntl_ops.pm_ops[PM_TYPE_THR] = xen_tx_notifier;
308+
309+ *ops = &xen_extcntl_ops;
310+}
311+EXPORT_SYMBOL(arch_acpi_processor_init_extcntl);
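
The hunk above routes ACPI C-, P- and T-state notifications to Xen through a
processor_extcntl_ops table. As a rough usage sketch -- assuming the SUSE
processor_extcntl framework elsewhere in this patch series supplies
PM_TYPE_IDLE and a populated struct acpi_processor, and with
example_forward_cx() being a made-up helper rather than part of the patch --
a caller could consume that table like this:

/* Illustrative sketch only; example_forward_cx() is not part of the patch.
 * It assumes the processor_extcntl framework provides PM_TYPE_IDLE and a
 * filled-in struct acpi_processor, exactly as the notifiers above expect. */
static int example_forward_cx(struct acpi_processor *pr)
{
	const struct processor_extcntl_ops *ops = NULL;

	arch_acpi_processor_init_extcntl(&ops);	/* selects xen_extcntl_ops */
	if (!ops || !ops->pm_ops[PM_TYPE_IDLE])
		return -ENODEV;			/* Xen granted no Cx control */

	/* Forward the parsed _CST data; PROCESSOR_PM_CHANGE would be rejected. */
	return ops->pm_ops[PM_TYPE_IDLE](pr, PROCESSOR_PM_INIT);
}
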
312Index: head-2008-11-25/arch/x86/kernel/acpi/sleep_32-xen.c
313===================================================================
314--- /dev/null 1970-01-01 00:00:00.000000000 +0000
315+++ head-2008-11-25/arch/x86/kernel/acpi/sleep_32-xen.c 2008-04-15 09:29:41.000000000 +0200
316@@ -0,0 +1,113 @@
317+/*
318+ * sleep.c - x86-specific ACPI sleep support.
319+ *
320+ * Copyright (C) 2001-2003 Patrick Mochel
321+ * Copyright (C) 2001-2003 Pavel Machek <pavel@suse.cz>
322+ */
323+
324+#include <linux/acpi.h>
325+#include <linux/bootmem.h>
326+#include <linux/dmi.h>
327+#include <linux/cpumask.h>
328+
329+#include <asm/smp.h>
330+
331+#ifndef CONFIG_ACPI_PV_SLEEP
332+/* address in low memory of the wakeup routine. */
333+unsigned long acpi_wakeup_address = 0;
334+unsigned long acpi_video_flags;
335+extern char wakeup_start, wakeup_end;
336+
337+extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
338+#endif
339+
340+/**
341+ * acpi_save_state_mem - save kernel state
342+ *
343+ * Create an identity mapped page table and copy the wakeup routine to
344+ * low memory.
345+ */
346+int acpi_save_state_mem(void)
347+{
348+#ifndef CONFIG_ACPI_PV_SLEEP
349+ if (!acpi_wakeup_address)
350+ return 1;
351+ memcpy((void *)acpi_wakeup_address, &wakeup_start,
352+ &wakeup_end - &wakeup_start);
353+ acpi_copy_wakeup_routine(acpi_wakeup_address);
354+#endif
355+ return 0;
356+}
357+
358+/*
359+ * acpi_restore_state - undo effects of acpi_save_state_mem
360+ */
361+void acpi_restore_state_mem(void)
362+{
363+}
364+
365+/**
366+ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
367+ *
368+ * We allocate a page from the first 1MB of memory for the wakeup
369+ * routine for when we come back from a sleep state. The
370+ * runtime allocator allows specification of <16MB pages, but not
371+ * <1MB pages.
372+ */
373+void __init acpi_reserve_bootmem(void)
374+{
375+#ifndef CONFIG_ACPI_PV_SLEEP
376+ if ((&wakeup_end - &wakeup_start) > PAGE_SIZE) {
377+ printk(KERN_ERR
378+ "ACPI: Wakeup code way too big, S3 disabled.\n");
379+ return;
380+ }
381+
382+ acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
383+ if (!acpi_wakeup_address)
384+ printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
385+#endif
386+}
387+
388+#ifndef CONFIG_ACPI_PV_SLEEP
389+static int __init acpi_sleep_setup(char *str)
390+{
391+ while ((str != NULL) && (*str != '\0')) {
392+ if (strncmp(str, "s3_bios", 7) == 0)
393+ acpi_video_flags = 1;
394+ if (strncmp(str, "s3_mode", 7) == 0)
395+ acpi_video_flags |= 2;
396+ str = strchr(str, ',');
397+ if (str != NULL)
398+ str += strspn(str, ", \t");
399+ }
400+ return 1;
401+}
402+
403+__setup("acpi_sleep=", acpi_sleep_setup);
404+
405+static __init int reset_videomode_after_s3(struct dmi_system_id *d)
406+{
407+ acpi_video_flags |= 2;
408+ return 0;
409+}
410+
411+static __initdata struct dmi_system_id acpisleep_dmi_table[] = {
412+ { /* Reset video mode after returning from ACPI S3 sleep */
413+ .callback = reset_videomode_after_s3,
414+ .ident = "Toshiba Satellite 4030cdt",
415+ .matches = {
416+ DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
417+ },
418+ },
419+ {}
420+};
421+
422+static int __init acpisleep_dmi_init(void)
423+{
424+ dmi_check_system(acpisleep_dmi_table);
425+ return 0;
426+}
427+
428+core_initcall(acpisleep_dmi_init);
429+#endif /* CONFIG_ACPI_PV_SLEEP */
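
For reference, the acpi_sleep_setup() parser above takes a comma-separated
acpi_sleep= boot option: s3_bios assigns acpi_video_flags = 1 while s3_mode
ORs in 2, so the result is order-dependent. A small user-space sketch (the
function name is hypothetical) mirrors the same logic and makes it easy to
check what a given command line yields:

/* User-space sketch mirroring acpi_sleep_setup() above; not part of the patch. */
#include <string.h>

static unsigned long parse_acpi_sleep_example(const char *opt)
{
	unsigned long flags = 0;
	const char *str = opt;

	while (str && *str) {
		if (strncmp(str, "s3_bios", 7) == 0)
			flags = 1;	/* note: assignment, as in the original */
		if (strncmp(str, "s3_mode", 7) == 0)
			flags |= 2;
		str = strchr(str, ',');
		if (str)
			str += strspn(str, ", \t");
	}
	return flags;	/* "s3_bios,s3_mode" -> 3, "s3_mode,s3_bios" -> 1 */
}
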
430Index: head-2008-11-25/arch/x86/kernel/apic_32-xen.c
431===================================================================
432--- /dev/null 1970-01-01 00:00:00.000000000 +0000
433+++ head-2008-11-25/arch/x86/kernel/apic_32-xen.c 2007-06-12 13:12:48.000000000 +0200
434@@ -0,0 +1,155 @@
435+/*
436+ * Local APIC handling, local APIC timers
437+ *
438+ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
439+ *
440+ * Fixes
441+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
442+ * thanks to Eric Gilmore
443+ * and Rolf G. Tews
444+ * for testing these extensively.
445+ * Maciej W. Rozycki : Various updates and fixes.
446+ * Mikael Pettersson : Power Management for UP-APIC.
447+ * Pavel Machek and
448+ * Mikael Pettersson : PM converted to driver model.
449+ */
450+
451+#include <linux/init.h>
452+
453+#include <linux/mm.h>
454+#include <linux/delay.h>
455+#include <linux/bootmem.h>
456+#include <linux/smp_lock.h>
457+#include <linux/interrupt.h>
458+#include <linux/mc146818rtc.h>
459+#include <linux/kernel_stat.h>
460+#include <linux/sysdev.h>
461+#include <linux/cpu.h>
462+#include <linux/module.h>
463+
464+#include <asm/atomic.h>
465+#include <asm/smp.h>
466+#include <asm/mtrr.h>
467+#include <asm/mpspec.h>
468+#include <asm/desc.h>
469+#include <asm/arch_hooks.h>
470+#include <asm/hpet.h>
471+#include <asm/i8253.h>
472+#include <asm/nmi.h>
473+
474+#include <mach_apic.h>
475+#include <mach_apicdef.h>
476+#include <mach_ipi.h>
477+
478+#include "io_ports.h"
479+
480+#ifndef CONFIG_XEN
481+/*
482+ * cpu_mask that denotes the CPUs that need the timer interrupt coming in as
483+ * IPIs in place of local APIC timers
484+ */
485+static cpumask_t timer_bcast_ipi;
486+#endif
487+
488+/*
489+ * Knob to control our willingness to enable the local APIC.
490+ */
491+int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
492+
493+/*
494+ * Debug level
495+ */
496+int apic_verbosity;
497+
498+#ifndef CONFIG_XEN
499+static int modern_apic(void)
500+{
501+ unsigned int lvr, version;
502+ /* AMD systems use old APIC versions, so check the CPU */
503+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
504+ boot_cpu_data.x86 >= 0xf)
505+ return 1;
506+ lvr = apic_read(APIC_LVR);
507+ version = GET_APIC_VERSION(lvr);
508+ return version >= 0x14;
509+}
510+#endif /* !CONFIG_XEN */
511+
512+/*
513+ * 'what should we do if we get a hw irq event on an illegal vector'.
514+ * each architecture has to answer this themselves.
515+ */
516+void ack_bad_irq(unsigned int irq)
517+{
518+ printk("unexpected IRQ trap at vector %02x\n", irq);
519+ /*
520+ * Currently unexpected vectors happen only on SMP and APIC.
521+ * We _must_ ack these because every local APIC has only N
522+ * irq slots per priority level, and a 'hanging, unacked' IRQ
523+ * holds up an irq slot - in excessive cases (when multiple
524+ * unexpected vectors occur) that might lock up the APIC
525+ * completely.
526+ * But only ack when the APIC is enabled -AK
527+ */
528+ if (cpu_has_apic)
529+ ack_APIC_irq();
530+}
531+
532+int get_physical_broadcast(void)
533+{
534+ return 0xff;
535+}
536+
537+#ifndef CONFIG_XEN
538+#ifndef CONFIG_SMP
539+static void up_apic_timer_interrupt_call(struct pt_regs *regs)
540+{
541+ int cpu = smp_processor_id();
542+
543+ /*
544+ * the NMI deadlock-detector uses this.
545+ */
546+ per_cpu(irq_stat, cpu).apic_timer_irqs++;
547+
548+ smp_local_timer_interrupt(regs);
549+}
550+#endif
551+
552+void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
553+{
554+ cpumask_t mask;
555+
556+ cpus_and(mask, cpu_online_map, timer_bcast_ipi);
557+ if (!cpus_empty(mask)) {
558+#ifdef CONFIG_SMP
559+ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
560+#else
561+ /*
562+ * We can directly call the apic timer interrupt handler
563+ * in UP case. Minus all irq related functions
564+ */
565+ up_apic_timer_interrupt_call(regs);
566+#endif
567+ }
568+}
569+#endif
570+
571+int setup_profiling_timer(unsigned int multiplier)
572+{
573+ return -EINVAL;
574+}
575+
576+/*
577+ * This initializes the IO-APIC and APIC hardware if this is
578+ * a UP kernel.
579+ */
580+int __init APIC_init_uniprocessor (void)
581+{
582+#ifdef CONFIG_X86_IO_APIC
583+ if (smp_found_config)
584+ if (!skip_ioapic_setup && nr_ioapics)
585+ setup_IO_APIC();
586+#endif
587+
588+ return 0;
589+}
590Index: head-2008-11-25/arch/x86/kernel/cpu/common-xen.c
591===================================================================
592--- /dev/null 1970-01-01 00:00:00.000000000 +0000
593+++ head-2008-11-25/arch/x86/kernel/cpu/common-xen.c 2007-12-10 08:47:31.000000000 +0100
594@@ -0,0 +1,743 @@
595+#include <linux/init.h>
596+#include <linux/string.h>
597+#include <linux/delay.h>
598+#include <linux/smp.h>
599+#include <linux/module.h>
600+#include <linux/percpu.h>
601+#include <linux/bootmem.h>
602+#include <asm/semaphore.h>
603+#include <asm/processor.h>
604+#include <asm/i387.h>
605+#include <asm/msr.h>
606+#include <asm/io.h>
607+#include <asm/mmu_context.h>
608+#include <asm/mtrr.h>
609+#include <asm/mce.h>
610+#ifdef CONFIG_X86_LOCAL_APIC
611+#include <asm/mpspec.h>
612+#include <asm/apic.h>
613+#include <mach_apic.h>
614+#else
615+#ifdef CONFIG_XEN
616+#define phys_pkg_id(a,b) a
617+#endif
618+#endif
619+#include <asm/hypervisor.h>
620+
621+#include "cpu.h"
622+
623+DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
624+EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
625+
626+#ifndef CONFIG_XEN
627+DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
628+EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
629+#endif
630+
631+static int cachesize_override __cpuinitdata = -1;
632+static int disable_x86_fxsr __cpuinitdata;
633+static int disable_x86_serial_nr __cpuinitdata = 1;
634+static int disable_x86_sep __cpuinitdata;
635+
636+struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
637+
638+extern int disable_pse;
639+
640+static void default_init(struct cpuinfo_x86 * c)
641+{
642+ /* Not much we can do here... */
643+ /* Check if at least it has cpuid */
644+ if (c->cpuid_level == -1) {
645+ /* No cpuid. It must be an ancient CPU */
646+ if (c->x86 == 4)
647+ strcpy(c->x86_model_id, "486");
648+ else if (c->x86 == 3)
649+ strcpy(c->x86_model_id, "386");
650+ }
651+}
652+
653+static struct cpu_dev default_cpu = {
654+ .c_init = default_init,
655+ .c_vendor = "Unknown",
656+};
657+static struct cpu_dev * this_cpu = &default_cpu;
658+
659+static int __init cachesize_setup(char *str)
660+{
661+ get_option (&str, &cachesize_override);
662+ return 1;
663+}
664+__setup("cachesize=", cachesize_setup);
665+
666+int __cpuinit get_model_name(struct cpuinfo_x86 *c)
667+{
668+ unsigned int *v;
669+ char *p, *q;
670+
671+ if (cpuid_eax(0x80000000) < 0x80000004)
672+ return 0;
673+
674+ v = (unsigned int *) c->x86_model_id;
675+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
676+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
677+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
678+ c->x86_model_id[48] = 0;
679+
680+ /* Intel chips right-justify this string for some dumb reason;
681+ undo that brain damage */
682+ p = q = &c->x86_model_id[0];
683+ while ( *p == ' ' )
684+ p++;
685+ if ( p != q ) {
686+ while ( *p )
687+ *q++ = *p++;
688+ while ( q <= &c->x86_model_id[48] )
689+ *q++ = '\0'; /* Zero-pad the rest */
690+ }
691+
692+ return 1;
693+}
694+
695+
696+void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
697+{
698+ unsigned int n, dummy, ecx, edx, l2size;
699+
700+ n = cpuid_eax(0x80000000);
701+
702+ if (n >= 0x80000005) {
703+ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
704+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
705+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
706+ c->x86_cache_size=(ecx>>24)+(edx>>24);
707+ }
708+
709+ if (n < 0x80000006) /* Some chips just has a large L1. */
710+ return;
711+
712+ ecx = cpuid_ecx(0x80000006);
713+ l2size = ecx >> 16;
714+
715+ /* do processor-specific cache resizing */
716+ if (this_cpu->c_size_cache)
717+ l2size = this_cpu->c_size_cache(c,l2size);
718+
719+ /* Allow user to override all this if necessary. */
720+ if (cachesize_override != -1)
721+ l2size = cachesize_override;
722+
723+ if ( l2size == 0 )
724+ return; /* Again, no L2 cache is possible */
725+
726+ c->x86_cache_size = l2size;
727+
728+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
729+ l2size, ecx & 0xFF);
730+}
731+
732+/* Naming convention should be: <Name> [(<Codename>)] */
733+/* This table only is used unless init_<vendor>() below doesn't set it; */
734+/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
735+
736+/* Look up CPU names by table lookup. */
737+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
738+{
739+ struct cpu_model_info *info;
740+
741+ if ( c->x86_model >= 16 )
742+ return NULL; /* Range check */
743+
744+ if (!this_cpu)
745+ return NULL;
746+
747+ info = this_cpu->c_models;
748+
749+ while (info && info->family) {
750+ if (info->family == c->x86)
751+ return info->model_names[c->x86_model];
752+ info++;
753+ }
754+ return NULL; /* Not found */
755+}
756+
757+
758+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
759+{
760+ char *v = c->x86_vendor_id;
761+ int i;
762+ static int printed;
763+
764+ for (i = 0; i < X86_VENDOR_NUM; i++) {
765+ if (cpu_devs[i]) {
766+ if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
767+ (cpu_devs[i]->c_ident[1] &&
768+ !strcmp(v,cpu_devs[i]->c_ident[1]))) {
769+ c->x86_vendor = i;
770+ if (!early)
771+ this_cpu = cpu_devs[i];
772+ return;
773+ }
774+ }
775+ }
776+ if (!printed) {
777+ printed++;
778+ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
779+ printk(KERN_ERR "CPU: Your system may be unstable.\n");
780+ }
781+ c->x86_vendor = X86_VENDOR_UNKNOWN;
782+ this_cpu = &default_cpu;
783+}
784+
785+
786+static int __init x86_fxsr_setup(char * s)
787+{
788+ disable_x86_fxsr = 1;
789+ return 1;
790+}
791+__setup("nofxsr", x86_fxsr_setup);
792+
793+
794+static int __init x86_sep_setup(char * s)
795+{
796+ disable_x86_sep = 1;
797+ return 1;
798+}
799+__setup("nosep", x86_sep_setup);
800+
801+
802+/* Standard macro to see if a specific flag is changeable */
803+static inline int flag_is_changeable_p(u32 flag)
804+{
805+ u32 f1, f2;
806+
807+ asm("pushfl\n\t"
808+ "pushfl\n\t"
809+ "popl %0\n\t"
810+ "movl %0,%1\n\t"
811+ "xorl %2,%0\n\t"
812+ "pushl %0\n\t"
813+ "popfl\n\t"
814+ "pushfl\n\t"
815+ "popl %0\n\t"
816+ "popfl\n\t"
817+ : "=&r" (f1), "=&r" (f2)
818+ : "ir" (flag));
819+
820+ return ((f1^f2) & flag) != 0;
821+}
822+
823+
824+/* Probe for the CPUID instruction */
825+static int __cpuinit have_cpuid_p(void)
826+{
827+ return flag_is_changeable_p(X86_EFLAGS_ID);
828+}
829+
830+/* Do minimum CPU detection early.
831+ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
832+ The others are not touched to avoid unwanted side effects.
833+
834+ WARNING: this function is only called on the BP. Don't add code here
835+ that is supposed to run on all CPUs. */
836+static void __init early_cpu_detect(void)
837+{
838+ struct cpuinfo_x86 *c = &boot_cpu_data;
839+
840+ c->x86_cache_alignment = 32;
841+
842+ if (!have_cpuid_p())
843+ return;
844+
845+ /* Get vendor name */
846+ cpuid(0x00000000, &c->cpuid_level,
847+ (int *)&c->x86_vendor_id[0],
848+ (int *)&c->x86_vendor_id[8],
849+ (int *)&c->x86_vendor_id[4]);
850+
851+ get_cpu_vendor(c, 1);
852+
853+ c->x86 = 4;
854+ if (c->cpuid_level >= 0x00000001) {
855+ u32 junk, tfms, cap0, misc;
856+ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
857+ c->x86 = (tfms >> 8) & 15;
858+ c->x86_model = (tfms >> 4) & 15;
859+ if (c->x86 == 0xf)
860+ c->x86 += (tfms >> 20) & 0xff;
861+ if (c->x86 >= 0x6)
862+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
863+ c->x86_mask = tfms & 15;
864+ if (cap0 & (1<<19))
865+ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
866+ }
867+}
868+
869+void __cpuinit generic_identify(struct cpuinfo_x86 * c)
870+{
871+ u32 tfms, xlvl;
872+ int ebx;
873+
874+ if (have_cpuid_p()) {
875+ /* Get vendor name */
876+ cpuid(0x00000000, &c->cpuid_level,
877+ (int *)&c->x86_vendor_id[0],
878+ (int *)&c->x86_vendor_id[8],
879+ (int *)&c->x86_vendor_id[4]);
880+
881+ get_cpu_vendor(c, 0);
882+ /* Initialize the standard set of capabilities */
883+ /* Note that the vendor-specific code below might override */
884+
885+ /* Intel-defined flags: level 0x00000001 */
886+ if ( c->cpuid_level >= 0x00000001 ) {
887+ u32 capability, excap;
888+ cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
889+ c->x86_capability[0] = capability;
890+ c->x86_capability[4] = excap;
891+ c->x86 = (tfms >> 8) & 15;
892+ c->x86_model = (tfms >> 4) & 15;
893+ if (c->x86 == 0xf)
894+ c->x86 += (tfms >> 20) & 0xff;
895+ if (c->x86 >= 0x6)
896+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
897+ c->x86_mask = tfms & 15;
898+#ifdef CONFIG_X86_HT
899+ c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
900+#else
901+ c->apicid = (ebx >> 24) & 0xFF;
902+#endif
903+ } else {
904+ /* Have CPUID level 0 only - unheard of */
905+ c->x86 = 4;
906+ }
907+
908+ /* AMD-defined flags: level 0x80000001 */
909+ xlvl = cpuid_eax(0x80000000);
910+ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
911+ if ( xlvl >= 0x80000001 ) {
912+ c->x86_capability[1] = cpuid_edx(0x80000001);
913+ c->x86_capability[6] = cpuid_ecx(0x80000001);
914+ }
915+ if ( xlvl >= 0x80000004 )
916+ get_model_name(c); /* Default name */
917+ }
918+ }
919+
920+ early_intel_workaround(c);
921+
922+#ifdef CONFIG_X86_HT
923+ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
924+#endif
925+}
926+
927+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
928+{
929+ if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
930+ /* Disable processor serial number */
931+ unsigned long lo,hi;
932+ rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
933+ lo |= 0x200000;
934+ wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
935+ printk(KERN_NOTICE "CPU serial number disabled.\n");
936+ clear_bit(X86_FEATURE_PN, c->x86_capability);
937+
938+ /* Disabling the serial number may affect the cpuid level */
939+ c->cpuid_level = cpuid_eax(0);
940+ }
941+}
942+
943+static int __init x86_serial_nr_setup(char *s)
944+{
945+ disable_x86_serial_nr = 0;
946+ return 1;
947+}
948+__setup("serialnumber", x86_serial_nr_setup);
949+
950+
951+
952+/*
953+ * This does the hard work of actually picking apart the CPU stuff...
954+ */
955+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
956+{
957+ int i;
958+
959+ c->loops_per_jiffy = loops_per_jiffy;
960+ c->x86_cache_size = -1;
961+ c->x86_vendor = X86_VENDOR_UNKNOWN;
962+ c->cpuid_level = -1; /* CPUID not detected */
963+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
964+ c->x86_vendor_id[0] = '\0'; /* Unset */
965+ c->x86_model_id[0] = '\0'; /* Unset */
966+ c->x86_max_cores = 1;
967+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
968+
969+ if (!have_cpuid_p()) {
970+ /* First of all, decide if this is a 486 or higher */
971+ /* It's a 486 if we can modify the AC flag */
972+ if ( flag_is_changeable_p(X86_EFLAGS_AC) )
973+ c->x86 = 4;
974+ else
975+ c->x86 = 3;
976+ }
977+
978+ generic_identify(c);
979+
980+ printk(KERN_DEBUG "CPU: After generic identify, caps:");
981+ for (i = 0; i < NCAPINTS; i++)
982+ printk(" %08lx", c->x86_capability[i]);
983+ printk("\n");
984+
985+ if (this_cpu->c_identify) {
986+ this_cpu->c_identify(c);
987+
988+ printk(KERN_DEBUG "CPU: After vendor identify, caps:");
989+ for (i = 0; i < NCAPINTS; i++)
990+ printk(" %08lx", c->x86_capability[i]);
991+ printk("\n");
992+ }
993+
994+ /*
995+ * Vendor-specific initialization. In this section we
996+ * canonicalize the feature flags, meaning if there are
997+ * features a certain CPU supports which CPUID doesn't
998+ * tell us, CPUID claiming incorrect flags, or other bugs,
999+ * we handle them here.
1000+ *
1001+ * At the end of this section, c->x86_capability better
1002+ * indicate the features this CPU genuinely supports!
1003+ */
1004+ if (this_cpu->c_init)
1005+ this_cpu->c_init(c);
1006+
1007+ /* Disable the PN if appropriate */
1008+ squash_the_stupid_serial_number(c);
1009+
1010+ /*
1011+ * The vendor-specific functions might have changed features. Now
1012+ * we do "generic changes."
1013+ */
1014+
1015+ /* TSC disabled? */
1016+ if ( tsc_disable )
1017+ clear_bit(X86_FEATURE_TSC, c->x86_capability);
1018+
1019+ /* FXSR disabled? */
1020+ if (disable_x86_fxsr) {
1021+ clear_bit(X86_FEATURE_FXSR, c->x86_capability);
1022+ clear_bit(X86_FEATURE_XMM, c->x86_capability);
1023+ }
1024+
1025+ /* SEP disabled? */
1026+ if (disable_x86_sep)
1027+ clear_bit(X86_FEATURE_SEP, c->x86_capability);
1028+
1029+ if (disable_pse)
1030+ clear_bit(X86_FEATURE_PSE, c->x86_capability);
1031+
1032+ /* If the model name is still unset, do table lookup. */
1033+ if ( !c->x86_model_id[0] ) {
1034+ char *p;
1035+ p = table_lookup_model(c);
1036+ if ( p )
1037+ strcpy(c->x86_model_id, p);
1038+ else
1039+ /* Last resort... */
1040+ sprintf(c->x86_model_id, "%02x/%02x",
1041+ c->x86, c->x86_model);
1042+ }
1043+
1044+ /* Now the feature flags better reflect actual CPU features! */
1045+
1046+ printk(KERN_DEBUG "CPU: After all inits, caps:");
1047+ for (i = 0; i < NCAPINTS; i++)
1048+ printk(" %08lx", c->x86_capability[i]);
1049+ printk("\n");
1050+
1051+ /*
1052+ * On SMP, boot_cpu_data holds the common feature set between
1053+ * all CPUs; so make sure that we indicate which features are
1054+ * common between the CPUs. The first time this routine gets
1055+ * executed, c == &boot_cpu_data.
1056+ */
1057+ if ( c != &boot_cpu_data ) {
1058+ /* AND the already accumulated flags with these */
1059+ for ( i = 0 ; i < NCAPINTS ; i++ )
1060+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1061+ }
1062+
1063+ /* Init Machine Check Exception if available. */
1064+ mcheck_init(c);
1065+
1066+ if (c == &boot_cpu_data)
1067+ sysenter_setup();
1068+ enable_sep_cpu();
1069+
1070+ if (c == &boot_cpu_data)
1071+ mtrr_bp_init();
1072+ else
1073+ mtrr_ap_init();
1074+}
1075+
1076+#ifdef CONFIG_X86_HT
1077+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
1078+{
1079+ u32 eax, ebx, ecx, edx;
1080+ int index_msb, core_bits;
1081+
1082+ cpuid(1, &eax, &ebx, &ecx, &edx);
1083+
1084+ if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
1085+ return;
1086+
1087+ smp_num_siblings = (ebx & 0xff0000) >> 16;
1088+
1089+ if (smp_num_siblings == 1) {
1090+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
1091+ } else if (smp_num_siblings > 1 ) {
1092+
1093+ if (smp_num_siblings > NR_CPUS) {
1094+ printk(KERN_WARNING "CPU: Unsupported number of "
1095+ "siblings %d", smp_num_siblings);
1096+ smp_num_siblings = 1;
1097+ return;
1098+ }
1099+
1100+ index_msb = get_count_order(smp_num_siblings);
1101+ c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
1102+
1103+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
1104+ c->phys_proc_id);
1105+
1106+ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
1107+
1108+ index_msb = get_count_order(smp_num_siblings) ;
1109+
1110+ core_bits = get_count_order(c->x86_max_cores);
1111+
1112+ c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
1113+ ((1 << core_bits) - 1);
1114+
1115+ if (c->x86_max_cores > 1)
1116+ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
1117+ c->cpu_core_id);
1118+ }
1119+}
1120+#endif
1121+
1122+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
1123+{
1124+ char *vendor = NULL;
1125+
1126+ if (c->x86_vendor < X86_VENDOR_NUM)
1127+ vendor = this_cpu->c_vendor;
1128+ else if (c->cpuid_level >= 0)
1129+ vendor = c->x86_vendor_id;
1130+
1131+ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
1132+ printk("%s ", vendor);
1133+
1134+ if (!c->x86_model_id[0])
1135+ printk("%d86", c->x86);
1136+ else
1137+ printk("%s", c->x86_model_id);
1138+
1139+ if (c->x86_mask || c->cpuid_level >= 0)
1140+ printk(" stepping %02x\n", c->x86_mask);
1141+ else
1142+ printk("\n");
1143+}
1144+
1145+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
1146+
1147+/* This is hacky. :)
1148+ * We're emulating future behavior.
1149+ * In the future, the cpu-specific init functions will be called implicitly
1150+ * via the magic of initcalls.
1151+ * They will insert themselves into the cpu_devs structure.
1152+ * Then, when cpu_init() is called, we can just iterate over that array.
1153+ */
1154+
1155+extern int intel_cpu_init(void);
1156+extern int cyrix_init_cpu(void);
1157+extern int nsc_init_cpu(void);
1158+extern int amd_init_cpu(void);
1159+extern int centaur_init_cpu(void);
1160+extern int transmeta_init_cpu(void);
1161+extern int rise_init_cpu(void);
1162+extern int nexgen_init_cpu(void);
1163+extern int umc_init_cpu(void);
1164+
1165+void __init early_cpu_init(void)
1166+{
1167+ intel_cpu_init();
1168+ cyrix_init_cpu();
1169+ nsc_init_cpu();
1170+ amd_init_cpu();
1171+ centaur_init_cpu();
1172+ transmeta_init_cpu();
1173+ rise_init_cpu();
1174+ nexgen_init_cpu();
1175+ umc_init_cpu();
1176+ early_cpu_detect();
1177+
1178+#ifdef CONFIG_DEBUG_PAGEALLOC
1179+ /* pse is not compatible with on-the-fly unmapping,
1180+ * disable it even if the cpus claim to support it.
1181+ */
1182+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
1183+ disable_pse = 1;
1184+#endif
1185+}
1186+
1187+static void __cpuinit cpu_gdt_init(const struct Xgt_desc_struct *gdt_descr)
1188+{
1189+ unsigned long frames[16];
1190+ unsigned long va;
1191+ int f;
1192+
1193+ for (va = gdt_descr->address, f = 0;
1194+ va < gdt_descr->address + gdt_descr->size;
1195+ va += PAGE_SIZE, f++) {
1196+ frames[f] = virt_to_mfn(va);
1197+ make_lowmem_page_readonly(
1198+ (void *)va, XENFEAT_writable_descriptor_tables);
1199+ }
1200+ if (HYPERVISOR_set_gdt(frames, (gdt_descr->size + 1) / 8))
1201+ BUG();
1202+}
1203+
1204+/*
1205+ * cpu_init() initializes state that is per-CPU. Some data is already
1206+ * initialized (naturally) in the bootstrap process, such as the GDT
1207+ * and IDT. We reload them nevertheless, this function acts as a
1208+ * 'CPU state barrier', nothing should get across.
1209+ */
1210+void __cpuinit cpu_init(void)
1211+{
1212+ int cpu = smp_processor_id();
1213+#ifndef CONFIG_X86_NO_TSS
1214+ struct tss_struct * t = &per_cpu(init_tss, cpu);
1215+#endif
1216+ struct thread_struct *thread = &current->thread;
1217+ struct desc_struct *gdt;
1218+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
1219+
1220+ if (cpu_test_and_set(cpu, cpu_initialized)) {
1221+ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
1222+ for (;;) local_irq_enable();
1223+ }
1224+ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
1225+
1226+ if (cpu_has_vme || cpu_has_de)
1227+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1228+ if (tsc_disable && cpu_has_tsc) {
1229+ printk(KERN_NOTICE "Disabling TSC...\n");
1230+ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
1231+ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
1232+ set_in_cr4(X86_CR4_TSD);
1233+ }
1234+
1235+#ifndef CONFIG_XEN
1236+ /* The CPU hotplug case */
1237+ if (cpu_gdt_descr->address) {
1238+ gdt = (struct desc_struct *)cpu_gdt_descr->address;
1239+ memset(gdt, 0, PAGE_SIZE);
1240+ goto old_gdt;
1241+ }
1242+ /*
1243+ * This is a horrible hack to allocate the GDT. The problem
1244+ * is that cpu_init() is called really early for the boot CPU
1245+ * (and hence needs bootmem) but much later for the secondary
1246+ * CPUs, when bootmem will have gone away
1247+ */
1248+ if (NODE_DATA(0)->bdata->node_bootmem_map) {
1249+ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
1250+ /* alloc_bootmem_pages panics on failure, so no check */
1251+ memset(gdt, 0, PAGE_SIZE);
1252+ } else {
1253+ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
1254+ if (unlikely(!gdt)) {
1255+ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
1256+ for (;;)
1257+ local_irq_enable();
1258+ }
1259+ }
1260+old_gdt:
1261+ /*
1262+ * Initialize the per-CPU GDT with the boot GDT,
1263+ * and set up the GDT descriptor:
1264+ */
1265+ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
1266+
1267+ /* Set up GDT entry for 16bit stack */
1268+ *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
1269+ ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
1270+ ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
1271+ (CPU_16BIT_STACK_SIZE - 1);
1272+
1273+ cpu_gdt_descr->size = GDT_SIZE - 1;
1274+ cpu_gdt_descr->address = (unsigned long)gdt;
1275+#else
1276+ if (cpu == 0 && cpu_gdt_descr->address == 0) {
1277+ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
1278+ /* alloc_bootmem_pages panics on failure, so no check */
1279+ memset(gdt, 0, PAGE_SIZE);
1280+
1281+ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
1282+
1283+ cpu_gdt_descr->size = GDT_SIZE;
1284+ cpu_gdt_descr->address = (unsigned long)gdt;
1285+ }
1286+#endif
1287+
1288+ cpu_gdt_init(cpu_gdt_descr);
1289+
1290+ /*
1291+ * Set up and load the per-CPU TSS and LDT
1292+ */
1293+ atomic_inc(&init_mm.mm_count);
1294+ current->active_mm = &init_mm;
1295+ if (current->mm)
1296+ BUG();
1297+ enter_lazy_tlb(&init_mm, current);
1298+
1299+ load_esp0(t, thread);
1300+
1301+ load_LDT(&init_mm.context);
1302+
1303+#ifdef CONFIG_DOUBLEFAULT
1304+ /* Set up doublefault TSS pointer in the GDT */
1305+ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
1306+#endif
1307+
1308+ /* Clear %fs and %gs. */
1309+ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
1310+
1311+ /* Clear all 6 debug registers: */
1312+ set_debugreg(0, 0);
1313+ set_debugreg(0, 1);
1314+ set_debugreg(0, 2);
1315+ set_debugreg(0, 3);
1316+ set_debugreg(0, 6);
1317+ set_debugreg(0, 7);
1318+
1319+ /*
1320+ * Force FPU initialization:
1321+ */
1322+ current_thread_info()->status = 0;
1323+ clear_used_math();
1324+ mxcsr_feature_mask_init();
1325+}
1326+
1327+#ifdef CONFIG_HOTPLUG_CPU
1328+void __cpuinit cpu_uninit(void)
1329+{
1330+ int cpu = raw_smp_processor_id();
1331+ cpu_clear(cpu, cpu_initialized);
1332+
1333+ /* lazy TLB state */
1334+ per_cpu(cpu_tlbstate, cpu).state = 0;
1335+ per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
1336+}
1337+#endif
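
The Xen-specific piece in the hunk above is cpu_gdt_init(): instead of lgdt,
the pv guest hands the hypervisor a list of machine frame numbers for GDT
pages that have first been made read-only. A condensed, single-page sketch of
that pattern, using only calls that appear in the code above (the helper name
example_register_gdt() is illustrative and assumes GDT_SIZE fits in one page,
as in the Xen branch of cpu_init()):

/* Illustrative single-page restatement of cpu_gdt_init() above. */
static void example_register_gdt(const struct Xgt_desc_struct *d)
{
	unsigned long frame = virt_to_mfn(d->address);

	/* Xen only accepts descriptor-table pages that are read-only. */
	make_lowmem_page_readonly((void *)d->address,
				  XENFEAT_writable_descriptor_tables);

	/* Second argument is the number of 8-byte GDT entries. */
	if (HYPERVISOR_set_gdt(&frame, (d->size + 1) / 8))
		BUG();
}
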
1338Index: head-2008-11-25/arch/x86/kernel/cpu/mtrr/main-xen.c
1339===================================================================
1340--- /dev/null 1970-01-01 00:00:00.000000000 +0000
1341+++ head-2008-11-25/arch/x86/kernel/cpu/mtrr/main-xen.c 2008-01-28 12:24:18.000000000 +0100
1342@@ -0,0 +1,198 @@
1343+#include <linux/init.h>
1344+#include <linux/proc_fs.h>
1345+#include <linux/ctype.h>
1346+#include <linux/module.h>
1347+#include <linux/seq_file.h>
1348+#include <asm/uaccess.h>
1349+#include <linux/mutex.h>
1350+
1351+#include <asm/mtrr.h>
1352+#include "mtrr.h"
1353+
1354+static DEFINE_MUTEX(mtrr_mutex);
1355+
1356+void generic_get_mtrr(unsigned int reg, unsigned long *base,
1357+ unsigned int *size, mtrr_type * type)
1358+{
1359+ struct xen_platform_op op;
1360+
1361+ op.cmd = XENPF_read_memtype;
1362+ op.u.read_memtype.reg = reg;
1363+ if (unlikely(HYPERVISOR_platform_op(&op)))
1364+ memset(&op.u.read_memtype, 0, sizeof(op.u.read_memtype));
1365+
1366+ *size = op.u.read_memtype.nr_mfns;
1367+ *base = op.u.read_memtype.mfn;
1368+ *type = op.u.read_memtype.type;
1369+}
1370+
1371+struct mtrr_ops generic_mtrr_ops = {
1372+ .use_intel_if = 1,
1373+ .get = generic_get_mtrr,
1374+};
1375+
1376+struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
1377+unsigned int num_var_ranges;
1378+unsigned int *usage_table;
1379+
1380+static void __init set_num_var_ranges(void)
1381+{
1382+ struct xen_platform_op op;
1383+
1384+ for (num_var_ranges = 0; ; num_var_ranges++) {
1385+ op.cmd = XENPF_read_memtype;
1386+ op.u.read_memtype.reg = num_var_ranges;
1387+ if (HYPERVISOR_platform_op(&op) != 0)
1388+ break;
1389+ }
1390+}
1391+
1392+static void __init init_table(void)
1393+{
1394+ int i, max;
1395+
1396+ max = num_var_ranges;
1397+ if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
1398+ == NULL) {
1399+ printk(KERN_ERR "mtrr: could not allocate\n");
1400+ return;
1401+ }
1402+ for (i = 0; i < max; i++)
1403+ usage_table[i] = 0;
1404+}
1405+
1406+int mtrr_add_page(unsigned long base, unsigned long size,
1407+ unsigned int type, char increment)
1408+{
1409+ int error;
1410+ struct xen_platform_op op;
1411+
1412+ mutex_lock(&mtrr_mutex);
1413+
1414+ op.cmd = XENPF_add_memtype;
1415+ op.u.add_memtype.mfn = base;
1416+ op.u.add_memtype.nr_mfns = size;
1417+ op.u.add_memtype.type = type;
1418+ error = HYPERVISOR_platform_op(&op);
1419+ if (error) {
1420+ mutex_unlock(&mtrr_mutex);
1421+ BUG_ON(error > 0);
1422+ return error;
1423+ }
1424+
1425+ if (increment)
1426+ ++usage_table[op.u.add_memtype.reg];
1427+
1428+ mutex_unlock(&mtrr_mutex);
1429+
1430+ return op.u.add_memtype.reg;
1431+}
1432+
1433+static int mtrr_check(unsigned long base, unsigned long size)
1434+{
1435+ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
1436+ printk(KERN_WARNING
1437+ "mtrr: size and base must be multiples of 4 kiB\n");
1438+ printk(KERN_DEBUG
1439+ "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
1440+ dump_stack();
1441+ return -1;
1442+ }
1443+ return 0;
1444+}
1445+
1446+int
1447+mtrr_add(unsigned long base, unsigned long size, unsigned int type,
1448+ char increment)
1449+{
1450+ if (mtrr_check(base, size))
1451+ return -EINVAL;
1452+ return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
1453+ increment);
1454+}
1455+
1456+int mtrr_del_page(int reg, unsigned long base, unsigned long size)
1457+{
1458+ unsigned i;
1459+ mtrr_type ltype;
1460+ unsigned long lbase;
1461+ unsigned int lsize;
1462+ int error = -EINVAL;
1463+ struct xen_platform_op op;
1464+
1465+ mutex_lock(&mtrr_mutex);
1466+
1467+ if (reg < 0) {
1468+ /* Search for existing MTRR */
1469+ for (i = 0; i < num_var_ranges; ++i) {
1470+ mtrr_if->get(i, &lbase, &lsize, &ltype);
1471+ if (lbase == base && lsize == size) {
1472+ reg = i;
1473+ break;
1474+ }
1475+ }
1476+ if (reg < 0) {
1477+ printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
1478+ size);
1479+ goto out;
1480+ }
1481+ }
1482+ if (usage_table[reg] < 1) {
1483+ printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
1484+ goto out;
1485+ }
1486+ if (--usage_table[reg] < 1) {
1487+ op.cmd = XENPF_del_memtype;
1488+ op.u.del_memtype.handle = 0;
1489+ op.u.del_memtype.reg = reg;
1490+ error = HYPERVISOR_platform_op(&op);
1491+ if (error) {
1492+ BUG_ON(error > 0);
1493+ goto out;
1494+ }
1495+ }
1496+ error = reg;
1497+ out:
1498+ mutex_unlock(&mtrr_mutex);
1499+ return error;
1500+}
1501+
1502+int
1503+mtrr_del(int reg, unsigned long base, unsigned long size)
1504+{
1505+ if (mtrr_check(base, size))
1506+ return -EINVAL;
1507+ return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
1508+}
1509+
1510+EXPORT_SYMBOL(mtrr_add);
1511+EXPORT_SYMBOL(mtrr_del);
1512+
1513+void __init mtrr_bp_init(void)
1514+{
1515+}
1516+
1517+void mtrr_ap_init(void)
1518+{
1519+}
1520+
1521+static int __init mtrr_init(void)
1522+{
1523+ struct cpuinfo_x86 *c = &boot_cpu_data;
1524+
1525+ if (!is_initial_xendomain())
1526+ return -ENODEV;
1527+
1528+ if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
1529+ (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
1530+ (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
1531+ (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
1532+ return -ENODEV;
1533+
1534+ set_num_var_ranges();
1535+ init_table();
1536+
1537+ return 0;
1538+}
1539+
1540+subsys_initcall(mtrr_init);
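
In this Xen build, mtrr_add()/mtrr_del() no longer touch MSRs; they forward
XENPF_add_memtype/XENPF_del_memtype platform ops to the hypervisor, and
mtrr_init() refuses to register at all outside the initial domain. A minimal
caller-side sketch against the prototypes above -- the function name and the
frame-buffer region are made up for illustration:

/* Illustrative driver-side usage; base/size are placeholders. */
static int example_map_wc(unsigned long fb_base, unsigned long fb_size)
{
	int reg;

	/* Non-zero 'increment' bumps the per-register usage count. */
	reg = mtrr_add(fb_base, fb_size, MTRR_TYPE_WRCOMB, 1);
	if (reg < 0)
		return reg;	/* negative errno if the hypervisor refuses */

	/* ... use the write-combined mapping ... */

	/* Passing 'reg' back avoids the search by base/size in mtrr_del_page(). */
	return mtrr_del(reg, fb_base, fb_size);
}
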
1541Index: head-2008-11-25/arch/x86/kernel/entry_32-xen.S
1542===================================================================
1543--- /dev/null 1970-01-01 00:00:00.000000000 +0000
1544+++ head-2008-11-25/arch/x86/kernel/entry_32-xen.S 2007-12-10 08:47:31.000000000 +0100
1545@@ -0,0 +1,1238 @@
1546+/*
1547+ * linux/arch/i386/entry.S
1548+ *
1549+ * Copyright (C) 1991, 1992 Linus Torvalds
1550+ */
1551+
1552+/*
1553+ * entry.S contains the system-call and fault low-level handling routines.
1554+ * This also contains the timer-interrupt handler, as well as all interrupts
1555+ * and faults that can result in a task-switch.
1556+ *
1557+ * NOTE: This code handles signal-recognition, which happens every time
1558+ * after a timer-interrupt and after each system call.
1559+ *
1560+ * I changed all the .align's to 4 (16 byte alignment), as that's faster
1561+ * on a 486.
1562+ *
1563+ * Stack layout in 'ret_from_system_call':
1564+ * ptrace needs to have all regs on the stack.
1565+ * if the order here is changed, it needs to be
1566+ * updated in fork.c:copy_process, signal.c:do_signal,
1567+ * ptrace.c and ptrace.h
1568+ *
1569+ * 0(%esp) - %ebx
1570+ * 4(%esp) - %ecx
1571+ * 8(%esp) - %edx
1572+ * C(%esp) - %esi
1573+ * 10(%esp) - %edi
1574+ * 14(%esp) - %ebp
1575+ * 18(%esp) - %eax
1576+ * 1C(%esp) - %ds
1577+ * 20(%esp) - %es
1578+ * 24(%esp) - orig_eax
1579+ * 28(%esp) - %eip
1580+ * 2C(%esp) - %cs
1581+ * 30(%esp) - %eflags
1582+ * 34(%esp) - %oldesp
1583+ * 38(%esp) - %oldss
1584+ *
1585+ * "current" is in register %ebx during any slow entries.
1586+ */
1587+
1588+#include <linux/linkage.h>
1589+#include <asm/thread_info.h>
1590+#include <asm/irqflags.h>
1591+#include <asm/errno.h>
1592+#include <asm/segment.h>
1593+#include <asm/smp.h>
1594+#include <asm/page.h>
1595+#include <asm/desc.h>
1596+#include <asm/dwarf2.h>
1597+#include "irq_vectors.h"
1598+#include <xen/interface/xen.h>
1599+
1600+#define nr_syscalls ((syscall_table_size)/4)
1601+
1602+EBX = 0x00
1603+ECX = 0x04
1604+EDX = 0x08
1605+ESI = 0x0C
1606+EDI = 0x10
1607+EBP = 0x14
1608+EAX = 0x18
1609+DS = 0x1C
1610+ES = 0x20
1611+ORIG_EAX = 0x24
1612+EIP = 0x28
1613+CS = 0x2C
1614+EFLAGS = 0x30
1615+OLDESP = 0x34
1616+OLDSS = 0x38
1617+
1618+CF_MASK = 0x00000001
1619+TF_MASK = 0x00000100
1620+IF_MASK = 0x00000200
1621+DF_MASK = 0x00000400
1622+NT_MASK = 0x00004000
1623+VM_MASK = 0x00020000
1624+/* Pseudo-eflags. */
1625+NMI_MASK = 0x80000000
1626+
1627+#ifndef CONFIG_XEN
1628+#define DISABLE_INTERRUPTS cli
1629+#define ENABLE_INTERRUPTS sti
1630+#else
1631+/* Offsets into shared_info_t. */
1632+#define evtchn_upcall_pending /* 0 */
1633+#define evtchn_upcall_mask 1
1634+
1635+#define sizeof_vcpu_shift 6
1636+
1637+#ifdef CONFIG_SMP
1638+#define GET_VCPU_INFO movl TI_cpu(%ebp),%esi ; \
1639+ shl $sizeof_vcpu_shift,%esi ; \
1640+ addl HYPERVISOR_shared_info,%esi
1641+#else
1642+#define GET_VCPU_INFO movl HYPERVISOR_shared_info,%esi
1643+#endif
1644+
1645+#define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi)
1646+#define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi)
1647+#define DISABLE_INTERRUPTS GET_VCPU_INFO ; \
1648+ __DISABLE_INTERRUPTS
1649+#define ENABLE_INTERRUPTS GET_VCPU_INFO ; \
1650+ __ENABLE_INTERRUPTS
1651+#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
1652+#endif
1653+
1654+#ifdef CONFIG_PREEMPT
1655+#define preempt_stop cli; TRACE_IRQS_OFF
1656+#else
1657+#define preempt_stop
1658+#define resume_kernel restore_nocheck
1659+#endif
1660+
1661+.macro TRACE_IRQS_IRET
1662+#ifdef CONFIG_TRACE_IRQFLAGS
1663+ testl $IF_MASK,EFLAGS(%esp) # interrupts off?
1664+ jz 1f
1665+ TRACE_IRQS_ON
1666+1:
1667+#endif
1668+.endm
1669+
1670+#ifdef CONFIG_VM86
1671+#define resume_userspace_sig check_userspace
1672+#else
1673+#define resume_userspace_sig resume_userspace
1674+#endif
1675+
1676+#define SAVE_ALL \
1677+ cld; \
1678+ pushl %es; \
1679+ CFI_ADJUST_CFA_OFFSET 4;\
1680+ /*CFI_REL_OFFSET es, 0;*/\
1681+ pushl %ds; \
1682+ CFI_ADJUST_CFA_OFFSET 4;\
1683+ /*CFI_REL_OFFSET ds, 0;*/\
1684+ pushl %eax; \
1685+ CFI_ADJUST_CFA_OFFSET 4;\
1686+ CFI_REL_OFFSET eax, 0;\
1687+ pushl %ebp; \
1688+ CFI_ADJUST_CFA_OFFSET 4;\
1689+ CFI_REL_OFFSET ebp, 0;\
1690+ pushl %edi; \
1691+ CFI_ADJUST_CFA_OFFSET 4;\
1692+ CFI_REL_OFFSET edi, 0;\
1693+ pushl %esi; \
1694+ CFI_ADJUST_CFA_OFFSET 4;\
1695+ CFI_REL_OFFSET esi, 0;\
1696+ pushl %edx; \
1697+ CFI_ADJUST_CFA_OFFSET 4;\
1698+ CFI_REL_OFFSET edx, 0;\
1699+ pushl %ecx; \
1700+ CFI_ADJUST_CFA_OFFSET 4;\
1701+ CFI_REL_OFFSET ecx, 0;\
1702+ pushl %ebx; \
1703+ CFI_ADJUST_CFA_OFFSET 4;\
1704+ CFI_REL_OFFSET ebx, 0;\
1705+ movl $(__USER_DS), %edx; \
1706+ movl %edx, %ds; \
1707+ movl %edx, %es;
1708+
1709+#define RESTORE_INT_REGS \
1710+ popl %ebx; \
1711+ CFI_ADJUST_CFA_OFFSET -4;\
1712+ CFI_RESTORE ebx;\
1713+ popl %ecx; \
1714+ CFI_ADJUST_CFA_OFFSET -4;\
1715+ CFI_RESTORE ecx;\
1716+ popl %edx; \
1717+ CFI_ADJUST_CFA_OFFSET -4;\
1718+ CFI_RESTORE edx;\
1719+ popl %esi; \
1720+ CFI_ADJUST_CFA_OFFSET -4;\
1721+ CFI_RESTORE esi;\
1722+ popl %edi; \
1723+ CFI_ADJUST_CFA_OFFSET -4;\
1724+ CFI_RESTORE edi;\
1725+ popl %ebp; \
1726+ CFI_ADJUST_CFA_OFFSET -4;\
1727+ CFI_RESTORE ebp;\
1728+ popl %eax; \
1729+ CFI_ADJUST_CFA_OFFSET -4;\
1730+ CFI_RESTORE eax
1731+
1732+#define RESTORE_REGS \
1733+ RESTORE_INT_REGS; \
1734+1: popl %ds; \
1735+ CFI_ADJUST_CFA_OFFSET -4;\
1736+ /*CFI_RESTORE ds;*/\
1737+2: popl %es; \
1738+ CFI_ADJUST_CFA_OFFSET -4;\
1739+ /*CFI_RESTORE es;*/\
1740+.section .fixup,"ax"; \
1741+3: movl $0,(%esp); \
1742+ jmp 1b; \
1743+4: movl $0,(%esp); \
1744+ jmp 2b; \
1745+.previous; \
1746+.section __ex_table,"a";\
1747+ .align 4; \
1748+ .long 1b,3b; \
1749+ .long 2b,4b; \
1750+.previous
1751+
1752+#define RING0_INT_FRAME \
1753+ CFI_STARTPROC simple;\
1754+ CFI_DEF_CFA esp, 3*4;\
1755+ /*CFI_OFFSET cs, -2*4;*/\
1756+ CFI_OFFSET eip, -3*4
1757+
1758+#define RING0_EC_FRAME \
1759+ CFI_STARTPROC simple;\
1760+ CFI_DEF_CFA esp, 4*4;\
1761+ /*CFI_OFFSET cs, -2*4;*/\
1762+ CFI_OFFSET eip, -3*4
1763+
1764+#define RING0_PTREGS_FRAME \
1765+ CFI_STARTPROC simple;\
1766+ CFI_DEF_CFA esp, OLDESP-EBX;\
1767+ /*CFI_OFFSET cs, CS-OLDESP;*/\
1768+ CFI_OFFSET eip, EIP-OLDESP;\
1769+ /*CFI_OFFSET es, ES-OLDESP;*/\
1770+ /*CFI_OFFSET ds, DS-OLDESP;*/\
1771+ CFI_OFFSET eax, EAX-OLDESP;\
1772+ CFI_OFFSET ebp, EBP-OLDESP;\
1773+ CFI_OFFSET edi, EDI-OLDESP;\
1774+ CFI_OFFSET esi, ESI-OLDESP;\
1775+ CFI_OFFSET edx, EDX-OLDESP;\
1776+ CFI_OFFSET ecx, ECX-OLDESP;\
1777+ CFI_OFFSET ebx, EBX-OLDESP
1778+
1779+ENTRY(ret_from_fork)
1780+ CFI_STARTPROC
1781+ pushl %eax
1782+ CFI_ADJUST_CFA_OFFSET 4
1783+ call schedule_tail
1784+ GET_THREAD_INFO(%ebp)
1785+ popl %eax
1786+ CFI_ADJUST_CFA_OFFSET -4
1787+ pushl $0x0202 # Reset kernel eflags
1788+ CFI_ADJUST_CFA_OFFSET 4
1789+ popfl
1790+ CFI_ADJUST_CFA_OFFSET -4
1791+ jmp syscall_exit
1792+ CFI_ENDPROC
1793+
1794+/*
1795+ * Return to user mode is not as complex as all this looks,
1796+ * but we want the default path for a system call return to
1797+ * go as quickly as possible which is why some of this is
1798+ * less clear than it otherwise should be.
1799+ */
1800+
1801+ # userspace resumption stub bypassing syscall exit tracing
1802+ ALIGN
1803+ RING0_PTREGS_FRAME
1804+ret_from_exception:
1805+ preempt_stop
1806+ret_from_intr:
1807+ GET_THREAD_INFO(%ebp)
1808+check_userspace:
1809+ movl EFLAGS(%esp), %eax # mix EFLAGS and CS
1810+ movb CS(%esp), %al
1811+ testl $(VM_MASK | 2), %eax
1812+ jz resume_kernel
1813+ENTRY(resume_userspace)
1814+ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
1815+ # setting need_resched or sigpending
1816+ # between sampling and the iret
1817+ movl TI_flags(%ebp), %ecx
1818+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
1819+ # int/exception return?
1820+ jne work_pending
1821+ jmp restore_all
1822+
1823+#ifdef CONFIG_PREEMPT
1824+ENTRY(resume_kernel)
1825+ cli
1826+ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
1827+ jnz restore_nocheck
1828+need_resched:
1829+ movl TI_flags(%ebp), %ecx # need_resched set ?
1830+ testb $_TIF_NEED_RESCHED, %cl
1831+ jz restore_all
1832+ testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
1833+ jz restore_all
1834+ call preempt_schedule_irq
1835+ jmp need_resched
1836+#endif
1837+ CFI_ENDPROC
1838+
1839+/* SYSENTER_RETURN points to after the "sysenter" instruction in
1840+ the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
1841+
1842+ # sysenter call handler stub
1843+ENTRY(sysenter_entry)
1844+ CFI_STARTPROC simple
1845+ CFI_DEF_CFA esp, 0
1846+ CFI_REGISTER esp, ebp
1847+ movl SYSENTER_stack_esp0(%esp),%esp
1848+sysenter_past_esp:
1849+ /*
1850+ * No need to follow this irqs on/off section: the syscall
1851+ * disabled irqs and here we enable it straight after entry:
1852+ */
1853+ sti
1854+ pushl $(__USER_DS)
1855+ CFI_ADJUST_CFA_OFFSET 4
1856+ /*CFI_REL_OFFSET ss, 0*/
1857+ pushl %ebp
1858+ CFI_ADJUST_CFA_OFFSET 4
1859+ CFI_REL_OFFSET esp, 0
1860+ pushfl
1861+ CFI_ADJUST_CFA_OFFSET 4
1862+ pushl $(__USER_CS)
1863+ CFI_ADJUST_CFA_OFFSET 4
1864+ /*CFI_REL_OFFSET cs, 0*/
1865+ /*
1866+ * Push current_thread_info()->sysenter_return to the stack.
1867+ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
1868+ * pushed above; +8 corresponds to copy_thread's esp0 setting.
1869+ */
1870+ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
1871+ CFI_ADJUST_CFA_OFFSET 4
1872+ CFI_REL_OFFSET eip, 0
1873+
1874+/*
1875+ * Load the potential sixth argument from user stack.
1876+ * Careful about security.
1877+ */
1878+ cmpl $__PAGE_OFFSET-3,%ebp
1879+ jae syscall_fault
1880+1: movl (%ebp),%ebp
1881+.section __ex_table,"a"
1882+ .align 4
1883+ .long 1b,syscall_fault
1884+.previous
1885+
1886+ pushl %eax
1887+ CFI_ADJUST_CFA_OFFSET 4
1888+ SAVE_ALL
1889+ GET_THREAD_INFO(%ebp)
1890+
1891+ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
1892+ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
1893+ jnz syscall_trace_entry
1894+ cmpl $(nr_syscalls), %eax
1895+ jae syscall_badsys
1896+ call *sys_call_table(,%eax,4)
1897+ movl %eax,EAX(%esp)
1898+ DISABLE_INTERRUPTS
1899+ TRACE_IRQS_OFF
1900+ movl TI_flags(%ebp), %ecx
1901+ testw $_TIF_ALLWORK_MASK, %cx
1902+ jne syscall_exit_work
1903+/* if something modifies registers it must also disable sysexit */
1904+ movl EIP(%esp), %edx
1905+ movl OLDESP(%esp), %ecx
1906+ xorl %ebp,%ebp
1907+#ifdef CONFIG_XEN
1908+ TRACE_IRQS_ON
1909+ __ENABLE_INTERRUPTS
1910+sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/
1911+ __TEST_PENDING
1912+ jnz 14f # process more events if necessary...
1913+ movl ESI(%esp), %esi
1914+ sysexit
1915+14: __DISABLE_INTERRUPTS
1916+ TRACE_IRQS_OFF
1917+sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/
1918+ push %esp
1919+ call evtchn_do_upcall
1920+ add $4,%esp
1921+ jmp ret_from_intr
1922+#else
1923+ TRACE_IRQS_ON
1924+ sti
1925+ sysexit
1926+#endif /* !CONFIG_XEN */
1927+ CFI_ENDPROC
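
In the CONFIG_XEN branch above, event delivery is re-enabled and pending events are tested one final time before sysexit; if one slipped in, the code falls back to the interrupt-return path. A hedged C rendering of that policy (the two fields mirror the vcpu_info bits this file reaches through GET_VCPU_INFO, but treat the struct as illustrative):

	struct vcpu_flags {                        /* stand-in for the real vcpu_info  */
		unsigned char evtchn_upcall_pending;
		unsigned char evtchn_upcall_mask;
	};

	static int may_sysexit(struct vcpu_flags *v)
	{
		v->evtchn_upcall_mask = 0;         /* __ENABLE_INTERRUPTS              */
		if (v->evtchn_upcall_pending) {    /* __TEST_PENDING                   */
			v->evtchn_upcall_mask = 1; /* 14: __DISABLE_INTERRUPTS         */
			return 0;                  /* go run evtchn_do_upcall instead  */
		}
		return 1;                          /* safe to drop to user via sysexit */
	}
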
1928+
1929+ # pv sysenter call handler stub
1930+ENTRY(sysenter_entry_pv)
1931+ RING0_INT_FRAME
1932+ movl $__USER_DS,16(%esp)
1933+ movl %ebp,12(%esp)
1934+ movl $__USER_CS,4(%esp)
1935+ addl $4,%esp
1936+ /* +5*4 is SS:ESP,EFLAGS,CS:EIP. +8 is esp0 setting. */
1937+ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
1938+/*
1939+ * Load the potential sixth argument from user stack.
1940+ * Careful about security.
1941+ */
1942+ cmpl $__PAGE_OFFSET-3,%ebp
1943+ jae syscall_fault
1944+1: movl (%ebp),%ebp
1945+.section __ex_table,"a"
1946+ .align 4
1947+ .long 1b,syscall_fault
1948+.previous
1949+ /* fall through */
1950+ CFI_ENDPROC
1951+ENDPROC(sysenter_entry_pv)
1952+
1953+ # system call handler stub
1954+ENTRY(system_call)
1955+ RING0_INT_FRAME # can't unwind into user space anyway
1956+ pushl %eax # save orig_eax
1957+ CFI_ADJUST_CFA_OFFSET 4
1958+ SAVE_ALL
1959+ GET_THREAD_INFO(%ebp)
1960+ testl $TF_MASK,EFLAGS(%esp)
1961+ jz no_singlestep
1962+ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
1963+no_singlestep:
1964+ # system call tracing in operation / emulation
1965+ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
1966+ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
1967+ jnz syscall_trace_entry
1968+ cmpl $(nr_syscalls), %eax
1969+ jae syscall_badsys
1970+syscall_call:
1971+ call *sys_call_table(,%eax,4)
1972+ movl %eax,EAX(%esp) # store the return value
1973+syscall_exit:
1974+ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
1975+ # setting need_resched or sigpending
1976+ # between sampling and the iret
1977+ TRACE_IRQS_OFF
1978+ movl TI_flags(%ebp), %ecx
1979+ testw $_TIF_ALLWORK_MASK, %cx # current->work
1980+ jne syscall_exit_work
1981+
1982+restore_all:
1983+#ifndef CONFIG_XEN
1984+ movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
1985+ # Warning: OLDSS(%esp) contains the wrong/random values if we
1986+ # are returning to the kernel.
1987+ # See comments in process.c:copy_thread() for details.
1988+ movb OLDSS(%esp), %ah
1989+ movb CS(%esp), %al
1990+ andl $(VM_MASK | (4 << 8) | 3), %eax
1991+ cmpl $((4 << 8) | 3), %eax
1992+ CFI_REMEMBER_STATE
1993+ je ldt_ss # returning to user-space with LDT SS
1994+restore_nocheck:
1995+#else
1996+restore_nocheck:
1997+ movl EFLAGS(%esp), %eax
1998+ testl $(VM_MASK|NMI_MASK), %eax
1999+ CFI_REMEMBER_STATE
2000+ jnz hypervisor_iret
2001+ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
2002+ GET_VCPU_INFO
2003+ andb evtchn_upcall_mask(%esi),%al
2004+ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
2005+ CFI_REMEMBER_STATE
2006+ jnz restore_all_enable_events # != 0 => enable event delivery
2007+#endif
2008+ TRACE_IRQS_IRET
2009+restore_nocheck_notrace:
2010+ RESTORE_REGS
2011+ addl $4, %esp
2012+ CFI_ADJUST_CFA_OFFSET -4
2013+1: iret
2014+.section .fixup,"ax"
2015+iret_exc:
2016+#ifndef CONFIG_XEN
2017+ TRACE_IRQS_ON
2018+ sti
2019+#endif
2020+ pushl $0 # no error code
2021+ pushl $do_iret_error
2022+ jmp error_code
2023+.previous
2024+.section __ex_table,"a"
2025+ .align 4
2026+ .long 1b,iret_exc
2027+.previous
2028+
2029+ CFI_RESTORE_STATE
2030+#ifndef CONFIG_XEN
2031+ldt_ss:
2032+ larl OLDSS(%esp), %eax
2033+ jnz restore_nocheck
2034+ testl $0x00400000, %eax # returning to 32bit stack?
2035+	jnz restore_nocheck	# all right, normal return
2036+ /* If returning to userspace with 16bit stack,
2037+ * try to fix the higher word of ESP, as the CPU
2038+ * won't restore it.
2039+ * This is an "official" bug of all the x86-compatible
2040+ * CPUs, which we can try to work around to make
2041+ * dosemu and wine happy. */
2042+ subl $8, %esp # reserve space for switch16 pointer
2043+ CFI_ADJUST_CFA_OFFSET 8
2044+ cli
2045+ TRACE_IRQS_OFF
2046+ movl %esp, %eax
2047+ /* Set up the 16bit stack frame with switch32 pointer on top,
2048+ * and a switch16 pointer on top of the current frame. */
2049+ call setup_x86_bogus_stack
2050+ CFI_ADJUST_CFA_OFFSET -8 # frame has moved
2051+ TRACE_IRQS_IRET
2052+ RESTORE_REGS
2053+ lss 20+4(%esp), %esp # switch to 16bit stack
2054+1: iret
2055+.section __ex_table,"a"
2056+ .align 4
2057+ .long 1b,iret_exc
2058+.previous
2059+#else
2060+ ALIGN
2061+restore_all_enable_events:
2062+ TRACE_IRQS_ON
2063+ __ENABLE_INTERRUPTS
2064+scrit: /**** START OF CRITICAL REGION ****/
2065+ __TEST_PENDING
2066+ jnz 14f # process more events if necessary...
2067+ RESTORE_REGS
2068+ addl $4, %esp
2069+ CFI_ADJUST_CFA_OFFSET -4
2070+1: iret
2071+.section __ex_table,"a"
2072+ .align 4
2073+ .long 1b,iret_exc
2074+.previous
2075+14: __DISABLE_INTERRUPTS
2076+ TRACE_IRQS_OFF
2077+ jmp 11f
2078+ecrit: /**** END OF CRITICAL REGION ****/
2079+
2080+ CFI_RESTORE_STATE
2081+hypervisor_iret:
2082+ andl $~NMI_MASK, EFLAGS(%esp)
2083+ RESTORE_REGS
2084+ addl $4, %esp
2085+ CFI_ADJUST_CFA_OFFSET -4
2086+ jmp hypercall_page + (__HYPERVISOR_iret * 32)
2087+#endif
2088+ CFI_ENDPROC
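
The CONFIG_XEN restore_all path above encodes a three-way decision: bounce NMI/vm86 returns through the hypervisor's iret hypercall, re-enable event delivery first when the saved EFLAGS has IF set while events are still masked, or fall through to a plain iret. A hedged C sketch; EF_NMI assumes the NMI_MASK convention of using a reserved high EFLAGS bit, while EF_IF and EF_VM are the architectural bits:

	#define EF_IF  (1u << 9)	/* EFLAGS.IF                  */
	#define EF_VM  (1u << 17)	/* EFLAGS.VM                  */
	#define EF_NMI (1u << 31)	/* assumed value of NMI_MASK  */

	static int xen_return_path(unsigned int saved_eflags, unsigned char upcall_mask)
	{
		if (saved_eflags & (EF_VM | EF_NMI))
			return 2;	/* hypervisor_iret                       */
		if ((saved_eflags & EF_IF) && upcall_mask)
			return 1;	/* restore_all_enable_events             */
		return 0;		/* plain iret via restore_nocheck_notrace */
	}
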
2089+
2090+ # perform work that needs to be done immediately before resumption
2091+ ALIGN
2092+ RING0_PTREGS_FRAME # can't unwind into user space anyway
2093+work_pending:
2094+ testb $_TIF_NEED_RESCHED, %cl
2095+ jz work_notifysig
2096+work_resched:
2097+ call schedule
2098+ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
2099+ # setting need_resched or sigpending
2100+ # between sampling and the iret
2101+ TRACE_IRQS_OFF
2102+ movl TI_flags(%ebp), %ecx
2103+ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
2104+ # than syscall tracing?
2105+ jz restore_all
2106+ testb $_TIF_NEED_RESCHED, %cl
2107+ jnz work_resched
2108+
2109+work_notifysig: # deal with pending signals and
2110+ # notify-resume requests
2111+ testl $VM_MASK, EFLAGS(%esp)
2112+ movl %esp, %eax
2113+ jne work_notifysig_v86 # returning to kernel-space or
2114+ # vm86-space
2115+ xorl %edx, %edx
2116+ call do_notify_resume
2117+ jmp resume_userspace_sig
2118+
2119+ ALIGN
2120+work_notifysig_v86:
2121+#ifdef CONFIG_VM86
2122+ pushl %ecx # save ti_flags for do_notify_resume
2123+ CFI_ADJUST_CFA_OFFSET 4
2124+ call save_v86_state # %eax contains pt_regs pointer
2125+ popl %ecx
2126+ CFI_ADJUST_CFA_OFFSET -4
2127+ movl %eax, %esp
2128+ xorl %edx, %edx
2129+ call do_notify_resume
2130+ jmp resume_userspace_sig
2131+#endif
2132+
2133+ # perform syscall exit tracing
2134+ ALIGN
2135+syscall_trace_entry:
2136+ movl $-ENOSYS,EAX(%esp)
2137+ movl %esp, %eax
2138+ xorl %edx,%edx
2139+ call do_syscall_trace
2140+ cmpl $0, %eax
2141+ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
2142+ # so must skip actual syscall
2143+ movl ORIG_EAX(%esp), %eax
2144+ cmpl $(nr_syscalls), %eax
2145+ jnae syscall_call
2146+ jmp syscall_exit
2147+
2148+ # perform syscall exit tracing
2149+ ALIGN
2150+syscall_exit_work:
2151+ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
2152+ jz work_pending
2153+ TRACE_IRQS_ON
2154+ ENABLE_INTERRUPTS # could let do_syscall_trace() call
2155+ # schedule() instead
2156+ movl %esp, %eax
2157+ movl $1, %edx
2158+ call do_syscall_trace
2159+ jmp resume_userspace
2160+ CFI_ENDPROC
2161+
2162+ RING0_INT_FRAME # can't unwind into user space anyway
2163+syscall_fault:
2164+ pushl %eax # save orig_eax
2165+ CFI_ADJUST_CFA_OFFSET 4
2166+ SAVE_ALL
2167+ GET_THREAD_INFO(%ebp)
2168+ movl $-EFAULT,EAX(%esp)
2169+ jmp resume_userspace
2170+
2171+syscall_badsys:
2172+ movl $-ENOSYS,EAX(%esp)
2173+ jmp resume_userspace
2174+ CFI_ENDPROC
2175+
2176+#ifndef CONFIG_XEN
2177+#define FIXUP_ESPFIX_STACK \
2178+ movl %esp, %eax; \
2179+ /* switch to 32bit stack using the pointer on top of 16bit stack */ \
2180+ lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
2181+ /* copy data from 16bit stack to 32bit stack */ \
2182+ call fixup_x86_bogus_stack; \
2183+ /* put ESP to the proper location */ \
2184+ movl %eax, %esp;
2185+#define UNWIND_ESPFIX_STACK \
2186+ pushl %eax; \
2187+ CFI_ADJUST_CFA_OFFSET 4; \
2188+ movl %ss, %eax; \
2189+ /* see if on 16bit stack */ \
2190+ cmpw $__ESPFIX_SS, %ax; \
2191+ je 28f; \
2192+27: popl %eax; \
2193+ CFI_ADJUST_CFA_OFFSET -4; \
2194+.section .fixup,"ax"; \
2195+28: movl $__KERNEL_DS, %eax; \
2196+ movl %eax, %ds; \
2197+ movl %eax, %es; \
2198+ /* switch to 32bit stack */ \
2199+ FIXUP_ESPFIX_STACK; \
2200+ jmp 27b; \
2201+.previous
2202+
2203+/*
2204+ * Build the entry stubs and pointer table with
2205+ * some assembler magic.
2206+ */
2207+.data
2208+ENTRY(interrupt)
2209+.text
2210+
2211+vector=0
2212+ENTRY(irq_entries_start)
2213+ RING0_INT_FRAME
2214+.rept NR_IRQS
2215+ ALIGN
2216+ .if vector
2217+ CFI_ADJUST_CFA_OFFSET -4
2218+ .endif
2219+1: pushl $~(vector)
2220+ CFI_ADJUST_CFA_OFFSET 4
2221+ jmp common_interrupt
2222+.data
2223+ .long 1b
2224+.text
2225+vector=vector+1
2226+.endr
2227+
2228+/*
2229+ * the CPU automatically disables interrupts when executing an IRQ vector,
2230+ * so IRQ-flags tracing has to follow that:
2231+ */
2232+ ALIGN
2233+common_interrupt:
2234+ SAVE_ALL
2235+ TRACE_IRQS_OFF
2236+ movl %esp,%eax
2237+ call do_IRQ
2238+ jmp ret_from_intr
2239+ CFI_ENDPROC
2240+
2241+#define BUILD_INTERRUPT(name, nr) \
2242+ENTRY(name) \
2243+ RING0_INT_FRAME; \
2244+ pushl $~(nr); \
2245+ CFI_ADJUST_CFA_OFFSET 4; \
2246+ SAVE_ALL; \
2247+ TRACE_IRQS_OFF \
2248+ movl %esp,%eax; \
2249+ call smp_/**/name; \
2250+ jmp ret_from_intr; \
2251+ CFI_ENDPROC
2252+
2253+/* The include is where all of the SMP etc. interrupts come from */
2254+#include "entry_arch.h"
2255+#else
2256+#define UNWIND_ESPFIX_STACK
2257+#endif
2258+
2259+ENTRY(divide_error)
2260+ RING0_INT_FRAME
2261+ pushl $0 # no error code
2262+ CFI_ADJUST_CFA_OFFSET 4
2263+ pushl $do_divide_error
2264+ CFI_ADJUST_CFA_OFFSET 4
2265+ ALIGN
2266+error_code:
2267+ pushl %ds
2268+ CFI_ADJUST_CFA_OFFSET 4
2269+ /*CFI_REL_OFFSET ds, 0*/
2270+ pushl %eax
2271+ CFI_ADJUST_CFA_OFFSET 4
2272+ CFI_REL_OFFSET eax, 0
2273+ xorl %eax, %eax
2274+ pushl %ebp
2275+ CFI_ADJUST_CFA_OFFSET 4
2276+ CFI_REL_OFFSET ebp, 0
2277+ pushl %edi
2278+ CFI_ADJUST_CFA_OFFSET 4
2279+ CFI_REL_OFFSET edi, 0
2280+ pushl %esi
2281+ CFI_ADJUST_CFA_OFFSET 4
2282+ CFI_REL_OFFSET esi, 0
2283+ pushl %edx
2284+ CFI_ADJUST_CFA_OFFSET 4
2285+ CFI_REL_OFFSET edx, 0
2286+ decl %eax # eax = -1
2287+ pushl %ecx
2288+ CFI_ADJUST_CFA_OFFSET 4
2289+ CFI_REL_OFFSET ecx, 0
2290+ pushl %ebx
2291+ CFI_ADJUST_CFA_OFFSET 4
2292+ CFI_REL_OFFSET ebx, 0
2293+ cld
2294+ pushl %es
2295+ CFI_ADJUST_CFA_OFFSET 4
2296+ /*CFI_REL_OFFSET es, 0*/
2297+ UNWIND_ESPFIX_STACK
2298+ popl %ecx
2299+ CFI_ADJUST_CFA_OFFSET -4
2300+ /*CFI_REGISTER es, ecx*/
2301+ movl ES(%esp), %edi # get the function address
2302+ movl ORIG_EAX(%esp), %edx # get the error code
2303+ movl %eax, ORIG_EAX(%esp)
2304+ movl %ecx, ES(%esp)
2305+ /*CFI_REL_OFFSET es, ES*/
2306+ movl $(__USER_DS), %ecx
2307+ movl %ecx, %ds
2308+ movl %ecx, %es
2309+ movl %esp,%eax # pt_regs pointer
2310+ call *%edi
2311+ jmp ret_from_exception
2312+ CFI_ENDPROC
2313+
2314+#ifdef CONFIG_XEN
2315+# A note on the "critical region" in our callback handler.
2316+# We want to avoid stacking callback handlers due to events occurring
2317+# during handling of the last event. To do this, we keep events disabled
2318+# until we've done all processing. HOWEVER, we must enable events before
2319+# popping the stack frame (can't be done atomically) and so it would still
2320+# be possible to get enough handler activations to overflow the stack.
2321+# Although unlikely, bugs of that kind are hard to track down, so we'd
2322+# like to avoid the possibility.
2323+# So, on entry to the handler we detect whether we interrupted an
2324+# existing activation in its critical region -- if so, we pop the current
2325+# activation and restart the handler using the previous one.
2326+#
2327+# The sysexit critical region is slightly different. sysexit
2328+# atomically removes the entire stack frame. If we interrupt in the
2329+# critical region we know that the entire frame is present and correct
2330+# so we can simply throw away the new one.
2331+ENTRY(hypervisor_callback)
2332+ RING0_INT_FRAME
2333+ pushl %eax
2334+ CFI_ADJUST_CFA_OFFSET 4
2335+ SAVE_ALL
2336+ movl EIP(%esp),%eax
2337+ cmpl $scrit,%eax
2338+ jb 11f
2339+ cmpl $ecrit,%eax
2340+ jb critical_region_fixup
2341+ cmpl $sysexit_scrit,%eax
2342+ jb 11f
2343+ cmpl $sysexit_ecrit,%eax
2344+ ja 11f
2345+ addl $OLDESP,%esp # Remove eflags...ebx from stack frame.
2346+11: push %esp
2347+ CFI_ADJUST_CFA_OFFSET 4
2348+ call evtchn_do_upcall
2349+ add $4,%esp
2350+ CFI_ADJUST_CFA_OFFSET -4
2351+ jmp ret_from_intr
2352+ CFI_ENDPROC
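
The four compares at the top of hypervisor_callback implement the policy described in the comment block: decide whether the interrupted EIP lies inside the iret critical region (merge the two frames) or inside the sysexit critical region (discard the new frame). A hedged C sketch of that classification, with the label addresses passed in as parameters:

	enum cb_action { CB_NORMAL, CB_MERGE_FRAMES, CB_DROP_NEW_FRAME };

	static enum cb_action classify_eip(unsigned long eip,
					   unsigned long scrit, unsigned long ecrit,
					   unsigned long sysexit_scrit, unsigned long sysexit_ecrit)
	{
		if (eip >= scrit && eip < ecrit)
			return CB_MERGE_FRAMES;		/* critical_region_fixup        */
		if (eip >= sysexit_scrit && eip <= sysexit_ecrit)
			return CB_DROP_NEW_FRAME;	/* addl $OLDESP,%esp; then 11:  */
		return CB_NORMAL;			/* straight to evtchn_do_upcall */
	}
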
2353+
2354+# [How we do the fixup]. We want to merge the current stack frame with the
2355+# just-interrupted frame. How we do this depends on where in the critical
2356+# region the interrupted handler was executing, and so how many saved
2357+# registers are in each frame. We do this quickly using the lookup table
2358+# 'critical_fixup_table'. For each byte offset in the critical region, it
2359+# provides the number of bytes which have already been popped from the
2360+# interrupted stack frame.
2361+critical_region_fixup:
2362+	movzbl critical_fixup_table-scrit(%eax),%ecx # %ecx = number of bytes already popped
2363+ cmpb $0xff,%cl # 0xff => vcpu_info critical region
2364+ jne 15f
2365+ xorl %ecx,%ecx
2366+15: leal (%esp,%ecx),%esi # %esi points at end of src region
2367+ leal OLDESP(%esp),%edi # %edi points at end of dst region
2368+	shrl $2,%ecx			# convert byte count to word count
2369+ je 17f # skip loop if nothing to copy
2370+16: subl $4,%esi # pre-decrementing copy loop
2371+ subl $4,%edi
2372+ movl (%esi),%eax
2373+ movl %eax,(%edi)
2374+ loop 16b
2375+17: movl %edi,%esp # final %edi is top of merged stack
2376+ jmp 11b
2377+
2378+.section .rodata,"a"
2379+critical_fixup_table:
2380+ .byte 0xff,0xff,0xff # testb $0xff,(%esi) = __TEST_PENDING
2381+ .byte 0xff,0xff # jnz 14f
2382+ .byte 0x00 # pop %ebx
2383+ .byte 0x04 # pop %ecx
2384+ .byte 0x08 # pop %edx
2385+ .byte 0x0c # pop %esi
2386+ .byte 0x10 # pop %edi
2387+ .byte 0x14 # pop %ebp
2388+ .byte 0x18 # pop %eax
2389+ .byte 0x1c # pop %ds
2390+ .byte 0x20 # pop %es
2391+ .byte 0x24,0x24,0x24 # add $4,%esp
2392+ .byte 0x28 # iret
2393+ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
2394+ .byte 0x00,0x00 # jmp 11b
2395+.previous
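
Read together, critical_region_fixup and critical_fixup_table perform a table-driven merge: the byte fetched from the table says how much of the old frame the interrupted code had already popped, and that many bytes are copied from the bottom of the new frame up to just below the old frame's remainder. A hedged C sketch of the copy loop; oldesp_offset stands for the OLDESP asm-offsets constant, i.e. the end of the ring-0 frame:

	static void *merge_frames(unsigned long *new_frame, unsigned long oldesp_offset,
				  unsigned long bytes_already_popped)
	{
		char *src = (char *)new_frame + bytes_already_popped;	/* end of source region  */
		char *dst = (char *)new_frame + oldesp_offset;		/* end of dest region    */
		unsigned long words = bytes_already_popped / 4;		/* shrl $2,%ecx          */

		while (words--) {					/* pre-decrementing copy */
			src -= 4;
			dst -= 4;
			*(unsigned long *)dst = *(unsigned long *)src;
		}
		return dst;	/* final %edi becomes the merged stack pointer (%esp) */
	}
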
2396+
2397+# Hypervisor uses this for application faults while it executes.
2398+# We get here for two reasons:
2399+# 1. Fault while reloading DS, ES, FS or GS
2400+# 2. Fault while executing IRET
2401+# Category 1 we fix up by reattempting the load, and zeroing the segment
2402+# register if the load fails.
2403+# Category 2 we fix up by jumping to do_iret_error. We cannot use the
2404+# normal Linux return path in this case because if we use the IRET hypercall
2405+# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
2406+# We distinguish between categories by maintaining a status value in EAX.
2407+ENTRY(failsafe_callback)
2408+ pushl %eax
2409+ movl $1,%eax
2410+1: mov 4(%esp),%ds
2411+2: mov 8(%esp),%es
2412+3: mov 12(%esp),%fs
2413+4: mov 16(%esp),%gs
2414+ testl %eax,%eax
2415+ popl %eax
2416+ jz 5f
2417+ addl $16,%esp # EAX != 0 => Category 2 (Bad IRET)
2418+ jmp iret_exc
2419+5: addl $16,%esp # EAX == 0 => Category 1 (Bad segment)
2420+ RING0_INT_FRAME
2421+ pushl $0
2422+ SAVE_ALL
2423+ jmp ret_from_exception
2424+.section .fixup,"ax"; \
2425+6: xorl %eax,%eax; \
2426+ movl %eax,4(%esp); \
2427+ jmp 1b; \
2428+7: xorl %eax,%eax; \
2429+ movl %eax,8(%esp); \
2430+ jmp 2b; \
2431+8: xorl %eax,%eax; \
2432+ movl %eax,12(%esp); \
2433+ jmp 3b; \
2434+9: xorl %eax,%eax; \
2435+ movl %eax,16(%esp); \
2436+ jmp 4b; \
2437+.previous; \
2438+.section __ex_table,"a"; \
2439+ .align 4; \
2440+ .long 1b,6b; \
2441+ .long 2b,7b; \
2442+ .long 3b,8b; \
2443+ .long 4b,9b; \
2444+.previous
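
The EAX value threaded through failsafe_callback is a tiny state machine: it starts at 1, and every segment-reload fixup clears it before retrying with a null selector, so a surviving 1 means the fault must have come from the IRET itself. A hedged, purely illustrative C rendering of that protocol:

	enum failsafe_cat { CAT_BAD_SEGMENT = 1, CAT_BAD_IRET = 2 };

	static enum failsafe_cat classify_failsafe(int any_segment_reload_faulted)
	{
		int eax = 1;			/* movl $1,%eax                       */
		if (any_segment_reload_faulted)
			eax = 0;		/* fixup: xorl %eax,%eax; retry load  */
		return eax ? CAT_BAD_IRET	/* jmp iret_exc                       */
			   : CAT_BAD_SEGMENT;	/* rebuild frame, ret_from_exception  */
	}
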
2445+#endif
2446+ CFI_ENDPROC
2447+
2448+ENTRY(coprocessor_error)
2449+ RING0_INT_FRAME
2450+ pushl $0
2451+ CFI_ADJUST_CFA_OFFSET 4
2452+ pushl $do_coprocessor_error
2453+ CFI_ADJUST_CFA_OFFSET 4
2454+ jmp error_code
2455+ CFI_ENDPROC
2456+
2457+ENTRY(simd_coprocessor_error)
2458+ RING0_INT_FRAME
2459+ pushl $0
2460+ CFI_ADJUST_CFA_OFFSET 4
2461+ pushl $do_simd_coprocessor_error
2462+ CFI_ADJUST_CFA_OFFSET 4
2463+ jmp error_code
2464+ CFI_ENDPROC
2465+
2466+ENTRY(device_not_available)
2467+ RING0_INT_FRAME
2468+ pushl $-1 # mark this as an int
2469+ CFI_ADJUST_CFA_OFFSET 4
2470+ SAVE_ALL
2471+#ifndef CONFIG_XEN
2472+ movl %cr0, %eax
2473+ testl $0x4, %eax # EM (math emulation bit)
2474+ je device_available_emulate
2475+ pushl $0 # temporary storage for ORIG_EIP
2476+ CFI_ADJUST_CFA_OFFSET 4
2477+ call math_emulate
2478+ addl $4, %esp
2479+ CFI_ADJUST_CFA_OFFSET -4
2480+ jmp ret_from_exception
2481+device_available_emulate:
2482+#endif
2483+ preempt_stop
2484+ call math_state_restore
2485+ jmp ret_from_exception
2486+ CFI_ENDPROC
2487+
2488+#ifndef CONFIG_XEN
2489+/*
2490+ * Debug traps and NMI can happen at the one SYSENTER instruction
2491+ * that sets up the real kernel stack. Check here, since we can't
2492+ * allow the wrong stack to be used.
2493+ *
2494+ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
2495+ * already pushed 3 words if it hits on the sysenter instruction:
2496+ * eflags, cs and eip.
2497+ *
2498+ * We just load the right stack, and push the three (known) values
2499+ * by hand onto the new stack - while updating the return eip past
2500+ * the instruction that would have done it for sysenter.
2501+ */
2502+#define FIX_STACK(offset, ok, label) \
2503+ cmpw $__KERNEL_CS,4(%esp); \
2504+ jne ok; \
2505+label: \
2506+ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
2507+ pushfl; \
2508+ pushl $__KERNEL_CS; \
2509+ pushl $sysenter_past_esp
2510+#endif /* CONFIG_XEN */
2511+
2512+KPROBE_ENTRY(debug)
2513+ RING0_INT_FRAME
2514+#ifndef CONFIG_XEN
2515+ cmpl $sysenter_entry,(%esp)
2516+ jne debug_stack_correct
2517+ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
2518+debug_stack_correct:
2519+#endif /* !CONFIG_XEN */
2520+ pushl $-1 # mark this as an int
2521+ CFI_ADJUST_CFA_OFFSET 4
2522+ SAVE_ALL
2523+ xorl %edx,%edx # error code 0
2524+ movl %esp,%eax # pt_regs pointer
2525+ call do_debug
2526+ jmp ret_from_exception
2527+ CFI_ENDPROC
2528+ .previous .text
2529+#ifndef CONFIG_XEN
2530+/*
2531+ * NMI is doubly nasty. It can happen _while_ we're handling
2532+ * a debug fault, and the debug fault hasn't yet been able to
2533+ * clear up the stack. So we first check whether we got an
2534+ * NMI on the sysenter entry path, but after that we need to
2535+ * check whether we got an NMI on the debug path where the debug
2536+ * fault happened on the sysenter path.
2537+ */
2538+ENTRY(nmi)
2539+ RING0_INT_FRAME
2540+ pushl %eax
2541+ CFI_ADJUST_CFA_OFFSET 4
2542+ movl %ss, %eax
2543+ cmpw $__ESPFIX_SS, %ax
2544+ popl %eax
2545+ CFI_ADJUST_CFA_OFFSET -4
2546+ je nmi_16bit_stack
2547+ cmpl $sysenter_entry,(%esp)
2548+ je nmi_stack_fixup
2549+ pushl %eax
2550+ CFI_ADJUST_CFA_OFFSET 4
2551+ movl %esp,%eax
2552+ /* Do not access memory above the end of our stack page,
2553+ * it might not exist.
2554+ */
2555+ andl $(THREAD_SIZE-1),%eax
2556+ cmpl $(THREAD_SIZE-20),%eax
2557+ popl %eax
2558+ CFI_ADJUST_CFA_OFFSET -4
2559+ jae nmi_stack_correct
2560+ cmpl $sysenter_entry,12(%esp)
2561+ je nmi_debug_stack_check
2562+nmi_stack_correct:
2563+ pushl %eax
2564+ CFI_ADJUST_CFA_OFFSET 4
2565+ SAVE_ALL
2566+ xorl %edx,%edx # zero error code
2567+ movl %esp,%eax # pt_regs pointer
2568+ call do_nmi
2569+ jmp restore_nocheck_notrace
2570+ CFI_ENDPROC
2571+
2572+nmi_stack_fixup:
2573+ FIX_STACK(12,nmi_stack_correct, 1)
2574+ jmp nmi_stack_correct
2575+nmi_debug_stack_check:
2576+ cmpw $__KERNEL_CS,16(%esp)
2577+ jne nmi_stack_correct
2578+ cmpl $debug,(%esp)
2579+ jb nmi_stack_correct
2580+ cmpl $debug_esp_fix_insn,(%esp)
2581+ ja nmi_stack_correct
2582+ FIX_STACK(24,nmi_stack_correct, 1)
2583+ jmp nmi_stack_correct
2584+
2585+nmi_16bit_stack:
2586+ RING0_INT_FRAME
2587+ /* create the pointer to lss back */
2588+ pushl %ss
2589+ CFI_ADJUST_CFA_OFFSET 4
2590+ pushl %esp
2591+ CFI_ADJUST_CFA_OFFSET 4
2592+ movzwl %sp, %esp
2593+ addw $4, (%esp)
2594+ /* copy the iret frame of 12 bytes */
2595+ .rept 3
2596+ pushl 16(%esp)
2597+ CFI_ADJUST_CFA_OFFSET 4
2598+ .endr
2599+ pushl %eax
2600+ CFI_ADJUST_CFA_OFFSET 4
2601+ SAVE_ALL
2602+ FIXUP_ESPFIX_STACK # %eax == %esp
2603+ CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
2604+ xorl %edx,%edx # zero error code
2605+ call do_nmi
2606+ RESTORE_REGS
2607+ lss 12+4(%esp), %esp # back to 16bit stack
2608+1: iret
2609+ CFI_ENDPROC
2610+.section __ex_table,"a"
2611+ .align 4
2612+ .long 1b,iret_exc
2613+.previous
2614+#else
2615+ENTRY(nmi)
2616+ RING0_INT_FRAME
2617+ pushl %eax
2618+ CFI_ADJUST_CFA_OFFSET 4
2619+ SAVE_ALL
2620+ xorl %edx,%edx # zero error code
2621+ movl %esp,%eax # pt_regs pointer
2622+ call do_nmi
2623+ orl $NMI_MASK, EFLAGS(%esp)
2624+ jmp restore_all
2625+ CFI_ENDPROC
2626+#endif
2627+
2628+KPROBE_ENTRY(int3)
2629+ RING0_INT_FRAME
2630+ pushl $-1 # mark this as an int
2631+ CFI_ADJUST_CFA_OFFSET 4
2632+ SAVE_ALL
2633+ xorl %edx,%edx # zero error code
2634+ movl %esp,%eax # pt_regs pointer
2635+ call do_int3
2636+ jmp ret_from_exception
2637+ CFI_ENDPROC
2638+ .previous .text
2639+
2640+ENTRY(overflow)
2641+ RING0_INT_FRAME
2642+ pushl $0
2643+ CFI_ADJUST_CFA_OFFSET 4
2644+ pushl $do_overflow
2645+ CFI_ADJUST_CFA_OFFSET 4
2646+ jmp error_code
2647+ CFI_ENDPROC
2648+
2649+ENTRY(bounds)
2650+ RING0_INT_FRAME
2651+ pushl $0
2652+ CFI_ADJUST_CFA_OFFSET 4
2653+ pushl $do_bounds
2654+ CFI_ADJUST_CFA_OFFSET 4
2655+ jmp error_code
2656+ CFI_ENDPROC
2657+
2658+ENTRY(invalid_op)
2659+ RING0_INT_FRAME
2660+ pushl $0
2661+ CFI_ADJUST_CFA_OFFSET 4
2662+ pushl $do_invalid_op
2663+ CFI_ADJUST_CFA_OFFSET 4
2664+ jmp error_code
2665+ CFI_ENDPROC
2666+
2667+ENTRY(coprocessor_segment_overrun)
2668+ RING0_INT_FRAME
2669+ pushl $0
2670+ CFI_ADJUST_CFA_OFFSET 4
2671+ pushl $do_coprocessor_segment_overrun
2672+ CFI_ADJUST_CFA_OFFSET 4
2673+ jmp error_code
2674+ CFI_ENDPROC
2675+
2676+ENTRY(invalid_TSS)
2677+ RING0_EC_FRAME
2678+ pushl $do_invalid_TSS
2679+ CFI_ADJUST_CFA_OFFSET 4
2680+ jmp error_code
2681+ CFI_ENDPROC
2682+
2683+ENTRY(segment_not_present)
2684+ RING0_EC_FRAME
2685+ pushl $do_segment_not_present
2686+ CFI_ADJUST_CFA_OFFSET 4
2687+ jmp error_code
2688+ CFI_ENDPROC
2689+
2690+ENTRY(stack_segment)
2691+ RING0_EC_FRAME
2692+ pushl $do_stack_segment
2693+ CFI_ADJUST_CFA_OFFSET 4
2694+ jmp error_code
2695+ CFI_ENDPROC
2696+
2697+KPROBE_ENTRY(general_protection)
2698+ RING0_EC_FRAME
2699+ pushl $do_general_protection
2700+ CFI_ADJUST_CFA_OFFSET 4
2701+ jmp error_code
2702+ CFI_ENDPROC
2703+ .previous .text
2704+
2705+ENTRY(alignment_check)
2706+ RING0_EC_FRAME
2707+ pushl $do_alignment_check
2708+ CFI_ADJUST_CFA_OFFSET 4
2709+ jmp error_code
2710+ CFI_ENDPROC
2711+
2712+KPROBE_ENTRY(page_fault)
2713+ RING0_EC_FRAME
2714+ pushl $do_page_fault
2715+ CFI_ADJUST_CFA_OFFSET 4
2716+ jmp error_code
2717+ CFI_ENDPROC
2718+ .previous .text
2719+
2720+#ifdef CONFIG_X86_MCE
2721+ENTRY(machine_check)
2722+ RING0_INT_FRAME
2723+ pushl $0
2724+ CFI_ADJUST_CFA_OFFSET 4
2725+ pushl machine_check_vector
2726+ CFI_ADJUST_CFA_OFFSET 4
2727+ jmp error_code
2728+ CFI_ENDPROC
2729+#endif
2730+
2731+#ifndef CONFIG_XEN
2732+ENTRY(spurious_interrupt_bug)
2733+ RING0_INT_FRAME
2734+ pushl $0
2735+ CFI_ADJUST_CFA_OFFSET 4
2736+ pushl $do_spurious_interrupt_bug
2737+ CFI_ADJUST_CFA_OFFSET 4
2738+ jmp error_code
2739+ CFI_ENDPROC
2740+#endif /* !CONFIG_XEN */
2741+
2742+#ifdef CONFIG_STACK_UNWIND
2743+ENTRY(arch_unwind_init_running)
2744+ CFI_STARTPROC
2745+ movl 4(%esp), %edx
2746+ movl (%esp), %ecx
2747+ leal 4(%esp), %eax
2748+ movl %ebx, EBX(%edx)
2749+ xorl %ebx, %ebx
2750+ movl %ebx, ECX(%edx)
2751+ movl %ebx, EDX(%edx)
2752+ movl %esi, ESI(%edx)
2753+ movl %edi, EDI(%edx)
2754+ movl %ebp, EBP(%edx)
2755+ movl %ebx, EAX(%edx)
2756+ movl $__USER_DS, DS(%edx)
2757+ movl $__USER_DS, ES(%edx)
2758+ movl %ebx, ORIG_EAX(%edx)
2759+ movl %ecx, EIP(%edx)
2760+ movl 12(%esp), %ecx
2761+ movl $__KERNEL_CS, CS(%edx)
2762+ movl %ebx, EFLAGS(%edx)
2763+ movl %eax, OLDESP(%edx)
2764+ movl 8(%esp), %eax
2765+ movl %ecx, 8(%esp)
2766+ movl EBX(%edx), %ebx
2767+ movl $__KERNEL_DS, OLDSS(%edx)
2768+ jmpl *%eax
2769+ CFI_ENDPROC
2770+ENDPROC(arch_unwind_init_running)
2771+#endif
2772+
2773+ENTRY(fixup_4gb_segment)
2774+ RING0_EC_FRAME
2775+ pushl $do_fixup_4gb_segment
2776+ CFI_ADJUST_CFA_OFFSET 4
2777+ jmp error_code
2778+ CFI_ENDPROC
2779+
2780+.section .rodata,"a"
2781+#include "syscall_table.S"
2782+
2783+syscall_table_size=(.-sys_call_table)
2784Index: head-2008-11-25/arch/x86/kernel/fixup.c
2785===================================================================
2786--- /dev/null 1970-01-01 00:00:00.000000000 +0000
2787+++ head-2008-11-25/arch/x86/kernel/fixup.c 2008-01-28 12:24:18.000000000 +0100
2788@@ -0,0 +1,88 @@
2789+/******************************************************************************
2790+ * fixup.c
2791+ *
2792+ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
2793+ * Used to avoid repeated slow emulation of common instructions used by the
2794+ * user-space TLS (Thread-Local Storage) libraries.
2795+ *
2796+ * **** NOTE ****
2797+ * Issues with the binary rewriting have caused it to be removed. Instead
2798+ * we rely on Xen's emulator to boot the kernel, and then print a banner
2799+ * message recommending that the user disable /lib/tls.
2800+ *
2801+ * Copyright (c) 2004, K A Fraser
2802+ *
2803+ * This program is free software; you can redistribute it and/or modify
2804+ * it under the terms of the GNU General Public License as published by
2805+ * the Free Software Foundation; either version 2 of the License, or
2806+ * (at your option) any later version.
2807+ *
2808+ * This program is distributed in the hope that it will be useful,
2809+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
2810+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2811+ * GNU General Public License for more details.
2812+ *
2813+ * You should have received a copy of the GNU General Public License
2814+ * along with this program; if not, write to the Free Software
2815+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
2816+ */
2817+
2818+#include <linux/init.h>
2819+#include <linux/sched.h>
2820+#include <linux/slab.h>
2821+#include <linux/kernel.h>
2822+#include <linux/delay.h>
2823+#include <linux/version.h>
2824+
2825+#define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args )
2826+
2827+fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
2828+{
2829+ static unsigned long printed = 0;
2830+ char info[100];
2831+ int i;
2832+
2833+ /* Ignore statically-linked init. */
2834+ if (current->tgid == 1)
2835+ return;
2836+
2837+ VOID(HYPERVISOR_vm_assist(VMASST_CMD_disable,
2838+ VMASST_TYPE_4gb_segments_notify));
2839+
2840+ if (test_and_set_bit(0, &printed))
2841+ return;
2842+
2843+ sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
2844+
2845+ DP("");
2846+ DP("***************************************************************");
2847+ DP("***************************************************************");
2848+ DP("** WARNING: Currently emulating unsupported memory accesses **");
2849+ DP("** in /lib/tls glibc libraries. The emulation is **");
2850+ DP("** slow. To ensure full performance you should **");
2851+ DP("** install a 'xen-friendly' (nosegneg) version of **");
2852+ DP("** the library, or disable tls support by executing **");
2853+ DP("** the following as root: **");
2854+ DP("** mv /lib/tls /lib/tls.disabled **");
2855+ DP("** Offending process: %-38.38s **", info);
2856+ DP("***************************************************************");
2857+ DP("***************************************************************");
2858+ DP("");
2859+
2860+ for (i = 5; i > 0; i--) {
2861+ touch_softlockup_watchdog();
2862+ printk("Pausing... %d", i);
2863+ mdelay(1000);
2864+ printk("\b\b\b\b\b\b\b\b\b\b\b\b");
2865+ }
2866+
2867+ printk("Continuing...\n\n");
2868+}
2869+
2870+static int __init fixup_init(void)
2871+{
2872+ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
2873+ VMASST_TYPE_4gb_segments_notify));
2874+ return 0;
2875+}
2876+__initcall(fixup_init);
2877Index: head-2008-11-25/arch/x86/kernel/head_32-xen.S
2878===================================================================
2879--- /dev/null 1970-01-01 00:00:00.000000000 +0000
2880+++ head-2008-11-25/arch/x86/kernel/head_32-xen.S 2007-06-12 13:12:48.000000000 +0200
2881@@ -0,0 +1,207 @@
2882+
2883+
2884+.text
2885+#include <linux/elfnote.h>
2886+#include <linux/threads.h>
2887+#include <linux/linkage.h>
2888+#include <asm/segment.h>
2889+#include <asm/page.h>
2890+#include <asm/cache.h>
2891+#include <asm/thread_info.h>
2892+#include <asm/asm-offsets.h>
2893+#include <asm/dwarf2.h>
2894+#include <xen/interface/xen.h>
2895+#include <xen/interface/elfnote.h>
2896+
2897+/*
2898+ * References to members of the new_cpu_data structure.
2899+ */
2900+
2901+#define X86 new_cpu_data+CPUINFO_x86
2902+#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
2903+#define X86_MODEL new_cpu_data+CPUINFO_x86_model
2904+#define X86_MASK new_cpu_data+CPUINFO_x86_mask
2905+#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
2906+#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
2907+#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
2908+#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
2909+
2910+#define VIRT_ENTRY_OFFSET 0x0
2911+.org VIRT_ENTRY_OFFSET
2912+ENTRY(startup_32)
2913+ movl %esi,xen_start_info
2914+ cld
2915+
2916+ /* Set up the stack pointer */
2917+ movl $(init_thread_union+THREAD_SIZE),%esp
2918+
2919+ /* get vendor info */
2920+ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
2921+ XEN_CPUID
2922+ movl %eax,X86_CPUID # save CPUID level
2923+ movl %ebx,X86_VENDOR_ID # lo 4 chars
2924+ movl %edx,X86_VENDOR_ID+4 # next 4 chars
2925+ movl %ecx,X86_VENDOR_ID+8 # last 4 chars
2926+
2927+ movl $1,%eax # Use the CPUID instruction to get CPU type
2928+ XEN_CPUID
2929+ movb %al,%cl # save reg for future use
2930+ andb $0x0f,%ah # mask processor family
2931+ movb %ah,X86
2932+ andb $0xf0,%al # mask model
2933+ shrb $4,%al
2934+ movb %al,X86_MODEL
2935+ andb $0x0f,%cl # mask mask revision
2936+ movb %cl,X86_MASK
2937+ movl %edx,X86_CAPABILITY
2938+
2939+ movb $1,X86_HARD_MATH
2940+
2941+ xorl %eax,%eax # Clear FS/GS and LDT
2942+ movl %eax,%fs
2943+ movl %eax,%gs
2944+ cld # gcc2 wants the direction flag cleared at all times
2945+
2946+ pushl %eax # fake return address
2947+ jmp start_kernel
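
The byte shuffling after XEN_CPUID above extracts the family, model and mask-revision (stepping) fields from CPUID leaf 1's EAX; a small, hedged C illustration of the same extraction (the EAX value is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned int eax = 0x000006d8;		/* made-up CPUID.1 EAX value */
		unsigned int family   = (eax >> 8) & 0x0f;	/* andb $0x0f,%ah           */
		unsigned int model    = (eax >> 4) & 0x0f;	/* andb $0xf0,%al; shrb $4  */
		unsigned int stepping =  eax       & 0x0f;	/* andb $0x0f,%cl           */

		printf("family %u, model %u, stepping %u\n", family, model, stepping);
		return 0;
	}
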
2948+
2949+#define HYPERCALL_PAGE_OFFSET 0x1000
2950+.org HYPERCALL_PAGE_OFFSET
2951+ENTRY(hypercall_page)
2952+ CFI_STARTPROC
2953+.skip 0x1000
2954+ CFI_ENDPROC
2955+
2956+/*
2957+ * Real beginning of normal "text" segment
2958+ */
2959+ENTRY(stext)
2960+ENTRY(_stext)
2961+
2962+/*
2963+ * BSS section
2964+ */
2965+.section ".bss.page_aligned","w"
2966+ENTRY(empty_zero_page)
2967+ .fill 4096,1,0
2968+
2969+/*
2970+ * This starts the data section.
2971+ */
2972+.data
2973+
2974+/*
2975+ * The Global Descriptor Table contains 32 quadwords, per-CPU.
2976+ */
2977+ .align L1_CACHE_BYTES
2978+ENTRY(cpu_gdt_table)
2979+ .quad 0x0000000000000000 /* NULL descriptor */
2980+ .quad 0x0000000000000000 /* 0x0b reserved */
2981+ .quad 0x0000000000000000 /* 0x13 reserved */
2982+ .quad 0x0000000000000000 /* 0x1b reserved */
2983+ .quad 0x0000000000000000 /* 0x20 unused */
2984+ .quad 0x0000000000000000 /* 0x28 unused */
2985+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
2986+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
2987+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
2988+ .quad 0x0000000000000000 /* 0x4b reserved */
2989+ .quad 0x0000000000000000 /* 0x53 reserved */
2990+ .quad 0x0000000000000000 /* 0x5b reserved */
2991+
2992+ .quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
2993+ .quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
2994+ .quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */
2995+ .quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */
2996+
2997+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
2998+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
2999+
3000+ /*
3001+ * Segments used for calling PnP BIOS have byte granularity.
3002+	 * Their code and data segments have fixed 64k limits;
3003+	 * the transfer segment sizes are set at run time.
3004+ */
3005+ .quad 0x0000000000000000 /* 0x90 32-bit code */
3006+ .quad 0x0000000000000000 /* 0x98 16-bit code */
3007+ .quad 0x0000000000000000 /* 0xa0 16-bit data */
3008+ .quad 0x0000000000000000 /* 0xa8 16-bit data */
3009+ .quad 0x0000000000000000 /* 0xb0 16-bit data */
3010+
3011+ /*
3012+ * The APM segments have byte granularity and their bases
3013+ * are set at run time. All have 64k limits.
3014+ */
3015+ .quad 0x0000000000000000 /* 0xb8 APM CS code */
3016+ .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
3017+ .quad 0x0000000000000000 /* 0xc8 APM DS data */
3018+
3019+ .quad 0x0000000000000000 /* 0xd0 - ESPFIX 16-bit SS */
3020+ .quad 0x0000000000000000 /* 0xd8 - unused */
3021+ .quad 0x0000000000000000 /* 0xe0 - unused */
3022+ .quad 0x0000000000000000 /* 0xe8 - unused */
3023+ .quad 0x0000000000000000 /* 0xf0 - unused */
3024+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
3025+
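
The flat 4GB entries in cpu_gdt_table are easier to read when decoded; below is a small, hedged C helper (not kernel code) that unpacks a descriptor quadword using the standard x86 layout. Run on the 0x60 kernel-code and 0x7b user-data entries above it prints base 0, a 4GB limit and the expected DPL:

	#include <stdio.h>
	#include <stdint.h>

	static void decode_gdt_entry(uint64_t d)
	{
		uint32_t base   = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
		uint32_t limit  = (d & 0xffff) | (((d >> 48) & 0xf) << 16);
		uint32_t access = (d >> 40) & 0xff;	/* P, DPL, S, type */
		uint32_t flags  = (d >> 52) & 0xf;	/* G, D/B, L, AVL  */

		if (flags & 0x8)			/* granularity: 4 KiB units */
			limit = (limit << 12) | 0xfff;
		printf("base=%#x limit=%#x dpl=%u %s\n",
		       base, limit, (access >> 5) & 3,
		       (access & 0x08) ? "code" : "data");
	}

	int main(void)
	{
		decode_gdt_entry(0x00cf9a000000ffffULL);	/* 0x60: kernel 4GB code */
		decode_gdt_entry(0x00cff2000000ffffULL);	/* 0x7b: user 4GB data   */
		return 0;
	}
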
3026+#if CONFIG_XEN_COMPAT <= 0x030002
3027+/*
3028+ * __xen_guest information
3029+ */
3030+.macro utoa value
3031+ .if (\value) < 0 || (\value) >= 0x10
3032+ utoa (((\value)>>4)&0x0fffffff)
3033+ .endif
3034+ .if ((\value) & 0xf) < 10
3035+ .byte '0' + ((\value) & 0xf)
3036+ .else
3037+ .byte 'A' + ((\value) & 0xf) - 10
3038+ .endif
3039+.endm
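
The utoa macro emits the hexadecimal digits of an assemble-time constant as ASCII, most significant digit first; a hedged C equivalent of the same recursion, for reference:

	#include <stdio.h>

	static void utoa_hex(unsigned long value)
	{
		if (value >= 0x10)			/* recurse on the upper digits first   */
			utoa_hex(value >> 4);
		putchar((value & 0xf) < 10 ? '0' + (value & 0xf)
					   : 'A' + (value & 0xf) - 10);
	}

	int main(void)
	{
		utoa_hex(0xC0000000);	/* e.g. a typical 32-bit __PAGE_OFFSET */
		putchar('\n');
		return 0;
	}
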
3040+
3041+.section __xen_guest
3042+ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
3043+ .ascii ",XEN_VER=xen-3.0"
3044+ .ascii ",VIRT_BASE=0x"
3045+ utoa __PAGE_OFFSET
3046+ .ascii ",ELF_PADDR_OFFSET=0x"
3047+ utoa __PAGE_OFFSET
3048+ .ascii ",VIRT_ENTRY=0x"
3049+ utoa (__PAGE_OFFSET + __PHYSICAL_START + VIRT_ENTRY_OFFSET)
3050+ .ascii ",HYPERCALL_PAGE=0x"
3051+ utoa ((__PHYSICAL_START+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
3052+ .ascii ",FEATURES=writable_page_tables"
3053+ .ascii "|writable_descriptor_tables"
3054+ .ascii "|auto_translated_physmap"
3055+ .ascii "|pae_pgdir_above_4gb"
3056+ .ascii "|supervisor_mode_kernel"
3057+#ifdef CONFIG_X86_PAE
3058+ .ascii ",PAE=yes[extended-cr3]"
3059+#else
3060+ .ascii ",PAE=no"
3061+#endif
3062+ .ascii ",LOADER=generic"
3063+ .byte 0
3064+#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
3065+
3066+
3067+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
3068+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
3069+ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
3070+ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, __PAGE_OFFSET)
3071+#if CONFIG_XEN_COMPAT <= 0x030002
3072+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, __PAGE_OFFSET)
3073+#else
3074+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, 0)
3075+#endif
3076+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, startup_32)
3077+ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypercall_page)
3078+ ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long, HYPERVISOR_VIRT_START)
3079+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
3080+#ifdef CONFIG_X86_PAE
3081+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes")
3082+ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
3083+#else
3084+ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no")
3085+ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long, _PAGE_PRESENT,_PAGE_PRESENT)
3086+#endif
3087+ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
3088+ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
3089Index: head-2008-11-25/arch/x86/kernel/init_task-xen.c
3090===================================================================
3091--- /dev/null 1970-01-01 00:00:00.000000000 +0000
3092+++ head-2008-11-25/arch/x86/kernel/init_task-xen.c 2007-06-12 13:12:48.000000000 +0200
3093@@ -0,0 +1,51 @@
3094+#include <linux/mm.h>
3095+#include <linux/module.h>
3096+#include <linux/sched.h>
3097+#include <linux/init.h>
3098+#include <linux/init_task.h>
3099+#include <linux/fs.h>
3100+#include <linux/mqueue.h>
3101+
3102+#include <asm/uaccess.h>
3103+#include <asm/pgtable.h>
3104+#include <asm/desc.h>
3105+
3106+static struct fs_struct init_fs = INIT_FS;
3107+static struct files_struct init_files = INIT_FILES;
3108+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
3109+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
3110+
3111+#define swapper_pg_dir ((pgd_t *)NULL)
3112+struct mm_struct init_mm = INIT_MM(init_mm);
3113+#undef swapper_pg_dir
3114+
3115+EXPORT_SYMBOL(init_mm);
3116+
3117+/*
3118+ * Initial thread structure.
3119+ *
3120+ * We need to make sure that this is THREAD_SIZE aligned due to the
3121+ * way process stacks are handled. This is done by having a special
3122+ * "init_task" linker map entry..
3123+ */
3124+union thread_union init_thread_union
3125+ __attribute__((__section__(".data.init_task"))) =
3126+ { INIT_THREAD_INFO(init_task) };
3127+
3128+/*
3129+ * Initial task structure.
3130+ *
3131+ * All other task structs will be allocated on slabs in fork.c
3132+ */
3133+struct task_struct init_task = INIT_TASK(init_task);
3134+
3135+EXPORT_SYMBOL(init_task);
3136+
3137+#ifndef CONFIG_X86_NO_TSS
3138+/*
3139+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
3140+ * no more per-task TSS's.
3141+ */
3142+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
3143+#endif
3144+
3145Index: head-2008-11-25/arch/x86/kernel/io_apic_32-xen.c
3146===================================================================
3147--- /dev/null 1970-01-01 00:00:00.000000000 +0000
3148+++ head-2008-11-25/arch/x86/kernel/io_apic_32-xen.c 2008-11-25 12:22:34.000000000 +0100
3149@@ -0,0 +1,2776 @@
3150+/*
3151+ * Intel IO-APIC support for multi-Pentium hosts.
3152+ *
3153+ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
3154+ *
3155+ * Many thanks to Stig Venaas for trying out countless experimental
3156+ * patches and reporting/debugging problems patiently!
3157+ *
3158+ * (c) 1999, Multiple IO-APIC support, developed by
3159+ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
3160+ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
3161+ * further tested and cleaned up by Zach Brown <zab@redhat.com>
3162+ * and Ingo Molnar <mingo@redhat.com>
3163+ *
3164+ * Fixes
3165+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
3166+ * thanks to Eric Gilmore
3167+ * and Rolf G. Tews
3168+ * for testing these extensively
3169+ * Paul Diefenbaugh : Added full ACPI support
3170+ */
3171+
3172+#include <linux/mm.h>
3173+#include <linux/interrupt.h>
3174+#include <linux/init.h>
3175+#include <linux/delay.h>
3176+#include <linux/sched.h>
3177+#include <linux/smp_lock.h>
3178+#include <linux/mc146818rtc.h>
3179+#include <linux/compiler.h>
3180+#include <linux/acpi.h>
3181+#include <linux/module.h>
3182+#include <linux/sysdev.h>
3183+
3184+#include <asm/io.h>
3185+#include <asm/smp.h>
3186+#include <asm/desc.h>
3187+#include <asm/timer.h>
3188+#include <asm/i8259.h>
3189+#include <asm/nmi.h>
3190+
3191+#include <mach_apic.h>
3192+
3193+#include "io_ports.h"
3194+
3195+#ifdef CONFIG_XEN
3196+
3197+#include <xen/interface/xen.h>
3198+#include <xen/interface/physdev.h>
3199+#include <xen/evtchn.h>
3200+
3201+/* Fake i8259 */
3202+#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
3203+#define disable_8259A_irq(_irq) ((void)0)
3204+#define i8259A_irq_pending(_irq) (0)
3205+
3206+unsigned long io_apic_irqs;
3207+
3208+static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
3209+{
3210+ struct physdev_apic apic_op;
3211+ int ret;
3212+
3213+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
3214+ apic_op.reg = reg;
3215+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
3216+ if (ret)
3217+ return ret;
3218+ return apic_op.value;
3219+}
3220+
3221+static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
3222+{
3223+ struct physdev_apic apic_op;
3224+
3225+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
3226+ apic_op.reg = reg;
3227+ apic_op.value = value;
3228+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op));
3229+}
3230+
3231+#define io_apic_read(a,r) xen_io_apic_read(a,r)
3232+#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
3233+
3234+#endif /* CONFIG_XEN */
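
As a usage sketch of the wrappers above: a read of the IO-APIC version register goes through the same io_apic_read() name the native code uses, only routed via the PHYSDEVOP hypercall. Illustrative only; register 0x01 is the standard IO-APIC version register, and the locking copies what the rest of this file does:

	/* Hedged usage sketch, relying only on symbols visible in this file. */
	static unsigned int ioapic0_version(void)
	{
		unsigned long flags;
		unsigned int reg;

		spin_lock_irqsave(&ioapic_lock, flags);
		reg = io_apic_read(0, 0x01);	/* expands to xen_io_apic_read()   */
		spin_unlock_irqrestore(&ioapic_lock, flags);

		return reg & 0xff;		/* low byte is the IO-APIC version */
	}
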
3235+
3236+int (*ioapic_renumber_irq)(int ioapic, int irq);
3237+atomic_t irq_mis_count;
3238+
3239+/* Where if anywhere is the i8259 connect in external int mode */
3240+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
3241+
3242+static DEFINE_SPINLOCK(ioapic_lock);
3243+static DEFINE_SPINLOCK(vector_lock);
3244+
3245+int timer_over_8254 __initdata = 1;
3246+
3247+/*
3248+ * Is the SiS APIC rmw bug present ?
3249+ * -1 = don't know, 0 = no, 1 = yes
3250+ */
3251+int sis_apic_bug = -1;
3252+
3253+/*
3254+ * # of IRQ routing registers
3255+ */
3256+int nr_ioapic_registers[MAX_IO_APICS];
3257+
3258+int disable_timer_pin_1 __initdata;
3259+
3260+/*
3261+ * Rough estimation of how many shared IRQs there are, can
3262+ * be changed anytime.
3263+ */
3264+#define MAX_PLUS_SHARED_IRQS NR_IRQS
3265+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
3266+
3267+/*
3268+ * This is performance-critical, we want to do it O(1)
3269+ *
3270+ * the indexing order of this array favors 1:1 mappings
3271+ * between pins and IRQs.
3272+ */
3273+
3274+static struct irq_pin_list {
3275+ int apic, pin, next;
3276+} irq_2_pin[PIN_MAP_SIZE];
3277+
3278+int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
3279+#ifdef CONFIG_PCI_MSI
3280+#define vector_to_irq(vector) \
3281+ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
3282+#else
3283+#define vector_to_irq(vector) (vector)
3284+#endif
3285+
3286+/*
3287+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
3288+ * shared ISA-space IRQs, so we have to support them. We are super
3289+ * fast in the common case, and fast for shared ISA-space IRQs.
3290+ */
3291+static void add_pin_to_irq(unsigned int irq, int apic, int pin)
3292+{
3293+ static int first_free_entry = NR_IRQS;
3294+ struct irq_pin_list *entry = irq_2_pin + irq;
3295+
3296+ while (entry->next)
3297+ entry = irq_2_pin + entry->next;
3298+
3299+ if (entry->pin != -1) {
3300+ entry->next = first_free_entry;
3301+ entry = irq_2_pin + entry->next;
3302+ if (++first_free_entry >= PIN_MAP_SIZE)
3303+ panic("io_apic.c: whoops");
3304+ }
3305+ entry->apic = apic;
3306+ entry->pin = pin;
3307+}
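
add_pin_to_irq() threads additional (apic, pin) pairs for an IRQ through spare irq_2_pin[] slots, using the next index as an in-array linked list; a hedged sketch of the matching traversal, the same shape as the __modify_IO_APIC_irq() loop further down:

	/* Hedged sketch: visit every (apic, pin) recorded for an IRQ. */
	static void for_each_irq_pin_sketch(unsigned int irq,
					    void (*fn)(int apic, int pin))
	{
		struct irq_pin_list *entry = irq_2_pin + irq;

		for (;;) {
			if (entry->pin == -1)		/* slot never filled in   */
				break;
			fn(entry->apic, entry->pin);
			if (!entry->next)		/* 0 terminates the chain */
				break;
			entry = irq_2_pin + entry->next;
		}
	}
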
3308+
3309+#ifdef CONFIG_XEN
3310+#define clear_IO_APIC() ((void)0)
3311+#else
3312+/*
3313+ * Reroute an IRQ to a different pin.
3314+ */
3315+static void __init replace_pin_at_irq(unsigned int irq,
3316+ int oldapic, int oldpin,
3317+ int newapic, int newpin)
3318+{
3319+ struct irq_pin_list *entry = irq_2_pin + irq;
3320+
3321+ while (1) {
3322+ if (entry->apic == oldapic && entry->pin == oldpin) {
3323+ entry->apic = newapic;
3324+ entry->pin = newpin;
3325+ }
3326+ if (!entry->next)
3327+ break;
3328+ entry = irq_2_pin + entry->next;
3329+ }
3330+}
3331+
3332+static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
3333+{
3334+ struct irq_pin_list *entry = irq_2_pin + irq;
3335+ unsigned int pin, reg;
3336+
3337+ for (;;) {
3338+ pin = entry->pin;
3339+ if (pin == -1)
3340+ break;
3341+ reg = io_apic_read(entry->apic, 0x10 + pin*2);
3342+ reg &= ~disable;
3343+ reg |= enable;
3344+ io_apic_modify(entry->apic, 0x10 + pin*2, reg);
3345+ if (!entry->next)
3346+ break;
3347+ entry = irq_2_pin + entry->next;
3348+ }
3349+}
3350+
3351+/* mask = 1 */
3352+static void __mask_IO_APIC_irq (unsigned int irq)
3353+{
3354+ __modify_IO_APIC_irq(irq, 0x00010000, 0);
3355+}
3356+
3357+/* mask = 0 */
3358+static void __unmask_IO_APIC_irq (unsigned int irq)
3359+{
3360+ __modify_IO_APIC_irq(irq, 0, 0x00010000);
3361+}
3362+
3363+/* mask = 1, trigger = 0 */
3364+static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
3365+{
3366+ __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
3367+}
3368+
3369+/* mask = 0, trigger = 1 */
3370+static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
3371+{
3372+ __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
3373+}
3374+
3375+static void mask_IO_APIC_irq (unsigned int irq)
3376+{
3377+ unsigned long flags;
3378+
3379+ spin_lock_irqsave(&ioapic_lock, flags);
3380+ __mask_IO_APIC_irq(irq);
3381+ spin_unlock_irqrestore(&ioapic_lock, flags);
3382+}
3383+
3384+static void unmask_IO_APIC_irq (unsigned int irq)
3385+{
3386+ unsigned long flags;
3387+
3388+ spin_lock_irqsave(&ioapic_lock, flags);
3389+ __unmask_IO_APIC_irq(irq);
3390+ spin_unlock_irqrestore(&ioapic_lock, flags);
3391+}
3392+
3393+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
3394+{
3395+ struct IO_APIC_route_entry entry;
3396+ unsigned long flags;
3397+
3398+ /* Check delivery_mode to be sure we're not clearing an SMI pin */
3399+ spin_lock_irqsave(&ioapic_lock, flags);
3400+ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
3401+ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
3402+ spin_unlock_irqrestore(&ioapic_lock, flags);
3403+ if (entry.delivery_mode == dest_SMI)
3404+ return;
3405+
3406+ /*
3407+ * Disable it in the IO-APIC irq-routing table:
3408+ */
3409+ memset(&entry, 0, sizeof(entry));
3410+ entry.mask = 1;
3411+ spin_lock_irqsave(&ioapic_lock, flags);
3412+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
3413+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
3414+ spin_unlock_irqrestore(&ioapic_lock, flags);
3415+}
3416+
3417+static void clear_IO_APIC (void)
3418+{
3419+ int apic, pin;
3420+
3421+ for (apic = 0; apic < nr_ioapics; apic++)
3422+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
3423+ clear_IO_APIC_pin(apic, pin);
3424+}
3425+
3426+#ifdef CONFIG_SMP
3427+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
3428+{
3429+ unsigned long flags;
3430+ int pin;
3431+ struct irq_pin_list *entry = irq_2_pin + irq;
3432+ unsigned int apicid_value;
3433+ cpumask_t tmp;
3434+
3435+ cpus_and(tmp, cpumask, cpu_online_map);
3436+ if (cpus_empty(tmp))
3437+ tmp = TARGET_CPUS;
3438+
3439+ cpus_and(cpumask, tmp, CPU_MASK_ALL);
3440+
3441+ apicid_value = cpu_mask_to_apicid(cpumask);
3442+ /* Prepare to do the io_apic_write */
3443+ apicid_value = apicid_value << 24;
3444+ spin_lock_irqsave(&ioapic_lock, flags);
3445+ for (;;) {
3446+ pin = entry->pin;
3447+ if (pin == -1)
3448+ break;
3449+ io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
3450+ if (!entry->next)
3451+ break;
3452+ entry = irq_2_pin + entry->next;
3453+ }
3454+ set_irq_info(irq, cpumask);
3455+ spin_unlock_irqrestore(&ioapic_lock, flags);
3456+}
3457+
3458+#if defined(CONFIG_IRQBALANCE)
3459+# include <asm/processor.h> /* kernel_thread() */
3460+# include <linux/kernel_stat.h> /* kstat */
3461+# include <linux/slab.h> /* kmalloc() */
3462+# include <linux/timer.h> /* time_after() */
3463+
3464+#ifdef CONFIG_BALANCED_IRQ_DEBUG
3465+# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
3466+# define Dprintk(x...) do { TDprintk(x); } while (0)
3467+# else
3468+# define TDprintk(x...)
3469+# define Dprintk(x...)
3470+# endif
3471+
3472+#define IRQBALANCE_CHECK_ARCH -999
3473+#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
3474+#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
3475+#define BALANCED_IRQ_MORE_DELTA (HZ/10)
3476+#define BALANCED_IRQ_LESS_DELTA (HZ)
3477+
3478+static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
3479+static int physical_balance __read_mostly;
3480+static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
3481+
3482+static struct irq_cpu_info {
3483+ unsigned long * last_irq;
3484+ unsigned long * irq_delta;
3485+ unsigned long irq;
3486+} irq_cpu_data[NR_CPUS];
3487+
3488+#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
3489+#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq])
3490+#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq])
3491+
3492+#define IDLE_ENOUGH(cpu,now) \
3493+ (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
3494+
3495+#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
3496+
3497+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
3498+
3499+static cpumask_t balance_irq_affinity[NR_IRQS] = {
3500+ [0 ... NR_IRQS-1] = CPU_MASK_ALL
3501+};
3502+
3503+void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
3504+{
3505+ balance_irq_affinity[irq] = mask;
3506+}
3507+
3508+static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
3509+ unsigned long now, int direction)
3510+{
3511+ int search_idle = 1;
3512+ int cpu = curr_cpu;
3513+
3514+ goto inside;
3515+
3516+ do {
3517+ if (unlikely(cpu == curr_cpu))
3518+ search_idle = 0;
3519+inside:
3520+ if (direction == 1) {
3521+ cpu++;
3522+ if (cpu >= NR_CPUS)
3523+ cpu = 0;
3524+ } else {
3525+ cpu--;
3526+ if (cpu == -1)
3527+ cpu = NR_CPUS-1;
3528+ }
3529+ } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
3530+ (search_idle && !IDLE_ENOUGH(cpu,now)));
3531+
3532+ return cpu;
3533+}
3534+
3535+static inline void balance_irq(int cpu, int irq)
3536+{
3537+ unsigned long now = jiffies;
3538+ cpumask_t allowed_mask;
3539+ unsigned int new_cpu;
3540+
3541+ if (irqbalance_disabled)
3542+ return;
3543+
3544+ cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
3545+ new_cpu = move(cpu, allowed_mask, now, 1);
3546+ if (cpu != new_cpu) {
3547+ set_pending_irq(irq, cpumask_of_cpu(new_cpu));
3548+ }
3549+}
3550+
3551+static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
3552+{
3553+ int i, j;
3554+ Dprintk("Rotating IRQs among CPUs.\n");
3555+ for_each_online_cpu(i) {
3556+ for (j = 0; j < NR_IRQS; j++) {
3557+ if (!irq_desc[j].action)
3558+ continue;
3559+ /* Is it a significant load ? */
3560+ if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
3561+ useful_load_threshold)
3562+ continue;
3563+ balance_irq(i, j);
3564+ }
3565+ }
3566+ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
3567+ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
3568+ return;
3569+}
3570+
3571+static void do_irq_balance(void)
3572+{
3573+ int i, j;
3574+ unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
3575+ unsigned long move_this_load = 0;
3576+ int max_loaded = 0, min_loaded = 0;
3577+ int load;
3578+ unsigned long useful_load_threshold = balanced_irq_interval + 10;
3579+ int selected_irq;
3580+ int tmp_loaded, first_attempt = 1;
3581+ unsigned long tmp_cpu_irq;
3582+ unsigned long imbalance = 0;
3583+ cpumask_t allowed_mask, target_cpu_mask, tmp;
3584+
3585+ for_each_possible_cpu(i) {
3586+ int package_index;
3587+ CPU_IRQ(i) = 0;
3588+ if (!cpu_online(i))
3589+ continue;
3590+ package_index = CPU_TO_PACKAGEINDEX(i);
3591+ for (j = 0; j < NR_IRQS; j++) {
3592+ unsigned long value_now, delta;
3593+ /* Is this an active IRQ? */
3594+ if (!irq_desc[j].action)
3595+ continue;
3596+ if ( package_index == i )
3597+ IRQ_DELTA(package_index,j) = 0;
3598+ /* Determine the total count per processor per IRQ */
3599+ value_now = (unsigned long) kstat_cpu(i).irqs[j];
3600+
3601+ /* Determine the activity per processor per IRQ */
3602+ delta = value_now - LAST_CPU_IRQ(i,j);
3603+
3604+ /* Update last_cpu_irq[][] for the next time */
3605+ LAST_CPU_IRQ(i,j) = value_now;
3606+
3607+ /* Ignore IRQs whose rate is less than the clock */
3608+ if (delta < useful_load_threshold)
3609+ continue;
3610+ /* update the load for the processor or package total */
3611+ IRQ_DELTA(package_index,j) += delta;
3612+
3613+ /* Keep track of the higher numbered sibling as well */
3614+ if (i != package_index)
3615+ CPU_IRQ(i) += delta;
3616+ /*
3617+ * We have sibling A and sibling B in the package
3618+ *
3619+ * cpu_irq[A] = load for cpu A + load for cpu B
3620+ * cpu_irq[B] = load for cpu B
3621+ */
3622+ CPU_IRQ(package_index) += delta;
3623+ }
3624+ }
3625+ /* Find the least loaded processor package */
3626+ for_each_online_cpu(i) {
3627+ if (i != CPU_TO_PACKAGEINDEX(i))
3628+ continue;
3629+ if (min_cpu_irq > CPU_IRQ(i)) {
3630+ min_cpu_irq = CPU_IRQ(i);
3631+ min_loaded = i;
3632+ }
3633+ }
3634+ max_cpu_irq = ULONG_MAX;
3635+
3636+tryanothercpu:
3637+ /* Look for heaviest loaded processor.
3638+ * We may come back to get the next heaviest loaded processor.
3639+ * Skip processors with trivial loads.
3640+ */
3641+ tmp_cpu_irq = 0;
3642+ tmp_loaded = -1;
3643+ for_each_online_cpu(i) {
3644+ if (i != CPU_TO_PACKAGEINDEX(i))
3645+ continue;
3646+ if (max_cpu_irq <= CPU_IRQ(i))
3647+ continue;
3648+ if (tmp_cpu_irq < CPU_IRQ(i)) {
3649+ tmp_cpu_irq = CPU_IRQ(i);
3650+ tmp_loaded = i;
3651+ }
3652+ }
3653+
3654+ if (tmp_loaded == -1) {
3655+		/* With only a small number of heavy interrupt sources, some
3656+		 * cpus can end up loaded too much. We use Ingo's original
3657+		 * approach of rotating the IRQs around.
3658+ */
3659+ if (!first_attempt && imbalance >= useful_load_threshold) {
3660+ rotate_irqs_among_cpus(useful_load_threshold);
3661+ return;
3662+ }
3663+ goto not_worth_the_effort;
3664+ }
3665+
3666+ first_attempt = 0; /* heaviest search */
3667+ max_cpu_irq = tmp_cpu_irq; /* load */
3668+ max_loaded = tmp_loaded; /* processor */
3669+ imbalance = (max_cpu_irq - min_cpu_irq) / 2;
3670+
3671+ Dprintk("max_loaded cpu = %d\n", max_loaded);
3672+ Dprintk("min_loaded cpu = %d\n", min_loaded);
3673+ Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
3674+ Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
3675+ Dprintk("load imbalance = %lu\n", imbalance);
3676+
3677+	/* if the imbalance is less than roughly an eighth of the max load,
3678+	 * we are into diminishing returns - quit
3679+ */
3680+ if (imbalance < (max_cpu_irq >> 3)) {
3681+ Dprintk("Imbalance too trivial\n");
3682+ goto not_worth_the_effort;
3683+ }
3684+
3685+tryanotherirq:
3686+ /* if we select an IRQ to move that can't go where we want, then
3687+ * see if there is another one to try.
3688+ */
3689+ move_this_load = 0;
3690+ selected_irq = -1;
3691+ for (j = 0; j < NR_IRQS; j++) {
3692+ /* Is this an active IRQ? */
3693+ if (!irq_desc[j].action)
3694+ continue;
3695+ if (imbalance <= IRQ_DELTA(max_loaded,j))
3696+ continue;
3697+ /* Try to find the IRQ that is closest to the imbalance
3698+ * without going over.
3699+ */
3700+ if (move_this_load < IRQ_DELTA(max_loaded,j)) {
3701+ move_this_load = IRQ_DELTA(max_loaded,j);
3702+ selected_irq = j;
3703+ }
3704+ }
3705+ if (selected_irq == -1) {
3706+ goto tryanothercpu;
3707+ }
3708+
3709+ imbalance = move_this_load;
3710+
3711+	/* For the physical_balance case, we accumulated both load
3712+	 * values in one of the siblings' cpu_irq[] slots,
3713+	 * so that the same code can be used for physical and logical
3714+	 * processors as much as possible.
3715+ *
3716+ * NOTE: the cpu_irq[] array holds the sum of the load for
3717+ * sibling A and sibling B in the slot for the lowest numbered
3718+ * sibling (A), _AND_ the load for sibling B in the slot for
3719+ * the higher numbered sibling.
3720+ *
3721+ * We seek the least loaded sibling by making the comparison
3722+ * (A+B)/2 vs B
3723+ */
3724+ load = CPU_IRQ(min_loaded) >> 1;
3725+ for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
3726+ if (load > CPU_IRQ(j)) {
3727+ /* This won't change cpu_sibling_map[min_loaded] */
3728+ load = CPU_IRQ(j);
3729+ min_loaded = j;
3730+ }
3731+ }
3732+
3733+ cpus_and(allowed_mask,
3734+ cpu_online_map,
3735+ balance_irq_affinity[selected_irq]);
3736+ target_cpu_mask = cpumask_of_cpu(min_loaded);
3737+ cpus_and(tmp, target_cpu_mask, allowed_mask);
3738+
3739+ if (!cpus_empty(tmp)) {
3740+
3741+ Dprintk("irq = %d moved to cpu = %d\n",
3742+ selected_irq, min_loaded);
3743+ /* mark for change destination */
3744+ set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
3745+
3746+ /* Since we made a change, come back sooner to
3747+ * check for more variation.
3748+ */
3749+ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
3750+ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
3751+ return;
3752+ }
3753+ goto tryanotherirq;
3754+
3755+not_worth_the_effort:
3756+ /*
3757+ * if we did not find an IRQ to move, then adjust the time interval
3758+ * upward
3759+ */
3760+ balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
3761+ balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
3762+ Dprintk("IRQ worth rotating not found\n");
3763+ return;
3764+}
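
A minimal standalone sketch (not part of the patch) of the sibling-load bookkeeping described in the NOTE above: the lower-numbered sibling's cpu_irq[] slot carries A+B while the other carries only B, so comparing (A+B)/2 against B picks the less loaded sibling. The interrupt counts below are invented example values.

#include <stdio.h>

int main(void)
{
	unsigned long load_a = 600, load_b = 200;	/* example per-sibling IRQ counts only */

	/* slot of the lower-numbered sibling (A) holds A+B, B's slot holds only B */
	unsigned long cpu_irq_a = load_a + load_b;	/* 800 */
	unsigned long cpu_irq_b = load_b;		/* 200 */

	/* (A+B)/2 vs B: 400 > 200, so sibling B is the less loaded one */
	int min_loaded_is_b = (cpu_irq_a >> 1) > cpu_irq_b;

	printf("least loaded sibling: %s\n", min_loaded_is_b ? "B" : "A");
	return 0;
}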
3765+
3766+static int balanced_irq(void *unused)
3767+{
3768+ int i;
3769+ unsigned long prev_balance_time = jiffies;
3770+ long time_remaining = balanced_irq_interval;
3771+
3772+ daemonize("kirqd");
3773+
3774+ /* push everything to CPU 0 to give us a starting point. */
3775+ for (i = 0 ; i < NR_IRQS ; i++) {
3776+ irq_desc[i].pending_mask = cpumask_of_cpu(0);
3777+ set_pending_irq(i, cpumask_of_cpu(0));
3778+ }
3779+
3780+ for ( ; ; ) {
3781+ time_remaining = schedule_timeout_interruptible(time_remaining);
3782+ try_to_freeze();
3783+ if (time_after(jiffies,
3784+ prev_balance_time+balanced_irq_interval)) {
3785+ preempt_disable();
3786+ do_irq_balance();
3787+ prev_balance_time = jiffies;
3788+ time_remaining = balanced_irq_interval;
3789+ preempt_enable();
3790+ }
3791+ }
3792+ return 0;
3793+}
3794+
3795+static int __init balanced_irq_init(void)
3796+{
3797+ int i;
3798+ struct cpuinfo_x86 *c;
3799+ cpumask_t tmp;
3800+
3801+ cpus_shift_right(tmp, cpu_online_map, 2);
3802+ c = &boot_cpu_data;
 3803+	/* When not overridden on the command line, ask the subarchitecture. */
3804+ if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
3805+ irqbalance_disabled = NO_BALANCE_IRQ;
3806+ if (irqbalance_disabled)
3807+ return 0;
3808+
3809+ /* disable irqbalance completely if there is only one processor online */
3810+ if (num_online_cpus() < 2) {
3811+ irqbalance_disabled = 1;
3812+ return 0;
3813+ }
3814+ /*
3815+ * Enable physical balance only if more than 1 physical processor
3816+ * is present
3817+ */
3818+ if (smp_num_siblings > 1 && !cpus_empty(tmp))
3819+ physical_balance = 1;
3820+
3821+ for_each_online_cpu(i) {
3822+ irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
3823+ irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
3824+ if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
3825+ printk(KERN_ERR "balanced_irq_init: out of memory");
3826+ goto failed;
3827+ }
3828+ memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
3829+ memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
3830+ }
3831+
3832+ printk(KERN_INFO "Starting balanced_irq\n");
3833+ if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0)
3834+ return 0;
3835+ else
3836+ printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
3837+failed:
3838+ for_each_possible_cpu(i) {
3839+ kfree(irq_cpu_data[i].irq_delta);
3840+ irq_cpu_data[i].irq_delta = NULL;
3841+ kfree(irq_cpu_data[i].last_irq);
3842+ irq_cpu_data[i].last_irq = NULL;
3843+ }
3844+ return 0;
3845+}
3846+
3847+int __init irqbalance_disable(char *str)
3848+{
3849+ irqbalance_disabled = 1;
3850+ return 1;
3851+}
3852+
3853+__setup("noirqbalance", irqbalance_disable);
3854+
3855+late_initcall(balanced_irq_init);
3856+#endif /* CONFIG_IRQBALANCE */
3857+#endif /* CONFIG_SMP */
3858+#endif
3859+
3860+#ifndef CONFIG_SMP
3861+void fastcall send_IPI_self(int vector)
3862+{
3863+#ifndef CONFIG_XEN
3864+ unsigned int cfg;
3865+
3866+ /*
3867+ * Wait for idle.
3868+ */
3869+ apic_wait_icr_idle();
3870+ cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
3871+ /*
3872+ * Send the IPI. The write to APIC_ICR fires this off.
3873+ */
3874+ apic_write_around(APIC_ICR, cfg);
3875+#endif
3876+}
3877+#endif /* !CONFIG_SMP */
3878+
3879+
3880+/*
 3881+ * support for broken MP BIOSes; enables hand-redirection of PIRQ0-7 to
3882+ * specific CPU-side IRQs.
3883+ */
3884+
3885+#define MAX_PIRQS 8
3886+static int pirq_entries [MAX_PIRQS];
3887+static int pirqs_enabled;
3888+int skip_ioapic_setup;
3889+
3890+static int __init ioapic_setup(char *str)
3891+{
3892+ skip_ioapic_setup = 1;
3893+ return 1;
3894+}
3895+
3896+__setup("noapic", ioapic_setup);
3897+
3898+static int __init ioapic_pirq_setup(char *str)
3899+{
3900+ int i, max;
3901+ int ints[MAX_PIRQS+1];
3902+
3903+ get_options(str, ARRAY_SIZE(ints), ints);
3904+
3905+ for (i = 0; i < MAX_PIRQS; i++)
3906+ pirq_entries[i] = -1;
3907+
3908+ pirqs_enabled = 1;
3909+ apic_printk(APIC_VERBOSE, KERN_INFO
3910+ "PIRQ redirection, working around broken MP-BIOS.\n");
3911+ max = MAX_PIRQS;
3912+ if (ints[0] < MAX_PIRQS)
3913+ max = ints[0];
3914+
3915+ for (i = 0; i < max; i++) {
3916+ apic_printk(APIC_VERBOSE, KERN_DEBUG
3917+ "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
3918+ /*
3919+ * PIRQs are mapped upside down, usually.
3920+ */
3921+ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
3922+ }
3923+ return 1;
3924+}
3925+
3926+__setup("pirq=", ioapic_pirq_setup);
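
A minimal standalone sketch (not part of the patch) of the "upside down" mapping done by ioapic_pirq_setup(). The boot option "pirq=5,11" is an invented example; get_options() would leave the count in ints[0] and the values after it, so PIRQ7 (IO-APIC pin 23, given the pin-16 indexing used later in pin_2_irq()) ends up redirected to IRQ 5.

#include <stdio.h>

#define MAX_PIRQS 8

int main(void)
{
	int ints[MAX_PIRQS + 1] = { 2, 5, 11 };	/* as if "pirq=5,11" had been parsed */
	int pirq_entries[MAX_PIRQS];
	int i, max = ints[0] < MAX_PIRQS ? ints[0] : MAX_PIRQS;

	for (i = 0; i < MAX_PIRQS; i++)
		pirq_entries[i] = -1;
	for (i = 0; i < max; i++)		/* same reversed assignment as above */
		pirq_entries[MAX_PIRQS - i - 1] = ints[i + 1];

	for (i = 0; i < MAX_PIRQS; i++)
		if (pirq_entries[i] != -1)
			printf("PIRQ%d (IO-APIC pin %d) -> IRQ %d\n",
			       i, i + 16, pirq_entries[i]);
	return 0;
}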
3927+
3928+/*
3929+ * Find the IRQ entry number of a certain pin.
3930+ */
3931+static int find_irq_entry(int apic, int pin, int type)
3932+{
3933+ int i;
3934+
3935+ for (i = 0; i < mp_irq_entries; i++)
3936+ if (mp_irqs[i].mpc_irqtype == type &&
3937+ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
3938+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
3939+ mp_irqs[i].mpc_dstirq == pin)
3940+ return i;
3941+
3942+ return -1;
3943+}
3944+
3945+/*
3946+ * Find the pin to which IRQ[irq] (ISA) is connected
3947+ */
3948+static int __init find_isa_irq_pin(int irq, int type)
3949+{
3950+ int i;
3951+
3952+ for (i = 0; i < mp_irq_entries; i++) {
3953+ int lbus = mp_irqs[i].mpc_srcbus;
3954+
3955+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
3956+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
3957+ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
3958+ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
3959+ ) &&
3960+ (mp_irqs[i].mpc_irqtype == type) &&
3961+ (mp_irqs[i].mpc_srcbusirq == irq))
3962+
3963+ return mp_irqs[i].mpc_dstirq;
3964+ }
3965+ return -1;
3966+}
3967+
3968+static int __init find_isa_irq_apic(int irq, int type)
3969+{
3970+ int i;
3971+
3972+ for (i = 0; i < mp_irq_entries; i++) {
3973+ int lbus = mp_irqs[i].mpc_srcbus;
3974+
3975+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
3976+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
3977+ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
3978+ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
3979+ ) &&
3980+ (mp_irqs[i].mpc_irqtype == type) &&
3981+ (mp_irqs[i].mpc_srcbusirq == irq))
3982+ break;
3983+ }
3984+ if (i < mp_irq_entries) {
3985+ int apic;
3986+ for(apic = 0; apic < nr_ioapics; apic++) {
3987+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
3988+ return apic;
3989+ }
3990+ }
3991+
3992+ return -1;
3993+}
3994+
3995+/*
3996+ * Find a specific PCI IRQ entry.
3997+ * Not an __init, possibly needed by modules
3998+ */
3999+static int pin_2_irq(int idx, int apic, int pin);
4000+
4001+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
4002+{
4003+ int apic, i, best_guess = -1;
4004+
4005+ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
4006+ "slot:%d, pin:%d.\n", bus, slot, pin);
4007+ if (mp_bus_id_to_pci_bus[bus] == -1) {
4008+ printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
4009+ return -1;
4010+ }
4011+ for (i = 0; i < mp_irq_entries; i++) {
4012+ int lbus = mp_irqs[i].mpc_srcbus;
4013+
4014+ for (apic = 0; apic < nr_ioapics; apic++)
4015+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
4016+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
4017+ break;
4018+
4019+ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
4020+ !mp_irqs[i].mpc_irqtype &&
4021+ (bus == lbus) &&
4022+ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
4023+ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
4024+
4025+ if (!(apic || IO_APIC_IRQ(irq)))
4026+ continue;
4027+
4028+ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
4029+ return irq;
4030+ /*
4031+ * Use the first all-but-pin matching entry as a
4032+ * best-guess fuzzy result for broken mptables.
4033+ */
4034+ if (best_guess < 0)
4035+ best_guess = irq;
4036+ }
4037+ }
4038+ return best_guess;
4039+}
4040+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
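
A minimal standalone sketch (not part of the patch) showing how a PCI srcbusirq value from the MP table splits into a slot number and an INTx pin, using the same shift and masks as IO_APIC_get_PCI_irq_vector() above. The value 0x4a is an invented example.

#include <stdio.h>

int main(void)
{
	unsigned int srcbusirq = 0x4a;			/* example MP-table value only */
	unsigned int slot = (srcbusirq >> 2) & 0x1f;	/* device/slot number -> 18 */
	unsigned int pin  = srcbusirq & 3;		/* 0..3 == INTA#..INTD# -> 2 */

	printf("slot %u, INT%c#\n", slot, 'A' + pin);
	return 0;
}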
4041+
4042+/*
 4043+ * This function is currently only a helper for the i386 SMP boot process, where
 4044+ * we need to reprogram the ioredtbls to cater for the cpus which have come online,
 4045+ * so the mask in all cases should simply be TARGET_CPUS
4046+ */
4047+#ifdef CONFIG_SMP
4048+#ifndef CONFIG_XEN
4049+void __init setup_ioapic_dest(void)
4050+{
4051+ int pin, ioapic, irq, irq_entry;
4052+
4053+ if (skip_ioapic_setup == 1)
4054+ return;
4055+
4056+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
4057+ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
4058+ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
4059+ if (irq_entry == -1)
4060+ continue;
4061+ irq = pin_2_irq(irq_entry, ioapic, pin);
4062+ set_ioapic_affinity_irq(irq, TARGET_CPUS);
4063+ }
4064+
4065+ }
4066+}
4067+#endif /* !CONFIG_XEN */
4068+#endif
4069+
4070+/*
4071+ * EISA Edge/Level control register, ELCR
4072+ */
4073+static int EISA_ELCR(unsigned int irq)
4074+{
4075+ if (irq < 16) {
4076+ unsigned int port = 0x4d0 + (irq >> 3);
4077+ return (inb(port) >> (irq & 7)) & 1;
4078+ }
4079+ apic_printk(APIC_VERBOSE, KERN_INFO
4080+ "Broken MPtable reports ISA irq %d\n", irq);
4081+ return 0;
4082+}
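
A minimal standalone sketch (not part of the patch) of the port/bit arithmetic in EISA_ELCR() above, without the actual inb(). IRQ 11 is an invented example; it lands in the upper ELCR byte at port 0x4d1, bit 3.

#include <stdio.h>

int main(void)
{
	unsigned int irq  = 11;			/* example legacy IRQ only */
	unsigned int port = 0x4d0 + (irq >> 3);	/* -> 0x4d1 (this byte covers IRQ 8-15) */
	unsigned int bit  = irq & 7;		/* -> bit 3 of that byte */

	printf("ELCR port 0x%x, bit %u (1 = level, 0 = edge)\n", port, bit);
	return 0;
}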
4083+
4084+/* EISA interrupts are always polarity zero and can be edge or level
4085+ * trigger depending on the ELCR value. If an interrupt is listed as
4086+ * EISA conforming in the MP table, that means its trigger type must
4087+ * be read in from the ELCR */
4088+
4089+#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
4090+#define default_EISA_polarity(idx) (0)
4091+
4092+/* ISA interrupts are always polarity zero edge triggered,
4093+ * when listed as conforming in the MP table. */
4094+
4095+#define default_ISA_trigger(idx) (0)
4096+#define default_ISA_polarity(idx) (0)
4097+
4098+/* PCI interrupts are always polarity one level triggered,
4099+ * when listed as conforming in the MP table. */
4100+
4101+#define default_PCI_trigger(idx) (1)
4102+#define default_PCI_polarity(idx) (1)
4103+
4104+/* MCA interrupts are always polarity zero level triggered,
4105+ * when listed as conforming in the MP table. */
4106+
4107+#define default_MCA_trigger(idx) (1)
4108+#define default_MCA_polarity(idx) (0)
4109+
4110+/* NEC98 interrupts are always polarity zero edge triggered,
4111+ * when listed as conforming in the MP table. */
4112+
4113+#define default_NEC98_trigger(idx) (0)
4114+#define default_NEC98_polarity(idx) (0)
4115+
4116+static int __init MPBIOS_polarity(int idx)
4117+{
4118+ int bus = mp_irqs[idx].mpc_srcbus;
4119+ int polarity;
4120+
4121+ /*
4122+ * Determine IRQ line polarity (high active or low active):
4123+ */
4124+ switch (mp_irqs[idx].mpc_irqflag & 3)
4125+ {
4126+ case 0: /* conforms, ie. bus-type dependent polarity */
4127+ {
4128+ switch (mp_bus_id_to_type[bus])
4129+ {
4130+ case MP_BUS_ISA: /* ISA pin */
4131+ {
4132+ polarity = default_ISA_polarity(idx);
4133+ break;
4134+ }
4135+ case MP_BUS_EISA: /* EISA pin */
4136+ {
4137+ polarity = default_EISA_polarity(idx);
4138+ break;
4139+ }
4140+ case MP_BUS_PCI: /* PCI pin */
4141+ {
4142+ polarity = default_PCI_polarity(idx);
4143+ break;
4144+ }
4145+ case MP_BUS_MCA: /* MCA pin */
4146+ {
4147+ polarity = default_MCA_polarity(idx);
4148+ break;
4149+ }
4150+ case MP_BUS_NEC98: /* NEC 98 pin */
4151+ {
4152+ polarity = default_NEC98_polarity(idx);
4153+ break;
4154+ }
4155+ default:
4156+ {
4157+ printk(KERN_WARNING "broken BIOS!!\n");
4158+ polarity = 1;
4159+ break;
4160+ }
4161+ }
4162+ break;
4163+ }
4164+ case 1: /* high active */
4165+ {
4166+ polarity = 0;
4167+ break;
4168+ }
4169+ case 2: /* reserved */
4170+ {
4171+ printk(KERN_WARNING "broken BIOS!!\n");
4172+ polarity = 1;
4173+ break;
4174+ }
4175+ case 3: /* low active */
4176+ {
4177+ polarity = 1;
4178+ break;
4179+ }
4180+ default: /* invalid */
4181+ {
4182+ printk(KERN_WARNING "broken BIOS!!\n");
4183+ polarity = 1;
4184+ break;
4185+ }
4186+ }
4187+ return polarity;
4188+}
4189+
4190+static int MPBIOS_trigger(int idx)
4191+{
4192+ int bus = mp_irqs[idx].mpc_srcbus;
4193+ int trigger;
4194+
4195+ /*
4196+ * Determine IRQ trigger mode (edge or level sensitive):
4197+ */
4198+ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
4199+ {
4200+ case 0: /* conforms, ie. bus-type dependent */
4201+ {
4202+ switch (mp_bus_id_to_type[bus])
4203+ {
4204+ case MP_BUS_ISA: /* ISA pin */
4205+ {
4206+ trigger = default_ISA_trigger(idx);
4207+ break;
4208+ }
4209+ case MP_BUS_EISA: /* EISA pin */
4210+ {
4211+ trigger = default_EISA_trigger(idx);
4212+ break;
4213+ }
4214+ case MP_BUS_PCI: /* PCI pin */
4215+ {
4216+ trigger = default_PCI_trigger(idx);
4217+ break;
4218+ }
4219+ case MP_BUS_MCA: /* MCA pin */
4220+ {
4221+ trigger = default_MCA_trigger(idx);
4222+ break;
4223+ }
4224+ case MP_BUS_NEC98: /* NEC 98 pin */
4225+ {
4226+ trigger = default_NEC98_trigger(idx);
4227+ break;
4228+ }
4229+ default:
4230+ {
4231+ printk(KERN_WARNING "broken BIOS!!\n");
4232+ trigger = 1;
4233+ break;
4234+ }
4235+ }
4236+ break;
4237+ }
4238+ case 1: /* edge */
4239+ {
4240+ trigger = 0;
4241+ break;
4242+ }
4243+ case 2: /* reserved */
4244+ {
4245+ printk(KERN_WARNING "broken BIOS!!\n");
4246+ trigger = 1;
4247+ break;
4248+ }
4249+ case 3: /* level */
4250+ {
4251+ trigger = 1;
4252+ break;
4253+ }
4254+ default: /* invalid */
4255+ {
4256+ printk(KERN_WARNING "broken BIOS!!\n");
4257+ trigger = 0;
4258+ break;
4259+ }
4260+ }
4261+ return trigger;
4262+}
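
A minimal standalone sketch (not part of the patch) of how MPBIOS_polarity() and MPBIOS_trigger() above slice the MP-table irqflag field. The flag value 0x000d is an invented example: bits 1:0 = 01 mean active high (polarity 0), bits 3:2 = 11 mean level triggered (trigger 1).

#include <stdio.h>

int main(void)
{
	unsigned short irqflag = 0x000d;		/* example MP-table flags only */
	unsigned int pol_bits  = irqflag & 3;		/* -> 1, active high */
	unsigned int trig_bits = (irqflag >> 2) & 3;	/* -> 3, level triggered */

	printf("polarity bits %u, trigger bits %u\n", pol_bits, trig_bits);
	return 0;
}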
4263+
4264+static inline int irq_polarity(int idx)
4265+{
4266+ return MPBIOS_polarity(idx);
4267+}
4268+
4269+static inline int irq_trigger(int idx)
4270+{
4271+ return MPBIOS_trigger(idx);
4272+}
4273+
4274+static int pin_2_irq(int idx, int apic, int pin)
4275+{
4276+ int irq, i;
4277+ int bus = mp_irqs[idx].mpc_srcbus;
4278+
4279+ /*
4280+ * Debugging check, we are in big trouble if this message pops up!
4281+ */
4282+ if (mp_irqs[idx].mpc_dstirq != pin)
4283+ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
4284+
4285+ switch (mp_bus_id_to_type[bus])
4286+ {
4287+ case MP_BUS_ISA: /* ISA pin */
4288+ case MP_BUS_EISA:
4289+ case MP_BUS_MCA:
4290+ case MP_BUS_NEC98:
4291+ {
4292+ irq = mp_irqs[idx].mpc_srcbusirq;
4293+ break;
4294+ }
4295+ case MP_BUS_PCI: /* PCI pin */
4296+ {
4297+ /*
4298+ * PCI IRQs are mapped in order
4299+ */
4300+ i = irq = 0;
4301+ while (i < apic)
4302+ irq += nr_ioapic_registers[i++];
4303+ irq += pin;
4304+
4305+ /*
4306+ * For MPS mode, so far only needed by ES7000 platform
4307+ */
4308+ if (ioapic_renumber_irq)
4309+ irq = ioapic_renumber_irq(apic, irq);
4310+
4311+ break;
4312+ }
4313+ default:
4314+ {
4315+ printk(KERN_ERR "unknown bus type %d.\n",bus);
4316+ irq = 0;
4317+ break;
4318+ }
4319+ }
4320+
4321+ /*
4322+ * PCI IRQ command line redirection. Yes, limits are hardcoded.
4323+ */
4324+ if ((pin >= 16) && (pin <= 23)) {
4325+ if (pirq_entries[pin-16] != -1) {
4326+ if (!pirq_entries[pin-16]) {
4327+ apic_printk(APIC_VERBOSE, KERN_DEBUG
4328+ "disabling PIRQ%d\n", pin-16);
4329+ } else {
4330+ irq = pirq_entries[pin-16];
4331+ apic_printk(APIC_VERBOSE, KERN_DEBUG
4332+ "using PIRQ%d -> IRQ %d\n",
4333+ pin-16, irq);
4334+ }
4335+ }
4336+ }
4337+ return irq;
4338+}
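
A minimal standalone sketch (not part of the patch) of the "PCI IRQs are mapped in order" arithmetic in pin_2_irq() above. Two 24-pin IO-APICs are an invented example; pin 3 of the second one comes out as IRQ 27.

#include <stdio.h>

int main(void)
{
	int nr_ioapic_registers[2] = { 24, 24 };	/* example pin counts only */
	int apic = 1, pin = 3;
	int i = 0, irq = 0;

	while (i < apic)			/* add up the pins of earlier IO-APICs */
		irq += nr_ioapic_registers[i++];
	irq += pin;

	printf("apic %d pin %d -> irq %d\n", apic, pin, irq);
	return 0;
}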
4339+
4340+static inline int IO_APIC_irq_trigger(int irq)
4341+{
4342+ int apic, idx, pin;
4343+
4344+ for (apic = 0; apic < nr_ioapics; apic++) {
4345+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
4346+ idx = find_irq_entry(apic,pin,mp_INT);
4347+ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
4348+ return irq_trigger(idx);
4349+ }
4350+ }
4351+ /*
4352+ * nonexistent IRQs are edge default
4353+ */
4354+ return 0;
4355+}
4356+
4357+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
4358+u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
4359+
4360+int assign_irq_vector(int irq)
4361+{
4362+ unsigned long flags;
4363+ int vector;
4364+ struct physdev_irq irq_op;
4365+
4366+ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
4367+
4368+ if (irq < PIRQ_BASE || irq - PIRQ_BASE > NR_PIRQS)
4369+ return -EINVAL;
4370+
4371+ spin_lock_irqsave(&vector_lock, flags);
4372+
4373+ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
4374+ spin_unlock_irqrestore(&vector_lock, flags);
4375+ return IO_APIC_VECTOR(irq);
4376+ }
4377+
4378+ irq_op.irq = irq;
4379+ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
4380+ spin_unlock_irqrestore(&vector_lock, flags);
4381+ return -ENOSPC;
4382+ }
4383+
4384+ vector = irq_op.vector;
4385+ vector_irq[vector] = irq;
4386+ if (irq != AUTO_ASSIGN)
4387+ IO_APIC_VECTOR(irq) = vector;
4388+
4389+ spin_unlock_irqrestore(&vector_lock, flags);
4390+
4391+ return vector;
4392+}
4393+
4394+#ifndef CONFIG_XEN
4395+static struct hw_interrupt_type ioapic_level_type;
4396+static struct hw_interrupt_type ioapic_edge_type;
4397+
4398+#define IOAPIC_AUTO -1
4399+#define IOAPIC_EDGE 0
4400+#define IOAPIC_LEVEL 1
4401+
4402+static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
4403+{
4404+ unsigned idx;
4405+
4406+ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
4407+
4408+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
4409+ trigger == IOAPIC_LEVEL)
4410+ irq_desc[idx].chip = &ioapic_level_type;
4411+ else
4412+ irq_desc[idx].chip = &ioapic_edge_type;
4413+ set_intr_gate(vector, interrupt[idx]);
4414+}
4415+#else
4416+#define ioapic_register_intr(irq, vector, trigger) evtchn_register_pirq(irq)
4417+#endif
4418+
4419+static void __init setup_IO_APIC_irqs(void)
4420+{
4421+ struct IO_APIC_route_entry entry;
4422+ int apic, pin, idx, irq, first_notcon = 1, vector;
4423+ unsigned long flags;
4424+
4425+ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
4426+
4427+ for (apic = 0; apic < nr_ioapics; apic++) {
4428+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
4429+
4430+ /*
4431+ * add it to the IO-APIC irq-routing table:
4432+ */
4433+ memset(&entry,0,sizeof(entry));
4434+
4435+ entry.delivery_mode = INT_DELIVERY_MODE;
4436+ entry.dest_mode = INT_DEST_MODE;
4437+ entry.mask = 0; /* enable IRQ */
4438+ entry.dest.logical.logical_dest =
4439+ cpu_mask_to_apicid(TARGET_CPUS);
4440+
4441+ idx = find_irq_entry(apic,pin,mp_INT);
4442+ if (idx == -1) {
4443+ if (first_notcon) {
4444+ apic_printk(APIC_VERBOSE, KERN_DEBUG
4445+ " IO-APIC (apicid-pin) %d-%d",
4446+ mp_ioapics[apic].mpc_apicid,
4447+ pin);
4448+ first_notcon = 0;
4449+ } else
4450+ apic_printk(APIC_VERBOSE, ", %d-%d",
4451+ mp_ioapics[apic].mpc_apicid, pin);
4452+ continue;
4453+ }
4454+
4455+ entry.trigger = irq_trigger(idx);
4456+ entry.polarity = irq_polarity(idx);
4457+
4458+ if (irq_trigger(idx)) {
4459+ entry.trigger = 1;
4460+ entry.mask = 1;
4461+ }
4462+
4463+ irq = pin_2_irq(idx, apic, pin);
4464+ /*
4465+ * skip adding the timer int on secondary nodes, which causes
4466+ * a small but painful rift in the time-space continuum
4467+ */
4468+ if (multi_timer_check(apic, irq))
4469+ continue;
4470+ else
4471+ add_pin_to_irq(irq, apic, pin);
4472+
4473+ if (/*!apic &&*/ !IO_APIC_IRQ(irq))
4474+ continue;
4475+
4476+ if (IO_APIC_IRQ(irq)) {
4477+ vector = assign_irq_vector(irq);
4478+ entry.vector = vector;
4479+ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
4480+
4481+ if (!apic && (irq < 16))
4482+ disable_8259A_irq(irq);
4483+ }
4484+ spin_lock_irqsave(&ioapic_lock, flags);
4485+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
4486+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
4487+ set_native_irq_info(irq, TARGET_CPUS);
4488+ spin_unlock_irqrestore(&ioapic_lock, flags);
4489+ }
4490+ }
4491+
4492+ if (!first_notcon)
4493+ apic_printk(APIC_VERBOSE, " not connected.\n");
4494+}
4495+
4496+/*
4497+ * Set up the 8259A-master output pin:
4498+ */
4499+#ifndef CONFIG_XEN
4500+static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
4501+{
4502+ struct IO_APIC_route_entry entry;
4503+ unsigned long flags;
4504+
4505+ memset(&entry,0,sizeof(entry));
4506+
4507+ disable_8259A_irq(0);
4508+
4509+ /* mask LVT0 */
4510+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
4511+
4512+ /*
4513+ * We use logical delivery to get the timer IRQ
4514+ * to the first CPU.
4515+ */
4516+ entry.dest_mode = INT_DEST_MODE;
4517+ entry.mask = 0; /* unmask IRQ now */
4518+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
4519+ entry.delivery_mode = INT_DELIVERY_MODE;
4520+ entry.polarity = 0;
4521+ entry.trigger = 0;
4522+ entry.vector = vector;
4523+
4524+ /*
4525+ * The timer IRQ doesn't have to know that behind the
 4526+	 * scenes we have an 8259A master in AEOI mode ...
4527+ */
4528+ irq_desc[0].chip = &ioapic_edge_type;
4529+
4530+ /*
4531+ * Add it to the IO-APIC irq-routing table:
4532+ */
4533+ spin_lock_irqsave(&ioapic_lock, flags);
4534+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
4535+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
4536+ spin_unlock_irqrestore(&ioapic_lock, flags);
4537+
4538+ enable_8259A_irq(0);
4539+}
4540+
4541+static inline void UNEXPECTED_IO_APIC(void)
4542+{
4543+}
4544+
4545+void __init print_IO_APIC(void)
4546+{
4547+ int apic, i;
4548+ union IO_APIC_reg_00 reg_00;
4549+ union IO_APIC_reg_01 reg_01;
4550+ union IO_APIC_reg_02 reg_02;
4551+ union IO_APIC_reg_03 reg_03;
4552+ unsigned long flags;
4553+
4554+ if (apic_verbosity == APIC_QUIET)
4555+ return;
4556+
4557+ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
4558+ for (i = 0; i < nr_ioapics; i++)
4559+ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
4560+ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
4561+
4562+ /*
4563+ * We are a bit conservative about what we expect. We have to
4564+ * know about every hardware change ASAP.
4565+ */
4566+ printk(KERN_INFO "testing the IO APIC.......................\n");
4567+
4568+ for (apic = 0; apic < nr_ioapics; apic++) {
4569+
4570+ spin_lock_irqsave(&ioapic_lock, flags);
4571+ reg_00.raw = io_apic_read(apic, 0);
4572+ reg_01.raw = io_apic_read(apic, 1);
4573+ if (reg_01.bits.version >= 0x10)
4574+ reg_02.raw = io_apic_read(apic, 2);
4575+ if (reg_01.bits.version >= 0x20)
4576+ reg_03.raw = io_apic_read(apic, 3);
4577+ spin_unlock_irqrestore(&ioapic_lock, flags);
4578+
4579+ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
4580+ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
4581+ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
4582+ printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
4583+ printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
4584+ if (reg_00.bits.ID >= get_physical_broadcast())
4585+ UNEXPECTED_IO_APIC();
4586+ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
4587+ UNEXPECTED_IO_APIC();
4588+
4589+ printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
4590+ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
4591+ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
4592+ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
4593+ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
4594+ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
4595+ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
4596+ (reg_01.bits.entries != 0x2E) &&
4597+ (reg_01.bits.entries != 0x3F)
4598+ )
4599+ UNEXPECTED_IO_APIC();
4600+
4601+ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
4602+ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
4603+ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
4604+ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
4605+ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
4606+ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
4607+ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
4608+ )
4609+ UNEXPECTED_IO_APIC();
4610+ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
4611+ UNEXPECTED_IO_APIC();
4612+
4613+ /*
4614+ * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
 4615+		 * but reading reg_02 returns the value of the previously read
 4616+		 * register, so ignore it if reg_02 == reg_01.
4617+ */
4618+ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
4619+ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
4620+ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
4621+ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
4622+ UNEXPECTED_IO_APIC();
4623+ }
4624+
4625+ /*
4626+ * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
 4627+		 * or reg_03, but reading reg_0[23] returns the value of the previously
 4628+		 * read register, so ignore it if reg_03 == reg_0[12].
4629+ */
4630+ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
4631+ reg_03.raw != reg_01.raw) {
4632+ printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
4633+ printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
4634+ if (reg_03.bits.__reserved_1)
4635+ UNEXPECTED_IO_APIC();
4636+ }
4637+
4638+ printk(KERN_DEBUG ".... IRQ redirection table:\n");
4639+
4640+ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
4641+ " Stat Dest Deli Vect: \n");
4642+
4643+ for (i = 0; i <= reg_01.bits.entries; i++) {
4644+ struct IO_APIC_route_entry entry;
4645+
4646+ spin_lock_irqsave(&ioapic_lock, flags);
4647+ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
4648+ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
4649+ spin_unlock_irqrestore(&ioapic_lock, flags);
4650+
4651+ printk(KERN_DEBUG " %02x %03X %02X ",
4652+ i,
4653+ entry.dest.logical.logical_dest,
4654+ entry.dest.physical.physical_dest
4655+ );
4656+
4657+ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
4658+ entry.mask,
4659+ entry.trigger,
4660+ entry.irr,
4661+ entry.polarity,
4662+ entry.delivery_status,
4663+ entry.dest_mode,
4664+ entry.delivery_mode,
4665+ entry.vector
4666+ );
4667+ }
4668+ }
4669+ if (use_pci_vector())
4670+ printk(KERN_INFO "Using vector-based indexing\n");
4671+ printk(KERN_DEBUG "IRQ to pin mappings:\n");
4672+ for (i = 0; i < NR_IRQS; i++) {
4673+ struct irq_pin_list *entry = irq_2_pin + i;
4674+ if (entry->pin < 0)
4675+ continue;
4676+ if (use_pci_vector() && !platform_legacy_irq(i))
4677+ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
4678+ else
4679+ printk(KERN_DEBUG "IRQ%d ", i);
4680+ for (;;) {
4681+ printk("-> %d:%d", entry->apic, entry->pin);
4682+ if (!entry->next)
4683+ break;
4684+ entry = irq_2_pin + entry->next;
4685+ }
4686+ printk("\n");
4687+ }
4688+
4689+ printk(KERN_INFO ".................................... done.\n");
4690+
4691+ return;
4692+}
4693+
4694+static void print_APIC_bitfield (int base)
4695+{
4696+ unsigned int v;
4697+ int i, j;
4698+
4699+ if (apic_verbosity == APIC_QUIET)
4700+ return;
4701+
4702+ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
4703+ for (i = 0; i < 8; i++) {
4704+ v = apic_read(base + i*0x10);
4705+ for (j = 0; j < 32; j++) {
4706+ if (v & (1<<j))
4707+ printk("1");
4708+ else
4709+ printk("0");
4710+ }
4711+ printk("\n");
4712+ }
4713+}
4714+
4715+void /*__init*/ print_local_APIC(void * dummy)
4716+{
4717+ unsigned int v, ver, maxlvt;
4718+
4719+ if (apic_verbosity == APIC_QUIET)
4720+ return;
4721+
4722+ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
4723+ smp_processor_id(), hard_smp_processor_id());
4724+ v = apic_read(APIC_ID);
4725+ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
4726+ v = apic_read(APIC_LVR);
4727+ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
4728+ ver = GET_APIC_VERSION(v);
4729+ maxlvt = get_maxlvt();
4730+
4731+ v = apic_read(APIC_TASKPRI);
4732+ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
4733+
4734+ if (APIC_INTEGRATED(ver)) { /* !82489DX */
4735+ v = apic_read(APIC_ARBPRI);
4736+ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
4737+ v & APIC_ARBPRI_MASK);
4738+ v = apic_read(APIC_PROCPRI);
4739+ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
4740+ }
4741+
4742+ v = apic_read(APIC_EOI);
4743+ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
4744+ v = apic_read(APIC_RRR);
4745+ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
4746+ v = apic_read(APIC_LDR);
4747+ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
4748+ v = apic_read(APIC_DFR);
4749+ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
4750+ v = apic_read(APIC_SPIV);
4751+ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
4752+
4753+ printk(KERN_DEBUG "... APIC ISR field:\n");
4754+ print_APIC_bitfield(APIC_ISR);
4755+ printk(KERN_DEBUG "... APIC TMR field:\n");
4756+ print_APIC_bitfield(APIC_TMR);
4757+ printk(KERN_DEBUG "... APIC IRR field:\n");
4758+ print_APIC_bitfield(APIC_IRR);
4759+
4760+ if (APIC_INTEGRATED(ver)) { /* !82489DX */
4761+ if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
4762+ apic_write(APIC_ESR, 0);
4763+ v = apic_read(APIC_ESR);
4764+ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
4765+ }
4766+
4767+ v = apic_read(APIC_ICR);
4768+ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
4769+ v = apic_read(APIC_ICR2);
4770+ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
4771+
4772+ v = apic_read(APIC_LVTT);
4773+ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
4774+
4775+ if (maxlvt > 3) { /* PC is LVT#4. */
4776+ v = apic_read(APIC_LVTPC);
4777+ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
4778+ }
4779+ v = apic_read(APIC_LVT0);
4780+ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
4781+ v = apic_read(APIC_LVT1);
4782+ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
4783+
4784+ if (maxlvt > 2) { /* ERR is LVT#3. */
4785+ v = apic_read(APIC_LVTERR);
4786+ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
4787+ }
4788+
4789+ v = apic_read(APIC_TMICT);
4790+ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
4791+ v = apic_read(APIC_TMCCT);
4792+ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
4793+ v = apic_read(APIC_TDCR);
4794+ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
4795+ printk("\n");
4796+}
4797+
4798+void print_all_local_APICs (void)
4799+{
4800+ on_each_cpu(print_local_APIC, NULL, 1, 1);
4801+}
4802+
4803+void /*__init*/ print_PIC(void)
4804+{
4805+ unsigned int v;
4806+ unsigned long flags;
4807+
4808+ if (apic_verbosity == APIC_QUIET)
4809+ return;
4810+
4811+ printk(KERN_DEBUG "\nprinting PIC contents\n");
4812+
4813+ spin_lock_irqsave(&i8259A_lock, flags);
4814+
4815+ v = inb(0xa1) << 8 | inb(0x21);
4816+ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
4817+
4818+ v = inb(0xa0) << 8 | inb(0x20);
4819+ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
4820+
4821+ outb(0x0b,0xa0);
4822+ outb(0x0b,0x20);
4823+ v = inb(0xa0) << 8 | inb(0x20);
4824+ outb(0x0a,0xa0);
4825+ outb(0x0a,0x20);
4826+
4827+ spin_unlock_irqrestore(&i8259A_lock, flags);
4828+
4829+ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
4830+
4831+ v = inb(0x4d1) << 8 | inb(0x4d0);
4832+ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
4833+}
4834+#endif /* !CONFIG_XEN */
4835+
4836+static void __init enable_IO_APIC(void)
4837+{
4838+ union IO_APIC_reg_01 reg_01;
4839+ int i8259_apic, i8259_pin;
4840+ int i, apic;
4841+ unsigned long flags;
4842+
4843+ for (i = 0; i < PIN_MAP_SIZE; i++) {
4844+ irq_2_pin[i].pin = -1;
4845+ irq_2_pin[i].next = 0;
4846+ }
4847+ if (!pirqs_enabled)
4848+ for (i = 0; i < MAX_PIRQS; i++)
4849+ pirq_entries[i] = -1;
4850+
4851+ /*
4852+ * The number of IO-APIC IRQ registers (== #pins):
4853+ */
4854+ for (apic = 0; apic < nr_ioapics; apic++) {
4855+ spin_lock_irqsave(&ioapic_lock, flags);
4856+ reg_01.raw = io_apic_read(apic, 1);
4857+ spin_unlock_irqrestore(&ioapic_lock, flags);
4858+ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
4859+ }
4860+ for(apic = 0; apic < nr_ioapics; apic++) {
4861+ int pin;
4862+ /* See if any of the pins is in ExtINT mode */
4863+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
4864+ struct IO_APIC_route_entry entry;
4865+ spin_lock_irqsave(&ioapic_lock, flags);
4866+ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
4867+ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
4868+ spin_unlock_irqrestore(&ioapic_lock, flags);
4869+
4870+
4871+ /* If the interrupt line is enabled and in ExtInt mode
4872+ * I have found the pin where the i8259 is connected.
4873+ */
4874+ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
4875+ ioapic_i8259.apic = apic;
4876+ ioapic_i8259.pin = pin;
4877+ goto found_i8259;
4878+ }
4879+ }
4880+ }
4881+ found_i8259:
 4882+	/* Look to see if the MP table has reported the ExtINT */
 4883+	/* If we could not find the appropriate pin by looking at the ioapic,
 4884+	 * the i8259 is probably not connected to the ioapic, but give the
 4885+	 * mptable a chance anyway.
4886+ */
4887+ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
4888+ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
4889+ /* Trust the MP table if nothing is setup in the hardware */
4890+ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
4891+ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
4892+ ioapic_i8259.pin = i8259_pin;
4893+ ioapic_i8259.apic = i8259_apic;
4894+ }
4895+ /* Complain if the MP table and the hardware disagree */
4896+ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
4897+ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
4898+ {
4899+ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
4900+ }
4901+
4902+ /*
4903+ * Do not trust the IO-APIC being empty at bootup
4904+ */
4905+ clear_IO_APIC();
4906+}
4907+
4908+/*
4909+ * Not an __init, needed by the reboot code
4910+ */
4911+void disable_IO_APIC(void)
4912+{
4913+ /*
4914+ * Clear the IO-APIC before rebooting:
4915+ */
4916+ clear_IO_APIC();
4917+
4918+#ifndef CONFIG_XEN
4919+ /*
4920+ * If the i8259 is routed through an IOAPIC
4921+ * Put that IOAPIC in virtual wire mode
4922+ * so legacy interrupts can be delivered.
4923+ */
4924+ if (ioapic_i8259.pin != -1) {
4925+ struct IO_APIC_route_entry entry;
4926+ unsigned long flags;
4927+
4928+ memset(&entry, 0, sizeof(entry));
4929+ entry.mask = 0; /* Enabled */
4930+ entry.trigger = 0; /* Edge */
4931+ entry.irr = 0;
4932+ entry.polarity = 0; /* High */
4933+ entry.delivery_status = 0;
4934+ entry.dest_mode = 0; /* Physical */
4935+ entry.delivery_mode = dest_ExtINT; /* ExtInt */
4936+ entry.vector = 0;
4937+ entry.dest.physical.physical_dest =
4938+ GET_APIC_ID(apic_read(APIC_ID));
4939+
4940+ /*
4941+ * Add it to the IO-APIC irq-routing table:
4942+ */
4943+ spin_lock_irqsave(&ioapic_lock, flags);
4944+ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
4945+ *(((int *)&entry)+1));
4946+ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
4947+ *(((int *)&entry)+0));
4948+ spin_unlock_irqrestore(&ioapic_lock, flags);
4949+ }
4950+ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
4951+#endif
4952+}
4953+
4954+/*
4955+ * function to set the IO-APIC physical IDs based on the
4956+ * values stored in the MPC table.
4957+ *
4958+ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
4959+ */
4960+
4961+#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
4962+static void __init setup_ioapic_ids_from_mpc(void)
4963+{
4964+ union IO_APIC_reg_00 reg_00;
4965+ physid_mask_t phys_id_present_map;
4966+ int apic;
4967+ int i;
4968+ unsigned char old_id;
4969+ unsigned long flags;
4970+
4971+ /*
4972+ * Don't check I/O APIC IDs for xAPIC systems. They have
4973+ * no meaning without the serial APIC bus.
4974+ */
4975+ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
4976+ || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
4977+ return;
4978+ /*
4979+ * This is broken; anything with a real cpu count has to
4980+ * circumvent this idiocy regardless.
4981+ */
4982+ phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
4983+
4984+ /*
4985+ * Set the IOAPIC ID to the value stored in the MPC table.
4986+ */
4987+ for (apic = 0; apic < nr_ioapics; apic++) {
4988+
4989+ /* Read the register 0 value */
4990+ spin_lock_irqsave(&ioapic_lock, flags);
4991+ reg_00.raw = io_apic_read(apic, 0);
4992+ spin_unlock_irqrestore(&ioapic_lock, flags);
4993+
4994+ old_id = mp_ioapics[apic].mpc_apicid;
4995+
4996+ if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
4997+ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
4998+ apic, mp_ioapics[apic].mpc_apicid);
4999+ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
5000+ reg_00.bits.ID);
5001+ mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
5002+ }
5003+
5004+ /*
5005+ * Sanity check, is the ID really free? Every APIC in a
5006+ * system must have a unique ID or we get lots of nice
5007+ * 'stuck on smp_invalidate_needed IPI wait' messages.
5008+ */
5009+ if (check_apicid_used(phys_id_present_map,
5010+ mp_ioapics[apic].mpc_apicid)) {
5011+ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
5012+ apic, mp_ioapics[apic].mpc_apicid);
5013+ for (i = 0; i < get_physical_broadcast(); i++)
5014+ if (!physid_isset(i, phys_id_present_map))
5015+ break;
5016+ if (i >= get_physical_broadcast())
5017+ panic("Max APIC ID exceeded!\n");
5018+ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
5019+ i);
5020+ physid_set(i, phys_id_present_map);
5021+ mp_ioapics[apic].mpc_apicid = i;
5022+ } else {
5023+ physid_mask_t tmp;
5024+ tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
5025+ apic_printk(APIC_VERBOSE, "Setting %d in the "
5026+ "phys_id_present_map\n",
5027+ mp_ioapics[apic].mpc_apicid);
5028+ physids_or(phys_id_present_map, phys_id_present_map, tmp);
5029+ }
5030+
5031+
5032+ /*
5033+ * We need to adjust the IRQ routing table
5034+ * if the ID changed.
5035+ */
5036+ if (old_id != mp_ioapics[apic].mpc_apicid)
5037+ for (i = 0; i < mp_irq_entries; i++)
5038+ if (mp_irqs[i].mpc_dstapic == old_id)
5039+ mp_irqs[i].mpc_dstapic
5040+ = mp_ioapics[apic].mpc_apicid;
5041+
5042+ /*
5043+ * Read the right value from the MPC table and
5044+ * write it into the ID register.
5045+ */
5046+ apic_printk(APIC_VERBOSE, KERN_INFO
5047+ "...changing IO-APIC physical APIC ID to %d ...",
5048+ mp_ioapics[apic].mpc_apicid);
5049+
5050+ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
5051+ spin_lock_irqsave(&ioapic_lock, flags);
5052+ io_apic_write(apic, 0, reg_00.raw);
5053+ spin_unlock_irqrestore(&ioapic_lock, flags);
5054+
5055+ /*
5056+ * Sanity check
5057+ */
5058+ spin_lock_irqsave(&ioapic_lock, flags);
5059+ reg_00.raw = io_apic_read(apic, 0);
5060+ spin_unlock_irqrestore(&ioapic_lock, flags);
5061+ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
5062+ printk("could not set ID!\n");
5063+ else
5064+ apic_printk(APIC_VERBOSE, " ok.\n");
5065+ }
5066+}
5067+#else
5068+static void __init setup_ioapic_ids_from_mpc(void) { }
5069+#endif
5070+
5071+#ifndef CONFIG_XEN
5072+/*
 5073+ * There is a nasty bug in some older SMP boards: their mptable lies
5074+ * about the timer IRQ. We do the following to work around the situation:
5075+ *
5076+ * - timer IRQ defaults to IO-APIC IRQ
5077+ * - if this function detects that timer IRQs are defunct, then we fall
5078+ * back to ISA timer IRQs
5079+ */
5080+static int __init timer_irq_works(void)
5081+{
5082+ unsigned long t1 = jiffies;
5083+
5084+ local_irq_enable();
5085+ /* Let ten ticks pass... */
5086+ mdelay((10 * 1000) / HZ);
5087+
5088+ /*
5089+ * Expect a few ticks at least, to be sure some possible
5090+ * glue logic does not lock up after one or two first
5091+ * ticks in a non-ExtINT mode. Also the local APIC
5092+ * might have cached one ExtINT interrupt. Finally, at
5093+ * least one tick may be lost due to delays.
5094+ */
5095+ if (jiffies - t1 > 4)
5096+ return 1;
5097+
5098+ return 0;
5099+}
5100+
5101+/*
 5102+ * In the SMP+IOAPIC case it might happen that an unspecified number
 5103+ * of pending IRQ events is left unhandled. These cases are very rare,
 5104+ * so we 'resend' these IRQs via IPIs to the same CPU. It's much
 5105+ * better to do it this way because then we do not have to be aware of
5106+ * 'pending' interrupts in the IRQ path, except at this point.
5107+ */
5108+/*
5109+ * Edge triggered needs to resend any interrupt
5110+ * that was delayed but this is now handled in the device
5111+ * independent code.
5112+ */
5113+
5114+/*
 5115+ * Starting up an edge-triggered IO-APIC interrupt is
 5116+ * nasty - we need to make sure that we get the edge.
 5117+ * If it is already asserted for some reason, we need to
 5118+ * return 1 to indicate that it was pending.
5119+ *
5120+ * This is not complete - we should be able to fake
5121+ * an edge even if it isn't on the 8259A...
5122+ */
5123+static unsigned int startup_edge_ioapic_irq(unsigned int irq)
5124+{
5125+ int was_pending = 0;
5126+ unsigned long flags;
5127+
5128+ spin_lock_irqsave(&ioapic_lock, flags);
5129+ if (irq < 16) {
5130+ disable_8259A_irq(irq);
5131+ if (i8259A_irq_pending(irq))
5132+ was_pending = 1;
5133+ }
5134+ __unmask_IO_APIC_irq(irq);
5135+ spin_unlock_irqrestore(&ioapic_lock, flags);
5136+
5137+ return was_pending;
5138+}
5139+
5140+/*
 5141+ * Once we have recorded IRQ_PENDING, we can mask the
5142+ * interrupt for real. This prevents IRQ storms from unhandled
5143+ * devices.
5144+ */
5145+static void ack_edge_ioapic_irq(unsigned int irq)
5146+{
5147+ move_irq(irq);
5148+ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
5149+ == (IRQ_PENDING | IRQ_DISABLED))
5150+ mask_IO_APIC_irq(irq);
5151+ ack_APIC_irq();
5152+}
5153+
5154+/*
5155+ * Level triggered interrupts can just be masked,
5156+ * and shutting down and starting up the interrupt
5157+ * is the same as enabling and disabling them -- except
5158+ * with a startup need to return a "was pending" value.
5159+ *
5160+ * Level triggered interrupts are special because we
5161+ * do not touch any IO-APIC register while handling
5162+ * them. We ack the APIC in the end-IRQ handler, not
5163+ * in the start-IRQ-handler. Protection against reentrance
5164+ * from the same interrupt is still provided, both by the
5165+ * generic IRQ layer and by the fact that an unacked local
5166+ * APIC does not accept IRQs.
5167+ */
5168+static unsigned int startup_level_ioapic_irq (unsigned int irq)
5169+{
5170+ unmask_IO_APIC_irq(irq);
5171+
5172+ return 0; /* don't check for pending */
5173+}
5174+
5175+static void end_level_ioapic_irq (unsigned int irq)
5176+{
5177+ unsigned long v;
5178+ int i;
5179+
5180+ move_irq(irq);
5181+/*
5182+ * It appears there is an erratum which affects at least version 0x11
5183+ * of I/O APIC (that's the 82093AA and cores integrated into various
5184+ * chipsets). Under certain conditions a level-triggered interrupt is
 5185+ * erroneously delivered as an edge-triggered one, but the respective IRR
5186+ * bit gets set nevertheless. As a result the I/O unit expects an EOI
5187+ * message but it will never arrive and further interrupts are blocked
5188+ * from the source. The exact reason is so far unknown, but the
5189+ * phenomenon was observed when two consecutive interrupt requests
5190+ * from a given source get delivered to the same CPU and the source is
5191+ * temporarily disabled in between.
5192+ *
5193+ * A workaround is to simulate an EOI message manually. We achieve it
5194+ * by setting the trigger mode to edge and then to level when the edge
5195+ * trigger mode gets detected in the TMR of a local APIC for a
5196+ * level-triggered interrupt. We mask the source for the time of the
5197+ * operation to prevent an edge-triggered interrupt escaping meanwhile.
5198+ * The idea is from Manfred Spraul. --macro
5199+ */
5200+ i = IO_APIC_VECTOR(irq);
5201+
5202+ v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
5203+
5204+ ack_APIC_irq();
5205+
5206+ if (!(v & (1 << (i & 0x1f)))) {
5207+ atomic_inc(&irq_mis_count);
5208+ spin_lock(&ioapic_lock);
5209+ __mask_and_edge_IO_APIC_irq(irq);
5210+ __unmask_and_level_IO_APIC_irq(irq);
5211+ spin_unlock(&ioapic_lock);
5212+ }
5213+}
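
A minimal standalone sketch (not part of the patch) of the TMR indexing used in the erratum workaround above: the 256-bit TMR is spread over eight 32-bit registers spaced 0x10 apart, so (vector & ~0x1f) >> 1 selects the word and vector & 0x1f the bit. Vector 0x31 is an invented example.

#include <stdio.h>

int main(void)
{
	unsigned int vector  = 0x31;			/* example vector only */
	unsigned int reg_off = (vector & ~0x1f) >> 1;	/* (vector/32) * 0x10 -> 0x10 */
	unsigned int bit     = vector & 0x1f;		/* bit within that word -> 17 */

	printf("APIC_TMR + 0x%x, bit %u\n", reg_off, bit);
	return 0;
}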
5214+
5215+#ifdef CONFIG_PCI_MSI
5216+static unsigned int startup_edge_ioapic_vector(unsigned int vector)
5217+{
5218+ int irq = vector_to_irq(vector);
5219+
5220+ return startup_edge_ioapic_irq(irq);
5221+}
5222+
5223+static void ack_edge_ioapic_vector(unsigned int vector)
5224+{
5225+ int irq = vector_to_irq(vector);
5226+
5227+ move_native_irq(vector);
5228+ ack_edge_ioapic_irq(irq);
5229+}
5230+
5231+static unsigned int startup_level_ioapic_vector (unsigned int vector)
5232+{
5233+ int irq = vector_to_irq(vector);
5234+
5235+ return startup_level_ioapic_irq (irq);
5236+}
5237+
5238+static void end_level_ioapic_vector (unsigned int vector)
5239+{
5240+ int irq = vector_to_irq(vector);
5241+
5242+ move_native_irq(vector);
5243+ end_level_ioapic_irq(irq);
5244+}
5245+
5246+static void mask_IO_APIC_vector (unsigned int vector)
5247+{
5248+ int irq = vector_to_irq(vector);
5249+
5250+ mask_IO_APIC_irq(irq);
5251+}
5252+
5253+static void unmask_IO_APIC_vector (unsigned int vector)
5254+{
5255+ int irq = vector_to_irq(vector);
5256+
5257+ unmask_IO_APIC_irq(irq);
5258+}
5259+
5260+#ifdef CONFIG_SMP
5261+static void set_ioapic_affinity_vector (unsigned int vector,
5262+ cpumask_t cpu_mask)
5263+{
5264+ int irq = vector_to_irq(vector);
5265+
5266+ set_native_irq_info(vector, cpu_mask);
5267+ set_ioapic_affinity_irq(irq, cpu_mask);
5268+}
5269+#endif
5270+#endif
5271+
5272+static int ioapic_retrigger(unsigned int irq)
5273+{
5274+ send_IPI_self(IO_APIC_VECTOR(irq));
5275+
5276+ return 1;
5277+}
5278+
5279+/*
5280+ * Level and edge triggered IO-APIC interrupts need different handling,
5281+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
5282+ * handled with the level-triggered descriptor, but that one has slightly
5283+ * more overhead. Level-triggered interrupts cannot be handled with the
5284+ * edge-triggered handler, without risking IRQ storms and other ugly
5285+ * races.
5286+ */
5287+static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
5288+ .typename = "IO-APIC-edge",
5289+ .startup = startup_edge_ioapic,
5290+ .shutdown = shutdown_edge_ioapic,
5291+ .enable = enable_edge_ioapic,
5292+ .disable = disable_edge_ioapic,
5293+ .ack = ack_edge_ioapic,
5294+ .end = end_edge_ioapic,
5295+#ifdef CONFIG_SMP
5296+ .set_affinity = set_ioapic_affinity,
5297+#endif
5298+ .retrigger = ioapic_retrigger,
5299+};
5300+
5301+static struct hw_interrupt_type ioapic_level_type __read_mostly = {
5302+ .typename = "IO-APIC-level",
5303+ .startup = startup_level_ioapic,
5304+ .shutdown = shutdown_level_ioapic,
5305+ .enable = enable_level_ioapic,
5306+ .disable = disable_level_ioapic,
5307+ .ack = mask_and_ack_level_ioapic,
5308+ .end = end_level_ioapic,
5309+#ifdef CONFIG_SMP
5310+ .set_affinity = set_ioapic_affinity,
5311+#endif
5312+ .retrigger = ioapic_retrigger,
5313+};
5314+#endif /* !CONFIG_XEN */
5315+
5316+static inline void init_IO_APIC_traps(void)
5317+{
5318+ int irq;
5319+
5320+ /*
5321+ * NOTE! The local APIC isn't very good at handling
5322+ * multiple interrupts at the same interrupt level.
5323+ * As the interrupt level is determined by taking the
5324+ * vector number and shifting that right by 4, we
5325+ * want to spread these out a bit so that they don't
5326+ * all fall in the same interrupt level.
5327+ *
5328+ * Also, we've got to be careful not to trash gate
5329+ * 0x80, because int 0x80 is hm, kind of importantish. ;)
5330+ */
5331+ for (irq = 0; irq < NR_IRQS ; irq++) {
5332+ int tmp = irq;
5333+ if (use_pci_vector()) {
5334+ if (!platform_legacy_irq(tmp))
5335+ if ((tmp = vector_to_irq(tmp)) == -1)
5336+ continue;
5337+ }
5338+ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
5339+ /*
5340+ * Hmm.. We don't have an entry for this,
5341+ * so default to an old-fashioned 8259
5342+ * interrupt if we can..
5343+ */
5344+ if (irq < 16)
5345+ make_8259A_irq(irq);
5346+#ifndef CONFIG_XEN
5347+ else
5348+ /* Strange. Oh, well.. */
5349+ irq_desc[irq].chip = &no_irq_type;
5350+#endif
5351+ }
5352+ }
5353+}
5354+
5355+#ifndef CONFIG_XEN
5356+static void enable_lapic_irq (unsigned int irq)
5357+{
5358+ unsigned long v;
5359+
5360+ v = apic_read(APIC_LVT0);
5361+ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
5362+}
5363+
5364+static void disable_lapic_irq (unsigned int irq)
5365+{
5366+ unsigned long v;
5367+
5368+ v = apic_read(APIC_LVT0);
5369+ apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
5370+}
5371+
5372+static void ack_lapic_irq (unsigned int irq)
5373+{
5374+ ack_APIC_irq();
5375+}
5376+
5377+static void end_lapic_irq (unsigned int i) { /* nothing */ }
5378+
5379+static struct hw_interrupt_type lapic_irq_type __read_mostly = {
5380+ .typename = "local-APIC-edge",
5381+ .startup = NULL, /* startup_irq() not used for IRQ0 */
5382+ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
5383+ .enable = enable_lapic_irq,
5384+ .disable = disable_lapic_irq,
5385+ .ack = ack_lapic_irq,
5386+ .end = end_lapic_irq
5387+};
5388+
5389+static void setup_nmi (void)
5390+{
5391+ /*
5392+ * Dirty trick to enable the NMI watchdog ...
5393+ * We put the 8259A master into AEOI mode and
 5394+	 * unmask LVT0 as NMI on all local APICs.
5395+ *
5396+ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
5397+ * is from Maciej W. Rozycki - so we do not have to EOI from
5398+ * the NMI handler or the timer interrupt.
5399+ */
5400+ apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
5401+
5402+ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
5403+
5404+ apic_printk(APIC_VERBOSE, " done.\n");
5405+}
5406+
5407+/*
 5408+ * This looks a bit hackish, but it's about the only way of sending
5409+ * a few INTA cycles to 8259As and any associated glue logic. ICR does
5410+ * not support the ExtINT mode, unfortunately. We need to send these
5411+ * cycles as some i82489DX-based boards have glue logic that keeps the
5412+ * 8259A interrupt line asserted until INTA. --macro
5413+ */
5414+static inline void unlock_ExtINT_logic(void)
5415+{
5416+ int apic, pin, i;
5417+ struct IO_APIC_route_entry entry0, entry1;
5418+ unsigned char save_control, save_freq_select;
5419+ unsigned long flags;
5420+
5421+ pin = find_isa_irq_pin(8, mp_INT);
5422+ apic = find_isa_irq_apic(8, mp_INT);
5423+ if (pin == -1)
5424+ return;
5425+
5426+ spin_lock_irqsave(&ioapic_lock, flags);
5427+ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
5428+ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
5429+ spin_unlock_irqrestore(&ioapic_lock, flags);
5430+ clear_IO_APIC_pin(apic, pin);
5431+
5432+ memset(&entry1, 0, sizeof(entry1));
5433+
5434+ entry1.dest_mode = 0; /* physical delivery */
5435+ entry1.mask = 0; /* unmask IRQ now */
5436+ entry1.dest.physical.physical_dest = hard_smp_processor_id();
5437+ entry1.delivery_mode = dest_ExtINT;
5438+ entry1.polarity = entry0.polarity;
5439+ entry1.trigger = 0;
5440+ entry1.vector = 0;
5441+
5442+ spin_lock_irqsave(&ioapic_lock, flags);
5443+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
5444+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
5445+ spin_unlock_irqrestore(&ioapic_lock, flags);
5446+
5447+ save_control = CMOS_READ(RTC_CONTROL);
5448+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
5449+ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
5450+ RTC_FREQ_SELECT);
5451+ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
5452+
5453+ i = 100;
5454+ while (i-- > 0) {
5455+ mdelay(10);
5456+ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
5457+ i -= 10;
5458+ }
5459+
5460+ CMOS_WRITE(save_control, RTC_CONTROL);
5461+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
5462+ clear_IO_APIC_pin(apic, pin);
5463+
5464+ spin_lock_irqsave(&ioapic_lock, flags);
5465+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
5466+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
5467+ spin_unlock_irqrestore(&ioapic_lock, flags);
5468+}
5469+
5470+int timer_uses_ioapic_pin_0;
5471+
5472+/*
5473+ * This code may look a bit paranoid, but it's supposed to cooperate with
5474+ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
5475+ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
5476+ * fanatically on his truly buggy board.
5477+ */
5478+static inline void check_timer(void)
5479+{
5480+ int apic1, pin1, apic2, pin2;
5481+ int vector;
5482+
5483+ /*
5484+ * get/set the timer IRQ vector:
5485+ */
5486+ disable_8259A_irq(0);
5487+ vector = assign_irq_vector(0);
5488+ set_intr_gate(vector, interrupt[0]);
5489+
5490+ /*
5491+ * Subtle, code in do_timer_interrupt() expects an AEOI
5492+ * mode for the 8259A whenever interrupts are routed
5493+ * through I/O APICs. Also IRQ0 has to be enabled in
5494+ * the 8259A which implies the virtual wire has to be
5495+ * disabled in the local APIC.
5496+ */
5497+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
5498+ init_8259A(1);
5499+ timer_ack = 1;
5500+ if (timer_over_8254 > 0)
5501+ enable_8259A_irq(0);
5502+
5503+ pin1 = find_isa_irq_pin(0, mp_INT);
5504+ apic1 = find_isa_irq_apic(0, mp_INT);
5505+ pin2 = ioapic_i8259.pin;
5506+ apic2 = ioapic_i8259.apic;
5507+
5508+ if (pin1 == 0)
5509+ timer_uses_ioapic_pin_0 = 1;
5510+
5511+ printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
5512+ vector, apic1, pin1, apic2, pin2);
5513+
5514+ if (pin1 != -1) {
5515+ /*
5516+ * Ok, does IRQ0 through the IOAPIC work?
5517+ */
5518+ unmask_IO_APIC_irq(0);
5519+ if (timer_irq_works()) {
5520+ if (nmi_watchdog == NMI_IO_APIC) {
5521+ disable_8259A_irq(0);
5522+ setup_nmi();
5523+ enable_8259A_irq(0);
5524+ }
5525+ if (disable_timer_pin_1 > 0)
5526+ clear_IO_APIC_pin(0, pin1);
5527+ return;
5528+ }
5529+ clear_IO_APIC_pin(apic1, pin1);
5530+ printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
5531+ "IO-APIC\n");
5532+ }
5533+
5534+ printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
5535+ if (pin2 != -1) {
5536+ printk("\n..... (found pin %d) ...", pin2);
5537+ /*
5538+ * legacy devices should be connected to IO APIC #0
5539+ */
5540+ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
5541+ if (timer_irq_works()) {
5542+ printk("works.\n");
5543+ if (pin1 != -1)
5544+ replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
5545+ else
5546+ add_pin_to_irq(0, apic2, pin2);
5547+ if (nmi_watchdog == NMI_IO_APIC) {
5548+ setup_nmi();
5549+ }
5550+ return;
5551+ }
5552+ /*
5553+ * Cleanup, just in case ...
5554+ */
5555+ clear_IO_APIC_pin(apic2, pin2);
5556+ }
5557+ printk(" failed.\n");
5558+
5559+ if (nmi_watchdog == NMI_IO_APIC) {
5560+ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
5561+ nmi_watchdog = 0;
5562+ }
5563+
5564+ printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
5565+
5566+ disable_8259A_irq(0);
5567+ irq_desc[0].chip = &lapic_irq_type;
5568+ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
5569+ enable_8259A_irq(0);
5570+
5571+ if (timer_irq_works()) {
5572+ printk(" works.\n");
5573+ return;
5574+ }
5575+ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
5576+ printk(" failed.\n");
5577+
5578+ printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
5579+
5580+ timer_ack = 0;
5581+ init_8259A(0);
5582+ make_8259A_irq(0);
5583+ apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
5584+
5585+ unlock_ExtINT_logic();
5586+
5587+ if (timer_irq_works()) {
5588+ printk(" works.\n");
5589+ return;
5590+ }
5591+ printk(" failed :(.\n");
5592+ panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
5593+ "report. Then try booting with the 'noapic' option");
5594+}
5595+#else
5596+int timer_uses_ioapic_pin_0 = 0;
5597+#define check_timer() ((void)0)
5598+#endif
5599+
5600+/*
5601+ *
 5602+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
 5603+ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
5604+ * Linux doesn't really care, as it's not actually used
5605+ * for any interrupt handling anyway.
5606+ */
5607+#define PIC_IRQS (1 << PIC_CASCADE_IR)
5608+
5609+void __init setup_IO_APIC(void)
5610+{
5611+ enable_IO_APIC();
5612+
5613+ if (acpi_ioapic)
5614+ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
5615+ else
5616+ io_apic_irqs = ~PIC_IRQS;
5617+
5618+ printk("ENABLING IO-APIC IRQs\n");
5619+
5620+ /*
5621+ * Set up IO-APIC IRQ routing.
5622+ */
5623+ if (!acpi_ioapic)
5624+ setup_ioapic_ids_from_mpc();
5625+#ifndef CONFIG_XEN
5626+ sync_Arb_IDs();
5627+#endif
5628+ setup_IO_APIC_irqs();
5629+ init_IO_APIC_traps();
5630+ check_timer();
5631+ if (!acpi_ioapic)
5632+ print_IO_APIC();
5633+}
5634+
5635+static int __init setup_disable_8254_timer(char *s)
5636+{
5637+ timer_over_8254 = -1;
5638+ return 1;
5639+}
5640+static int __init setup_enable_8254_timer(char *s)
5641+{
5642+ timer_over_8254 = 2;
5643+ return 1;
5644+}
5645+
5646+__setup("disable_8254_timer", setup_disable_8254_timer);
5647+__setup("enable_8254_timer", setup_enable_8254_timer);
5648+
5649+/*
5650+ * Called after all the initialization is done. If we didn't find any
5651+ * APIC bugs then we can allow the modify fast path
5652+ */
5653+
5654+static int __init io_apic_bug_finalize(void)
5655+{
5656+ if(sis_apic_bug == -1)
5657+ sis_apic_bug = 0;
5658+ if (is_initial_xendomain()) {
5659+ struct xen_platform_op op = { .cmd = XENPF_platform_quirk };
5660+ op.u.platform_quirk.quirk_id = sis_apic_bug ?
5661+ QUIRK_IOAPIC_BAD_REGSEL : QUIRK_IOAPIC_GOOD_REGSEL;
5662+ VOID(HYPERVISOR_platform_op(&op));
5663+ }
5664+ return 0;
5665+}
5666+
5667+late_initcall(io_apic_bug_finalize);
5668+
5669+struct sysfs_ioapic_data {
5670+ struct sys_device dev;
5671+ struct IO_APIC_route_entry entry[0];
5672+};
5673+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
5674+
5675+static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
5676+{
5677+ struct IO_APIC_route_entry *entry;
5678+ struct sysfs_ioapic_data *data;
5679+ unsigned long flags;
5680+ int i;
5681+
5682+ data = container_of(dev, struct sysfs_ioapic_data, dev);
5683+ entry = data->entry;
5684+ spin_lock_irqsave(&ioapic_lock, flags);
5685+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
5686+ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
5687+ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
5688+ }
5689+ spin_unlock_irqrestore(&ioapic_lock, flags);
5690+
5691+ return 0;
5692+}
5693+
5694+static int ioapic_resume(struct sys_device *dev)
5695+{
5696+ struct IO_APIC_route_entry *entry;
5697+ struct sysfs_ioapic_data *data;
5698+ unsigned long flags;
5699+ union IO_APIC_reg_00 reg_00;
5700+ int i;
5701+
5702+ data = container_of(dev, struct sysfs_ioapic_data, dev);
5703+ entry = data->entry;
5704+
5705+ spin_lock_irqsave(&ioapic_lock, flags);
5706+ reg_00.raw = io_apic_read(dev->id, 0);
5707+ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
5708+ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
5709+ io_apic_write(dev->id, 0, reg_00.raw);
5710+ }
5711+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
5712+ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
5713+ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
5714+ }
5715+ spin_unlock_irqrestore(&ioapic_lock, flags);
5716+
5717+ return 0;
5718+}
5719+
5720+static struct sysdev_class ioapic_sysdev_class = {
5721+ set_kset_name("ioapic"),
5722+#ifndef CONFIG_XEN
5723+ .suspend = ioapic_suspend,
5724+ .resume = ioapic_resume,
5725+#endif
5726+};
5727+
5728+static int __init ioapic_init_sysfs(void)
5729+{
5730+ struct sys_device * dev;
5731+ int i, size, error = 0;
5732+
5733+ error = sysdev_class_register(&ioapic_sysdev_class);
5734+ if (error)
5735+ return error;
5736+
5737+ for (i = 0; i < nr_ioapics; i++ ) {
5738+ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
5739+ * sizeof(struct IO_APIC_route_entry);
5740+ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
5741+ if (!mp_ioapic_data[i]) {
5742+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
5743+ continue;
5744+ }
5745+ memset(mp_ioapic_data[i], 0, size);
5746+ dev = &mp_ioapic_data[i]->dev;
5747+ dev->id = i;
5748+ dev->cls = &ioapic_sysdev_class;
5749+ error = sysdev_register(dev);
5750+ if (error) {
5751+ kfree(mp_ioapic_data[i]);
5752+ mp_ioapic_data[i] = NULL;
5753+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
5754+ continue;
5755+ }
5756+ }
5757+
5758+ return 0;
5759+}
5760+
5761+device_initcall(ioapic_init_sysfs);
5762+
5763+/* --------------------------------------------------------------------------
5764+ ACPI-based IOAPIC Configuration
5765+ -------------------------------------------------------------------------- */
5766+
5767+#ifdef CONFIG_ACPI
5768+
5769+int __init io_apic_get_unique_id (int ioapic, int apic_id)
5770+{
5771+#ifndef CONFIG_XEN
5772+ union IO_APIC_reg_00 reg_00;
5773+ static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
5774+ physid_mask_t tmp;
5775+ unsigned long flags;
5776+ int i = 0;
5777+
5778+ /*
5779+ * The P4 platform supports up to 256 APIC IDs on two separate APIC
5780+ * buses (one for LAPICs, one for IOAPICs), where predecessors only
5781+ * supports up to 16 on one shared APIC bus.
5782+ *
5783+ * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
5784+ * advantage of new APIC bus architecture.
5785+ */
5786+
5787+ if (physids_empty(apic_id_map))
5788+ apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
5789+
5790+ spin_lock_irqsave(&ioapic_lock, flags);
5791+ reg_00.raw = io_apic_read(ioapic, 0);
5792+ spin_unlock_irqrestore(&ioapic_lock, flags);
5793+
5794+ if (apic_id >= get_physical_broadcast()) {
5795+ printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
5796+ "%d\n", ioapic, apic_id, reg_00.bits.ID);
5797+ apic_id = reg_00.bits.ID;
5798+ }
5799+
5800+ /*
5801+ * Every APIC in a system must have a unique ID or we get lots of nice
5802+ * 'stuck on smp_invalidate_needed IPI wait' messages.
5803+ */
5804+ if (check_apicid_used(apic_id_map, apic_id)) {
5805+
5806+ for (i = 0; i < get_physical_broadcast(); i++) {
5807+ if (!check_apicid_used(apic_id_map, i))
5808+ break;
5809+ }
5810+
5811+ if (i == get_physical_broadcast())
5812+ panic("Max apic_id exceeded!\n");
5813+
5814+ printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
5815+ "trying %d\n", ioapic, apic_id, i);
5816+
5817+ apic_id = i;
5818+ }
5819+
5820+ tmp = apicid_to_cpu_present(apic_id);
5821+ physids_or(apic_id_map, apic_id_map, tmp);
5822+
5823+ if (reg_00.bits.ID != apic_id) {
5824+ reg_00.bits.ID = apic_id;
5825+
5826+ spin_lock_irqsave(&ioapic_lock, flags);
5827+ io_apic_write(ioapic, 0, reg_00.raw);
5828+ reg_00.raw = io_apic_read(ioapic, 0);
5829+ spin_unlock_irqrestore(&ioapic_lock, flags);
5830+
5831+ /* Sanity check */
5832+ if (reg_00.bits.ID != apic_id) {
5833+ printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
5834+ return -1;
5835+ }
5836+ }
5837+
5838+ apic_printk(APIC_VERBOSE, KERN_INFO
5839+ "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
5840+#endif /* !CONFIG_XEN */
5841+
5842+ return apic_id;
5843+}
5844+
5845+
5846+int __init io_apic_get_version (int ioapic)
5847+{
5848+ union IO_APIC_reg_01 reg_01;
5849+ unsigned long flags;
5850+
5851+ spin_lock_irqsave(&ioapic_lock, flags);
5852+ reg_01.raw = io_apic_read(ioapic, 1);
5853+ spin_unlock_irqrestore(&ioapic_lock, flags);
5854+
5855+ return reg_01.bits.version;
5856+}
5857+
5858+
5859+int __init io_apic_get_redir_entries (int ioapic)
5860+{
5861+ union IO_APIC_reg_01 reg_01;
5862+ unsigned long flags;
5863+
5864+ spin_lock_irqsave(&ioapic_lock, flags);
5865+ reg_01.raw = io_apic_read(ioapic, 1);
5866+ spin_unlock_irqrestore(&ioapic_lock, flags);
5867+
5868+ return reg_01.bits.entries;
5869+}
5870+
5871+
5872+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
5873+{
5874+ struct IO_APIC_route_entry entry;
5875+ unsigned long flags;
5876+
5877+ if (!IO_APIC_IRQ(irq)) {
5878+ printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
5879+ ioapic);
5880+ return -EINVAL;
5881+ }
5882+
5883+ /*
5884+ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
5885+ * Note that we mask (disable) IRQs now -- these get enabled when the
5886+ * corresponding device driver registers for this IRQ.
5887+ */
5888+
5889+ memset(&entry,0,sizeof(entry));
5890+
5891+ entry.delivery_mode = INT_DELIVERY_MODE;
5892+ entry.dest_mode = INT_DEST_MODE;
5893+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
5894+ entry.trigger = edge_level;
5895+ entry.polarity = active_high_low;
5896+ entry.mask = 1;
5897+
5898+ /*
5899+ * IRQs < 16 are already in the irq_2_pin[] map
5900+ */
5901+ if (irq >= 16)
5902+ add_pin_to_irq(irq, ioapic, pin);
5903+
5904+ entry.vector = assign_irq_vector(irq);
5905+
5906+ apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
5907+ "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
5908+ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
5909+ edge_level, active_high_low);
5910+
5911+ ioapic_register_intr(irq, entry.vector, edge_level);
5912+
5913+ if (!ioapic && (irq < 16))
5914+ disable_8259A_irq(irq);
5915+
5916+ spin_lock_irqsave(&ioapic_lock, flags);
5917+ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
5918+ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
5919+ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
5920+ spin_unlock_irqrestore(&ioapic_lock, flags);
5921+
5922+ return 0;
5923+}
5924+
5925+#endif /* CONFIG_ACPI */
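
io_apic_set_pci_routing() above (like ioapic_suspend()/ioapic_resume()) treats each 64-bit redirection entry as two 32-bit halves, written at register indices 0x10+2*pin (low word) and 0x11+2*pin (high word). A user-space sketch of that packing and split — the bit layout follows the 82093AA-style redirection-entry format, and the field values are made up for illustration:

/* Sketch: compose an IO-APIC redirection entry and split it into the two
 * 32-bit register writes used above.  Field offsets follow the 82093AA
 * datasheet; the concrete values are illustrative only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t entry = 0;
	unsigned vector = 0x31, delivery_mode = 0 /* fixed */, dest_mode = 1 /* logical */;
	unsigned polarity = 0 /* active high */, trigger = 0 /* edge */, mask = 1, dest = 0x0f;
	int pin = 9;

	entry |= (uint64_t)vector;               /* bits  0-7  */
	entry |= (uint64_t)delivery_mode << 8;   /* bits  8-10 */
	entry |= (uint64_t)dest_mode << 11;      /* bit   11   */
	entry |= (uint64_t)polarity << 13;       /* bit   13   */
	entry |= (uint64_t)trigger << 15;        /* bit   15   */
	entry |= (uint64_t)mask << 16;           /* bit   16   */
	entry |= (uint64_t)dest << 56;           /* bits 56-63 */

	uint32_t low  = (uint32_t)entry;         /* goes to index 0x10 + 2*pin */
	uint32_t high = (uint32_t)(entry >> 32); /* goes to index 0x11 + 2*pin */

	printf("pin %d: reg 0x%02x <- 0x%08x, reg 0x%02x <- 0x%08x\n",
	       pin, 0x10 + 2*pin, (unsigned)low, 0x11 + 2*pin, (unsigned)high);
	return 0;
}

Writing the high word before the low word, as the suspend/resume and routing code does, means the entry stays masked until the final write completes.
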
5926Index: head-2008-11-25/arch/x86/kernel/ioport_32-xen.c
5927===================================================================
5928--- /dev/null 1970-01-01 00:00:00.000000000 +0000
5929+++ head-2008-11-25/arch/x86/kernel/ioport_32-xen.c 2008-01-28 12:24:19.000000000 +0100
5930@@ -0,0 +1,123 @@
5931+/*
5932+ * linux/arch/i386/kernel/ioport.c
5933+ *
5934+ * This contains the io-permission bitmap code - written by obz, with changes
5935+ * by Linus.
5936+ */
5937+
5938+#include <linux/sched.h>
5939+#include <linux/kernel.h>
5940+#include <linux/capability.h>
5941+#include <linux/errno.h>
5942+#include <linux/types.h>
5943+#include <linux/ioport.h>
5944+#include <linux/smp.h>
5945+#include <linux/smp_lock.h>
5946+#include <linux/stddef.h>
5947+#include <linux/slab.h>
5948+#include <linux/thread_info.h>
5949+#include <xen/interface/physdev.h>
5950+
5951+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
5952+static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
5953+{
5954+ unsigned long mask;
5955+ unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
5956+ unsigned int low_index = base & (BITS_PER_LONG-1);
5957+ int length = low_index + extent;
5958+
5959+ if (low_index != 0) {
5960+ mask = (~0UL << low_index);
5961+ if (length < BITS_PER_LONG)
5962+ mask &= ~(~0UL << length);
5963+ if (new_value)
5964+ *bitmap_base++ |= mask;
5965+ else
5966+ *bitmap_base++ &= ~mask;
5967+ length -= BITS_PER_LONG;
5968+ }
5969+
5970+ mask = (new_value ? ~0UL : 0UL);
5971+ while (length >= BITS_PER_LONG) {
5972+ *bitmap_base++ = mask;
5973+ length -= BITS_PER_LONG;
5974+ }
5975+
5976+ if (length > 0) {
5977+ mask = ~(~0UL << length);
5978+ if (new_value)
5979+ *bitmap_base++ |= mask;
5980+ else
5981+ *bitmap_base++ &= ~mask;
5982+ }
5983+}
5984+
5985+
5986+/*
5987+ * this changes the io permissions bitmap in the current task.
5988+ */
5989+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
5990+{
5991+ struct thread_struct * t = &current->thread;
5992+ unsigned long *bitmap;
5993+ struct physdev_set_iobitmap set_iobitmap;
5994+
5995+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
5996+ return -EINVAL;
5997+ if (turn_on && !capable(CAP_SYS_RAWIO))
5998+ return -EPERM;
5999+
6000+ /*
6001+ * If it's the first ioperm() call in this thread's lifetime, set the
6002+ * IO bitmap up. ioperm() is much less timing critical than clone(),
6003+	 * which is why we delay this operation until now:
6004+ */
6005+ if (!t->io_bitmap_ptr) {
6006+ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
6007+ if (!bitmap)
6008+ return -ENOMEM;
6009+
6010+ memset(bitmap, 0xff, IO_BITMAP_BYTES);
6011+ t->io_bitmap_ptr = bitmap;
6012+ set_thread_flag(TIF_IO_BITMAP);
6013+
6014+ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
6015+ set_iobitmap.nr_ports = IO_BITMAP_BITS;
6016+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
6017+ &set_iobitmap));
6018+ }
6019+
6020+ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
6021+
6022+ return 0;
6023+}
6024+
6025+/*
6026+ * sys_iopl has to be used when you want to access the IO ports
6027+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
6028+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
6029+ *
6030+ * Here we just change the eflags value on the stack: we allow
6031+ * only the super-user to do it. This depends on the stack-layout
6032+ * on system-call entry - see also fork() and the signal handling
6033+ * code.
6034+ */
6035+
6036+asmlinkage long sys_iopl(unsigned long unused)
6037+{
6038+ volatile struct pt_regs * regs = (struct pt_regs *) &unused;
6039+ unsigned int level = regs->ebx;
6040+ struct thread_struct *t = &current->thread;
6041+ unsigned int old = (t->iopl >> 12) & 3;
6042+
6043+ if (level > 3)
6044+ return -EINVAL;
6045+ /* Trying to gain more privileges? */
6046+ if (level > old) {
6047+ if (!capable(CAP_SYS_RAWIO))
6048+ return -EPERM;
6049+ }
6050+ t->iopl = level << 12;
6051+ set_iopl_mask(t->iopl);
6052+ return 0;
6053+}
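
set_bitmap() above updates `extent` bits starting at `base` in three phases: a partial leading word, any number of full words, and a partial trailing word. A stand-alone sketch of the same walk (user-space, with a tiny test that spans a word boundary), assuming 64-bit longs on the build host:

/* Sketch of the three-phase bit-range update used by set_bitmap() above:
 * partial leading word, full middle words, partial trailing word. */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static void set_range(unsigned long *bitmap, unsigned int base,
		      unsigned int extent, int new_value)
{
	unsigned long mask;
	unsigned long *p = bitmap + base / BITS_PER_LONG;
	unsigned int low = base % BITS_PER_LONG;
	long length = low + extent;

	if (low != 0) {				/* partial leading word */
		mask = ~0UL << low;
		if (length < (long)BITS_PER_LONG)
			mask &= ~(~0UL << length);
		if (new_value) *p++ |= mask; else *p++ &= ~mask;
		length -= BITS_PER_LONG;
	}
	mask = new_value ? ~0UL : 0UL;
	while (length >= (long)BITS_PER_LONG) {	/* whole words */
		*p++ = mask;
		length -= BITS_PER_LONG;
	}
	if (length > 0) {			/* partial trailing word */
		mask = ~(~0UL << length);
		if (new_value) *p |= mask; else *p &= ~mask;
	}
}

int main(void)
{
	unsigned long map[3] = { 0, 0, 0 };

	set_range(map, 60, 10, 1);	/* spans the word boundary at bit 64 */
	printf("%016lx %016lx %016lx\n", map[0], map[1], map[2]);
	return 0;
}

Only the two partial words need a read-modify-write; every word fully inside the range is simply overwritten with all-ones or all-zeroes.
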
6054Index: head-2008-11-25/arch/x86/kernel/irq_32-xen.c
6055===================================================================
6056--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6057+++ head-2008-11-25/arch/x86/kernel/irq_32-xen.c 2008-10-29 09:55:56.000000000 +0100
6058@@ -0,0 +1,324 @@
6059+/*
6060+ * linux/arch/i386/kernel/irq.c
6061+ *
6062+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
6063+ *
6064+ * This file contains the lowest level x86-specific interrupt
6065+ * entry, irq-stacks and irq statistics code. All the remaining
6066+ * irq logic is done by the generic kernel/irq/ code and
6067+ * by the x86-specific irq controller code. (e.g. i8259.c and
6068+ * io_apic.c.)
6069+ */
6070+
6071+#include <asm/uaccess.h>
6072+#include <linux/module.h>
6073+#include <linux/seq_file.h>
6074+#include <linux/interrupt.h>
6075+#include <linux/kernel_stat.h>
6076+#include <linux/notifier.h>
6077+#include <linux/cpu.h>
6078+#include <linux/delay.h>
6079+
6080+DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
6081+EXPORT_PER_CPU_SYMBOL(irq_stat);
6082+
6083+#ifndef CONFIG_X86_LOCAL_APIC
6084+/*
6085+ * 'what should we do if we get a hw irq event on an illegal vector'.
6086+ * each architecture has to answer this themselves.
6087+ */
6088+void ack_bad_irq(unsigned int irq)
6089+{
6090+ printk("unexpected IRQ trap at vector %02x\n", irq);
6091+}
6092+#endif
6093+
6094+#ifdef CONFIG_4KSTACKS
6095+/*
6096+ * per-CPU IRQ handling contexts (thread information and stack)
6097+ */
6098+union irq_ctx {
6099+ struct thread_info tinfo;
6100+ u32 stack[THREAD_SIZE/sizeof(u32)];
6101+};
6102+
6103+static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
6104+static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
6105+#endif
6106+
6107+/*
6108+ * do_IRQ handles all normal device IRQ's (the special
6109+ * SMP cross-CPU interrupts have their own specific
6110+ * handlers).
6111+ */
6112+fastcall unsigned int do_IRQ(struct pt_regs *regs)
6113+{
6114+ /* high bit used in ret_from_ code */
6115+ int irq = ~regs->orig_eax;
6116+#ifdef CONFIG_4KSTACKS
6117+ union irq_ctx *curctx, *irqctx;
6118+ u32 *isp;
6119+#endif
6120+
6121+ if (unlikely((unsigned)irq >= NR_IRQS)) {
6122+ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
6123+ __FUNCTION__, irq);
6124+ BUG();
6125+ }
6126+
6127+ /*irq_enter();*/
6128+#ifdef CONFIG_DEBUG_STACKOVERFLOW
6129+ /* Debugging check for stack overflow: is there less than 1KB free? */
6130+ {
6131+ long esp;
6132+
6133+ __asm__ __volatile__("andl %%esp,%0" :
6134+ "=r" (esp) : "0" (THREAD_SIZE - 1));
6135+ if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
6136+ printk("do_IRQ: stack overflow: %ld\n",
6137+ esp - sizeof(struct thread_info));
6138+ dump_stack();
6139+ }
6140+ }
6141+#endif
6142+
6143+#ifdef CONFIG_4KSTACKS
6144+
6145+ curctx = (union irq_ctx *) current_thread_info();
6146+ irqctx = hardirq_ctx[smp_processor_id()];
6147+
6148+ /*
6149+ * this is where we switch to the IRQ stack. However, if we are
6150+ * already using the IRQ stack (because we interrupted a hardirq
6151+ * handler) we can't do that and just have to keep using the
6152+ * current stack (which is the irq stack already after all)
6153+ */
6154+ if (curctx != irqctx) {
6155+ int arg1, arg2, ebx;
6156+
6157+ /* build the stack frame on the IRQ stack */
6158+ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
6159+ irqctx->tinfo.task = curctx->tinfo.task;
6160+ irqctx->tinfo.previous_esp = current_stack_pointer;
6161+
6162+ /*
6163+ * Copy the softirq bits in preempt_count so that the
6164+ * softirq checks work in the hardirq context.
6165+ */
6166+ irqctx->tinfo.preempt_count =
6167+ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
6168+ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
6169+
6170+ asm volatile(
6171+ " xchgl %%ebx,%%esp \n"
6172+ " call __do_IRQ \n"
6173+ " movl %%ebx,%%esp \n"
6174+ : "=a" (arg1), "=d" (arg2), "=b" (ebx)
6175+ : "0" (irq), "1" (regs), "2" (isp)
6176+ : "memory", "cc", "ecx"
6177+ );
6178+ } else
6179+#endif
6180+ __do_IRQ(irq, regs);
6181+
6182+ /*irq_exit();*/
6183+
6184+ return 1;
6185+}
6186+
6187+#ifdef CONFIG_4KSTACKS
6188+
6189+/*
6190+ * These should really be __section__(".bss.page_aligned") as well, but
6191+ * gcc 3.0 and earlier don't handle that correctly.
6192+ */
6193+static char softirq_stack[NR_CPUS * THREAD_SIZE]
6194+ __attribute__((__aligned__(THREAD_SIZE)));
6195+
6196+static char hardirq_stack[NR_CPUS * THREAD_SIZE]
6197+ __attribute__((__aligned__(THREAD_SIZE)));
6198+
6199+/*
6200+ * allocate per-cpu stacks for hardirq and for softirq processing
6201+ */
6202+void irq_ctx_init(int cpu)
6203+{
6204+ union irq_ctx *irqctx;
6205+
6206+ if (hardirq_ctx[cpu])
6207+ return;
6208+
6209+ irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
6210+ irqctx->tinfo.task = NULL;
6211+ irqctx->tinfo.exec_domain = NULL;
6212+ irqctx->tinfo.cpu = cpu;
6213+ irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
6214+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
6215+
6216+ hardirq_ctx[cpu] = irqctx;
6217+
6218+ irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
6219+ irqctx->tinfo.task = NULL;
6220+ irqctx->tinfo.exec_domain = NULL;
6221+ irqctx->tinfo.cpu = cpu;
6222+ irqctx->tinfo.preempt_count = 0;
6223+ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
6224+
6225+ softirq_ctx[cpu] = irqctx;
6226+
6227+ printk("CPU %u irqstacks, hard=%p soft=%p\n",
6228+ cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
6229+}
6230+
6231+void irq_ctx_exit(int cpu)
6232+{
6233+ hardirq_ctx[cpu] = NULL;
6234+}
6235+
6236+extern asmlinkage void __do_softirq(void);
6237+
6238+asmlinkage void do_softirq(void)
6239+{
6240+ unsigned long flags;
6241+ struct thread_info *curctx;
6242+ union irq_ctx *irqctx;
6243+ u32 *isp;
6244+
6245+ if (in_interrupt())
6246+ return;
6247+
6248+ local_irq_save(flags);
6249+
6250+ if (local_softirq_pending()) {
6251+ curctx = current_thread_info();
6252+ irqctx = softirq_ctx[smp_processor_id()];
6253+ irqctx->tinfo.task = curctx->task;
6254+ irqctx->tinfo.previous_esp = current_stack_pointer;
6255+
6256+ /* build the stack frame on the softirq stack */
6257+ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
6258+
6259+ asm volatile(
6260+ " xchgl %%ebx,%%esp \n"
6261+ " call __do_softirq \n"
6262+ " movl %%ebx,%%esp \n"
6263+ : "=b"(isp)
6264+ : "0"(isp)
6265+ : "memory", "cc", "edx", "ecx", "eax"
6266+ );
6267+ /*
6268+		 * Shouldn't happen; we returned above if in_interrupt():
6269+ */
6270+ WARN_ON_ONCE(softirq_count());
6271+ }
6272+
6273+ local_irq_restore(flags);
6274+}
6275+
6276+EXPORT_SYMBOL(do_softirq);
6277+#endif
6278+
6279+/*
6280+ * Interrupt statistics:
6281+ */
6282+
6283+atomic_t irq_err_count;
6284+
6285+/*
6286+ * /proc/interrupts printing:
6287+ */
6288+
6289+int show_interrupts(struct seq_file *p, void *v)
6290+{
6291+ int i = *(loff_t *) v, j;
6292+ struct irqaction * action;
6293+ unsigned long flags;
6294+
6295+ if (i == 0) {
6296+ seq_printf(p, " ");
6297+ for_each_online_cpu(j)
6298+ seq_printf(p, "CPU%-8d",j);
6299+ seq_putc(p, '\n');
6300+ }
6301+
6302+ if (i < NR_IRQS) {
6303+ spin_lock_irqsave(&irq_desc[i].lock, flags);
6304+ action = irq_desc[i].action;
6305+ if (!action)
6306+ goto skip;
6307+ seq_printf(p, "%3d: ",i);
6308+#ifndef CONFIG_SMP
6309+ seq_printf(p, "%10u ", kstat_irqs(i));
6310+#else
6311+ for_each_online_cpu(j)
6312+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
6313+#endif
6314+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
6315+ seq_printf(p, " %s", action->name);
6316+
6317+ for (action=action->next; action; action = action->next)
6318+ seq_printf(p, ", %s", action->name);
6319+
6320+ seq_putc(p, '\n');
6321+skip:
6322+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
6323+ } else if (i == NR_IRQS) {
6324+ seq_printf(p, "NMI: ");
6325+ for_each_online_cpu(j)
6326+ seq_printf(p, "%10u ", nmi_count(j));
6327+ seq_putc(p, '\n');
6328+#ifdef CONFIG_X86_LOCAL_APIC
6329+ seq_printf(p, "LOC: ");
6330+ for_each_online_cpu(j)
6331+ seq_printf(p, "%10u ",
6332+ per_cpu(irq_stat,j).apic_timer_irqs);
6333+ seq_putc(p, '\n');
6334+#endif
6335+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
6336+#if defined(CONFIG_X86_IO_APIC)
6337+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
6338+#endif
6339+ }
6340+ return 0;
6341+}
6342+
6343+#ifdef CONFIG_HOTPLUG_CPU
6344+
6345+void fixup_irqs(cpumask_t map)
6346+{
6347+ unsigned int irq;
6348+ static int warned;
6349+
6350+ for (irq = 0; irq < NR_IRQS; irq++) {
6351+ cpumask_t mask;
6352+ if (irq == 2)
6353+ continue;
6354+
6355+ cpus_and(mask, irq_desc[irq].affinity, map);
6356+ if (any_online_cpu(mask) == NR_CPUS) {
6357+ /*printk("Breaking affinity for irq %i\n", irq);*/
6358+ mask = map;
6359+ }
6360+ if (irq_desc[irq].chip->set_affinity)
6361+ irq_desc[irq].chip->set_affinity(irq, mask);
6362+ else if (irq_desc[irq].action && !(warned++))
6363+ printk("Cannot set affinity for irq %i\n", irq);
6364+ }
6365+
6366+#if 0
6367+ barrier();
6368+ /* Ingo Molnar says: "after the IO-APIC masks have been redirected
6369+ [note the nop - the interrupt-enable boundary on x86 is two
6370+ instructions from sti] - to flush out pending hardirqs and
6371+ IPIs. After this point nothing is supposed to reach this CPU." */
6372+ __asm__ __volatile__("sti; nop; cli");
6373+ barrier();
6374+#else
6375+ /* That doesn't seem sufficient. Give it 1ms. */
6376+ local_irq_enable();
6377+ mdelay(1);
6378+ local_irq_disable();
6379+#endif
6380+}
6381+#endif
6382+
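
The CONFIG_DEBUG_STACKOVERFLOW check in do_IRQ() above leans on the thread stack being THREAD_SIZE-aligned: masking %esp with THREAD_SIZE-1 yields the offset into the stack, i.e. how much room is still unused above the thread_info at the bottom. A user-space sketch of that arithmetic, with THREAD_SIZE, the thread_info size and the warning threshold picked purely for illustration:

/* Sketch: how masking the stack pointer with THREAD_SIZE-1 measures the
 * space left on a THREAD_SIZE-aligned stack.  All values are illustrative. */
#include <stdio.h>

#define THREAD_SIZE	8192UL		/* 4K-stack kernels would use 4096 */
#define THREAD_INFO	64UL		/* stand-in for sizeof(struct thread_info) */
#define STACK_WARN	1024UL		/* warn when less than 1KB remains */

int main(void)
{
	unsigned long stack_base = 0xc1000000UL;	/* THREAD_SIZE aligned */
	unsigned long esp = stack_base + 900;		/* pretend stack pointer */

	unsigned long offset = esp & (THREAD_SIZE - 1);	/* same as the andl above */

	printf("offset in stack: %lu bytes\n", offset);
	if (offset < THREAD_INFO + STACK_WARN)
		printf("do_IRQ would warn: only %lu bytes above thread_info\n",
		       offset - THREAD_INFO);
	else
		printf("plenty of room\n");
	return 0;
}
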
6383Index: head-2008-11-25/arch/x86/kernel/ldt_32-xen.c
6384===================================================================
6385--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6386+++ head-2008-11-25/arch/x86/kernel/ldt_32-xen.c 2007-06-12 13:12:48.000000000 +0200
6387@@ -0,0 +1,270 @@
6388+/*
6389+ * linux/kernel/ldt.c
6390+ *
6391+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
6392+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
6393+ */
6394+
6395+#include <linux/errno.h>
6396+#include <linux/sched.h>
6397+#include <linux/string.h>
6398+#include <linux/mm.h>
6399+#include <linux/smp.h>
6400+#include <linux/smp_lock.h>
6401+#include <linux/vmalloc.h>
6402+#include <linux/slab.h>
6403+
6404+#include <asm/uaccess.h>
6405+#include <asm/system.h>
6406+#include <asm/ldt.h>
6407+#include <asm/desc.h>
6408+#include <asm/mmu_context.h>
6409+
6410+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
6411+static void flush_ldt(void *null)
6412+{
6413+ if (current->active_mm)
6414+ load_LDT(&current->active_mm->context);
6415+}
6416+#endif
6417+
6418+static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
6419+{
6420+ void *oldldt;
6421+ void *newldt;
6422+ int oldsize;
6423+
6424+ if (mincount <= pc->size)
6425+ return 0;
6426+ oldsize = pc->size;
6427+ mincount = (mincount+511)&(~511);
6428+ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
6429+ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
6430+ else
6431+ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
6432+
6433+ if (!newldt)
6434+ return -ENOMEM;
6435+
6436+ if (oldsize)
6437+ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
6438+ oldldt = pc->ldt;
6439+ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
6440+ pc->ldt = newldt;
6441+ wmb();
6442+ pc->size = mincount;
6443+ wmb();
6444+
6445+ if (reload) {
6446+#ifdef CONFIG_SMP
6447+ cpumask_t mask;
6448+ preempt_disable();
6449+#endif
6450+ make_pages_readonly(
6451+ pc->ldt,
6452+ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
6453+ XENFEAT_writable_descriptor_tables);
6454+ load_LDT(pc);
6455+#ifdef CONFIG_SMP
6456+ mask = cpumask_of_cpu(smp_processor_id());
6457+ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
6458+ smp_call_function(flush_ldt, NULL, 1, 1);
6459+ preempt_enable();
6460+#endif
6461+ }
6462+ if (oldsize) {
6463+ make_pages_writable(
6464+ oldldt,
6465+ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
6466+ XENFEAT_writable_descriptor_tables);
6467+ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
6468+ vfree(oldldt);
6469+ else
6470+ kfree(oldldt);
6471+ }
6472+ return 0;
6473+}
6474+
6475+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
6476+{
6477+ int err = alloc_ldt(new, old->size, 0);
6478+ if (err < 0)
6479+ return err;
6480+ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
6481+ make_pages_readonly(
6482+ new->ldt,
6483+ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
6484+ XENFEAT_writable_descriptor_tables);
6485+ return 0;
6486+}
6487+
6488+/*
6489+ * we do not have to muck with descriptors here, that is
6490+ * done in switch_mm() as needed.
6491+ */
6492+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
6493+{
6494+ struct mm_struct * old_mm;
6495+ int retval = 0;
6496+
6497+ init_MUTEX(&mm->context.sem);
6498+ mm->context.size = 0;
6499+ mm->context.has_foreign_mappings = 0;
6500+ old_mm = current->mm;
6501+ if (old_mm && old_mm->context.size > 0) {
6502+ down(&old_mm->context.sem);
6503+ retval = copy_ldt(&mm->context, &old_mm->context);
6504+ up(&old_mm->context.sem);
6505+ }
6506+ return retval;
6507+}
6508+
6509+/*
6510+ * No need to lock the MM as we are the last user
6511+ */
6512+void destroy_context(struct mm_struct *mm)
6513+{
6514+ if (mm->context.size) {
6515+ if (mm == current->active_mm)
6516+ clear_LDT();
6517+ make_pages_writable(
6518+ mm->context.ldt,
6519+ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
6520+ XENFEAT_writable_descriptor_tables);
6521+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
6522+ vfree(mm->context.ldt);
6523+ else
6524+ kfree(mm->context.ldt);
6525+ mm->context.size = 0;
6526+ }
6527+}
6528+
6529+static int read_ldt(void __user * ptr, unsigned long bytecount)
6530+{
6531+ int err;
6532+ unsigned long size;
6533+ struct mm_struct * mm = current->mm;
6534+
6535+ if (!mm->context.size)
6536+ return 0;
6537+ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
6538+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
6539+
6540+ down(&mm->context.sem);
6541+ size = mm->context.size*LDT_ENTRY_SIZE;
6542+ if (size > bytecount)
6543+ size = bytecount;
6544+
6545+ err = 0;
6546+ if (copy_to_user(ptr, mm->context.ldt, size))
6547+ err = -EFAULT;
6548+ up(&mm->context.sem);
6549+ if (err < 0)
6550+ goto error_return;
6551+ if (size != bytecount) {
6552+ /* zero-fill the rest */
6553+ if (clear_user(ptr+size, bytecount-size) != 0) {
6554+ err = -EFAULT;
6555+ goto error_return;
6556+ }
6557+ }
6558+ return bytecount;
6559+error_return:
6560+ return err;
6561+}
6562+
6563+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
6564+{
6565+ int err;
6566+ unsigned long size;
6567+ void *address;
6568+
6569+ err = 0;
6570+ address = &default_ldt[0];
6571+ size = 5*sizeof(struct desc_struct);
6572+ if (size > bytecount)
6573+ size = bytecount;
6574+
6575+ err = size;
6576+ if (copy_to_user(ptr, address, size))
6577+ err = -EFAULT;
6578+
6579+ return err;
6580+}
6581+
6582+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
6583+{
6584+ struct mm_struct * mm = current->mm;
6585+ __u32 entry_1, entry_2;
6586+ int error;
6587+ struct user_desc ldt_info;
6588+
6589+ error = -EINVAL;
6590+ if (bytecount != sizeof(ldt_info))
6591+ goto out;
6592+ error = -EFAULT;
6593+ if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
6594+ goto out;
6595+
6596+ error = -EINVAL;
6597+ if (ldt_info.entry_number >= LDT_ENTRIES)
6598+ goto out;
6599+ if (ldt_info.contents == 3) {
6600+ if (oldmode)
6601+ goto out;
6602+ if (ldt_info.seg_not_present == 0)
6603+ goto out;
6604+ }
6605+
6606+ down(&mm->context.sem);
6607+ if (ldt_info.entry_number >= mm->context.size) {
6608+ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
6609+ if (error < 0)
6610+ goto out_unlock;
6611+ }
6612+
6613+ /* Allow LDTs to be cleared by the user. */
6614+ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6615+ if (oldmode || LDT_empty(&ldt_info)) {
6616+ entry_1 = 0;
6617+ entry_2 = 0;
6618+ goto install;
6619+ }
6620+ }
6621+
6622+ entry_1 = LDT_entry_a(&ldt_info);
6623+ entry_2 = LDT_entry_b(&ldt_info);
6624+ if (oldmode)
6625+ entry_2 &= ~(1 << 20);
6626+
6627+ /* Install the new entry ... */
6628+install:
6629+ error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
6630+ entry_1, entry_2);
6631+
6632+out_unlock:
6633+ up(&mm->context.sem);
6634+out:
6635+ return error;
6636+}
6637+
6638+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
6639+{
6640+ int ret = -ENOSYS;
6641+
6642+ switch (func) {
6643+ case 0:
6644+ ret = read_ldt(ptr, bytecount);
6645+ break;
6646+ case 1:
6647+ ret = write_ldt(ptr, bytecount, 1);
6648+ break;
6649+ case 2:
6650+ ret = read_default_ldt(ptr, bytecount);
6651+ break;
6652+ case 0x11:
6653+ ret = write_ldt(ptr, bytecount, 0);
6654+ break;
6655+ }
6656+ return ret;
6657+}
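
alloc_ldt() above grows the descriptor table in 512-entry steps — `mincount = (mincount+511) & ~511` — and switches from kmalloc to vmalloc once the table no longer fits in a single page. A small sketch of that sizing policy, with LDT_ENTRY_SIZE and PAGE_SIZE hard-coded to their usual x86 values:

/* Sketch of the LDT sizing policy used by alloc_ldt() above:
 * round up to a multiple of 512 entries, then pick kmalloc vs vmalloc
 * depending on whether the table exceeds one page. */
#include <stdio.h>

#define LDT_ENTRY_SIZE	8	/* bytes per descriptor on x86 */
#define PAGE_SIZE	4096

int main(void)
{
	int requests[] = { 1, 20, 511, 512, 513, 1500 };
	unsigned i;

	for (i = 0; i < sizeof(requests)/sizeof(requests[0]); i++) {
		int mincount = (requests[i] + 511) & ~511;	/* round up */
		int bytes = mincount * LDT_ENTRY_SIZE;

		printf("need %4d entries -> allocate %4d (%5d bytes) via %s\n",
		       requests[i], mincount, bytes,
		       bytes > PAGE_SIZE ? "vmalloc" : "kmalloc");
	}
	return 0;
}

Growing in 512-entry chunks keeps reallocations rare, while the page-size threshold avoids multi-page contiguous kmalloc allocations for large tables.
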
6658Index: head-2008-11-25/arch/x86/kernel/microcode-xen.c
6659===================================================================
6660--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6661+++ head-2008-11-25/arch/x86/kernel/microcode-xen.c 2007-06-12 13:12:48.000000000 +0200
6662@@ -0,0 +1,144 @@
6663+/*
6664+ * Intel CPU Microcode Update Driver for Linux
6665+ *
6666+ * Copyright (C) 2000-2004 Tigran Aivazian
6667+ *
6668+ * This driver allows to upgrade microcode on Intel processors
6669+ * belonging to IA-32 family - PentiumPro, Pentium II,
6670+ * Pentium III, Xeon, Pentium 4, etc.
6671+ *
6672+ * Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual,
6673+ * Order Number 245472 or free download from:
6674+ *
6675+ * http://developer.intel.com/design/pentium4/manuals/245472.htm
6676+ *
6677+ * For more information, go to http://www.urbanmyth.org/microcode
6678+ *
6679+ * This program is free software; you can redistribute it and/or
6680+ * modify it under the terms of the GNU General Public License
6681+ * as published by the Free Software Foundation; either version
6682+ * 2 of the License, or (at your option) any later version.
6683+ */
6684+
6685+//#define DEBUG /* pr_debug */
6686+#include <linux/capability.h>
6687+#include <linux/kernel.h>
6688+#include <linux/init.h>
6689+#include <linux/sched.h>
6690+#include <linux/cpumask.h>
6691+#include <linux/module.h>
6692+#include <linux/slab.h>
6693+#include <linux/vmalloc.h>
6694+#include <linux/miscdevice.h>
6695+#include <linux/spinlock.h>
6696+#include <linux/mm.h>
6697+#include <linux/mutex.h>
6698+#include <linux/syscalls.h>
6699+
6700+#include <asm/msr.h>
6701+#include <asm/uaccess.h>
6702+#include <asm/processor.h>
6703+
6704+MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
6705+MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>");
6706+MODULE_LICENSE("GPL");
6707+
6708+static int verbose;
6709+module_param(verbose, int, 0644);
6710+
6711+#define MICROCODE_VERSION "1.14a-xen"
6712+
6713+#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
6714+#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
6715+#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
6716+
6717+/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
6718+static DEFINE_MUTEX(microcode_mutex);
6719+
6720+static int microcode_open (struct inode *unused1, struct file *unused2)
6721+{
6722+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
6723+}
6724+
6725+
6726+static int do_microcode_update (const void __user *ubuf, size_t len)
6727+{
6728+ int err;
6729+ void *kbuf;
6730+
6731+ kbuf = vmalloc(len);
6732+ if (!kbuf)
6733+ return -ENOMEM;
6734+
6735+ if (copy_from_user(kbuf, ubuf, len) == 0) {
6736+ struct xen_platform_op op;
6737+
6738+ op.cmd = XENPF_microcode_update;
6739+ set_xen_guest_handle(op.u.microcode.data, kbuf);
6740+ op.u.microcode.length = len;
6741+ err = HYPERVISOR_platform_op(&op);
6742+ } else
6743+ err = -EFAULT;
6744+
6745+ vfree(kbuf);
6746+
6747+ return err;
6748+}
6749+
6750+static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
6751+{
6752+ ssize_t ret;
6753+
6754+ if (len < MC_HEADER_SIZE) {
6755+ printk(KERN_ERR "microcode: not enough data\n");
6756+ return -EINVAL;
6757+ }
6758+
6759+ mutex_lock(&microcode_mutex);
6760+
6761+ ret = do_microcode_update(buf, len);
6762+ if (!ret)
6763+ ret = (ssize_t)len;
6764+
6765+ mutex_unlock(&microcode_mutex);
6766+
6767+ return ret;
6768+}
6769+
6770+static struct file_operations microcode_fops = {
6771+ .owner = THIS_MODULE,
6772+ .write = microcode_write,
6773+ .open = microcode_open,
6774+};
6775+
6776+static struct miscdevice microcode_dev = {
6777+ .minor = MICROCODE_MINOR,
6778+ .name = "microcode",
6779+ .fops = &microcode_fops,
6780+};
6781+
6782+static int __init microcode_init (void)
6783+{
6784+ int error;
6785+
6786+ error = misc_register(&microcode_dev);
6787+ if (error) {
6788+ printk(KERN_ERR
6789+ "microcode: can't misc_register on minor=%d\n",
6790+ MICROCODE_MINOR);
6791+ return error;
6792+ }
6793+
6794+ printk(KERN_INFO
6795+ "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n");
6796+ return 0;
6797+}
6798+
6799+static void __exit microcode_exit (void)
6800+{
6801+ misc_deregister(&microcode_dev);
6802+}
6803+
6804+module_init(microcode_init)
6805+module_exit(microcode_exit)
6806+MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
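
Under Xen the driver above does no parsing of its own — do_microcode_update() hands the entire buffer to the hypervisor via XENPF_microcode_update — so user space only has to push a prepared binary image at the misc device in a single write. A hedged sketch of that user-space side (the image path is illustrative; the device node is the one conventionally created for MICROCODE_MINOR):

/* Sketch: read a binary microcode image and write it to the misc device
 * registered above in one write() call.  Paths are illustrative. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const char *image = "/lib/firmware/microcode.bin";	/* illustrative */
	const char *dev = "/dev/cpu/microcode";			/* usual node for MICROCODE_MINOR */
	FILE *in;
	char *buf;
	long len;
	int fd;

	if (!(in = fopen(image, "rb"))) { perror("open image"); return 1; }
	fseek(in, 0, SEEK_END);
	len = ftell(in);
	rewind(in);
	if (!(buf = malloc(len)) || fread(buf, 1, len, in) != (size_t)len) {
		perror("read image");
		return 1;
	}
	fclose(in);

	if ((fd = open(dev, O_WRONLY)) < 0) { perror("open device"); return 1; }
	/* The driver above expects the whole image in a single write(). */
	if (write(fd, buf, len) != len)
		perror("write microcode");
	close(fd);
	free(buf);
	return 0;
}
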
6807Index: head-2008-11-25/arch/x86/kernel/mpparse_32-xen.c
6808===================================================================
6809--- /dev/null 1970-01-01 00:00:00.000000000 +0000
6810+++ head-2008-11-25/arch/x86/kernel/mpparse_32-xen.c 2007-06-12 13:12:48.000000000 +0200
6811@@ -0,0 +1,1185 @@
6812+/*
6813+ * Intel Multiprocessor Specification 1.1 and 1.4
6814+ * compliant MP-table parsing routines.
6815+ *
6816+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
6817+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
6818+ *
6819+ * Fixes
6820+ * Erich Boleyn : MP v1.4 and additional changes.
6821+ * Alan Cox : Added EBDA scanning
6822+ * Ingo Molnar : various cleanups and rewrites
6823+ * Maciej W. Rozycki: Bits for default MP configurations
6824+ * Paul Diefenbaugh: Added full ACPI support
6825+ */
6826+
6827+#include <linux/mm.h>
6828+#include <linux/init.h>
6829+#include <linux/acpi.h>
6830+#include <linux/delay.h>
6831+#include <linux/bootmem.h>
6832+#include <linux/smp_lock.h>
6833+#include <linux/kernel_stat.h>
6834+#include <linux/mc146818rtc.h>
6835+#include <linux/bitops.h>
6836+
6837+#include <asm/smp.h>
6838+#include <asm/acpi.h>
6839+#include <asm/mtrr.h>
6840+#include <asm/mpspec.h>
6841+#include <asm/io_apic.h>
6842+
6843+#include <mach_apic.h>
6844+#include <mach_mpparse.h>
6845+#include <bios_ebda.h>
6846+
6847+/* Have we found an MP table */
6848+int smp_found_config;
6849+unsigned int __initdata maxcpus = NR_CPUS;
6850+
6851+/*
6852+ * Various Linux-internal data structures created from the
6853+ * MP-table.
6854+ */
6855+int apic_version [MAX_APICS];
6856+int mp_bus_id_to_type [MAX_MP_BUSSES];
6857+int mp_bus_id_to_node [MAX_MP_BUSSES];
6858+int mp_bus_id_to_local [MAX_MP_BUSSES];
6859+int quad_local_to_mp_bus_id [NR_CPUS/4][4];
6860+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
6861+static int mp_current_pci_id;
6862+
6863+/* I/O APIC entries */
6864+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
6865+
6866+/* # of MP IRQ source entries */
6867+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
6868+
6869+/* MP IRQ source entries */
6870+int mp_irq_entries;
6871+
6872+int nr_ioapics;
6873+
6874+int pic_mode;
6875+unsigned long mp_lapic_addr;
6876+
6877+unsigned int def_to_bigsmp = 0;
6878+
6879+/* Processor that is doing the boot up */
6880+unsigned int boot_cpu_physical_apicid = -1U;
6881+/* Internal processor count */
6882+static unsigned int __devinitdata num_processors;
6883+
6884+/* Bitmask of physically existing CPUs */
6885+physid_mask_t phys_cpu_present_map;
6886+
6887+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
6888+
6889+/*
6890+ * Intel MP BIOS table parsing routines:
6891+ */
6892+
6893+
6894+/*
6895+ * Checksum an MP configuration block.
6896+ */
6897+
6898+static int __init mpf_checksum(unsigned char *mp, int len)
6899+{
6900+ int sum = 0;
6901+
6902+ while (len--)
6903+ sum += *mp++;
6904+
6905+ return sum & 0xFF;
6906+}
6907+
6908+/*
6909+ * Have to match translation table entries to main table entries by counter
6910+ * hence the mpc_record variable .... can't see a less disgusting way of
6911+ * doing this ....
6912+ */
6913+
6914+static int mpc_record;
6915+static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
6916+
6917+#ifndef CONFIG_XEN
6918+static void __devinit MP_processor_info (struct mpc_config_processor *m)
6919+{
6920+ int ver, apicid;
6921+ physid_mask_t phys_cpu;
6922+
6923+ if (!(m->mpc_cpuflag & CPU_ENABLED))
6924+ return;
6925+
6926+ apicid = mpc_apic_id(m, translation_table[mpc_record]);
6927+
6928+ if (m->mpc_featureflag&(1<<0))
6929+ Dprintk(" Floating point unit present.\n");
6930+ if (m->mpc_featureflag&(1<<7))
6931+ Dprintk(" Machine Exception supported.\n");
6932+ if (m->mpc_featureflag&(1<<8))
6933+ Dprintk(" 64 bit compare & exchange supported.\n");
6934+ if (m->mpc_featureflag&(1<<9))
6935+ Dprintk(" Internal APIC present.\n");
6936+ if (m->mpc_featureflag&(1<<11))
6937+ Dprintk(" SEP present.\n");
6938+ if (m->mpc_featureflag&(1<<12))
6939+ Dprintk(" MTRR present.\n");
6940+ if (m->mpc_featureflag&(1<<13))
6941+ Dprintk(" PGE present.\n");
6942+ if (m->mpc_featureflag&(1<<14))
6943+ Dprintk(" MCA present.\n");
6944+ if (m->mpc_featureflag&(1<<15))
6945+ Dprintk(" CMOV present.\n");
6946+ if (m->mpc_featureflag&(1<<16))
6947+ Dprintk(" PAT present.\n");
6948+ if (m->mpc_featureflag&(1<<17))
6949+ Dprintk(" PSE present.\n");
6950+ if (m->mpc_featureflag&(1<<18))
6951+ Dprintk(" PSN present.\n");
6952+ if (m->mpc_featureflag&(1<<19))
6953+ Dprintk(" Cache Line Flush Instruction present.\n");
6954+ /* 20 Reserved */
6955+ if (m->mpc_featureflag&(1<<21))
6956+ Dprintk(" Debug Trace and EMON Store present.\n");
6957+ if (m->mpc_featureflag&(1<<22))
6958+ Dprintk(" ACPI Thermal Throttle Registers present.\n");
6959+ if (m->mpc_featureflag&(1<<23))
6960+ Dprintk(" MMX present.\n");
6961+ if (m->mpc_featureflag&(1<<24))
6962+ Dprintk(" FXSR present.\n");
6963+ if (m->mpc_featureflag&(1<<25))
6964+ Dprintk(" XMM present.\n");
6965+ if (m->mpc_featureflag&(1<<26))
6966+ Dprintk(" Willamette New Instructions present.\n");
6967+ if (m->mpc_featureflag&(1<<27))
6968+ Dprintk(" Self Snoop present.\n");
6969+ if (m->mpc_featureflag&(1<<28))
6970+ Dprintk(" HT present.\n");
6971+ if (m->mpc_featureflag&(1<<29))
6972+ Dprintk(" Thermal Monitor present.\n");
6973+ /* 30, 31 Reserved */
6974+
6975+
6976+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
6977+ Dprintk(" Bootup CPU\n");
6978+ boot_cpu_physical_apicid = m->mpc_apicid;
6979+ }
6980+
6981+ ver = m->mpc_apicver;
6982+
6983+ /*
6984+ * Validate version
6985+ */
6986+ if (ver == 0x0) {
6987+ printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
6988+ "fixing up to 0x10. (tell your hw vendor)\n",
6989+ m->mpc_apicid);
6990+ ver = 0x10;
6991+ }
6992+ apic_version[m->mpc_apicid] = ver;
6993+
6994+ phys_cpu = apicid_to_cpu_present(apicid);
6995+ physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
6996+
6997+ if (num_processors >= NR_CPUS) {
6998+ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
6999+ " Processor ignored.\n", NR_CPUS);
7000+ return;
7001+ }
7002+
7003+ if (num_processors >= maxcpus) {
7004+ printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
7005+ " Processor ignored.\n", maxcpus);
7006+ return;
7007+ }
7008+
7009+ cpu_set(num_processors, cpu_possible_map);
7010+ num_processors++;
7011+
7012+ /*
7013+ * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
7014+	 * but we need to work out other dependencies like SMP_SUSPEND etc.
7015+ * before this can be done without some confusion.
7016+ * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
7017+ * - Ashok Raj <ashok.raj@intel.com>
7018+ */
7019+ if (num_processors > 8) {
7020+ switch (boot_cpu_data.x86_vendor) {
7021+ case X86_VENDOR_INTEL:
7022+ if (!APIC_XAPIC(ver)) {
7023+ def_to_bigsmp = 0;
7024+ break;
7025+ }
7026+ /* If P4 and above fall through */
7027+ case X86_VENDOR_AMD:
7028+ def_to_bigsmp = 1;
7029+ }
7030+ }
7031+ bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
7032+}
7033+#else
7034+void __init MP_processor_info (struct mpc_config_processor *m)
7035+{
7036+ num_processors++;
7037+}
7038+#endif /* CONFIG_XEN */
7039+
7040+static void __init MP_bus_info (struct mpc_config_bus *m)
7041+{
7042+ char str[7];
7043+
7044+ memcpy(str, m->mpc_bustype, 6);
7045+ str[6] = 0;
7046+
7047+ mpc_oem_bus_info(m, str, translation_table[mpc_record]);
7048+
7049+ if (m->mpc_busid >= MAX_MP_BUSSES) {
7050+ printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
7051+ " is too large, max. supported is %d\n",
7052+ m->mpc_busid, str, MAX_MP_BUSSES - 1);
7053+ return;
7054+ }
7055+
7056+ if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
7057+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
7058+ } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
7059+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
7060+ } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
7061+ mpc_oem_pci_bus(m, translation_table[mpc_record]);
7062+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
7063+ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
7064+ mp_current_pci_id++;
7065+ } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
7066+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
7067+ } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
7068+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
7069+ } else {
7070+ printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
7071+ }
7072+}
7073+
7074+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
7075+{
7076+ if (!(m->mpc_flags & MPC_APIC_USABLE))
7077+ return;
7078+
7079+ printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
7080+ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
7081+ if (nr_ioapics >= MAX_IO_APICS) {
7082+ printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
7083+ MAX_IO_APICS, nr_ioapics);
7084+ panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
7085+ }
7086+ if (!m->mpc_apicaddr) {
7087+ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
7088+ " found in MP table, skipping!\n");
7089+ return;
7090+ }
7091+ mp_ioapics[nr_ioapics] = *m;
7092+ nr_ioapics++;
7093+}
7094+
7095+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
7096+{
7097+ mp_irqs [mp_irq_entries] = *m;
7098+ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
7099+ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
7100+ m->mpc_irqtype, m->mpc_irqflag & 3,
7101+ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
7102+ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
7103+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
7104+ panic("Max # of irq sources exceeded!!\n");
7105+}
7106+
7107+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
7108+{
7109+ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
7110+ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
7111+ m->mpc_irqtype, m->mpc_irqflag & 3,
7112+ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
7113+ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
7114+ /*
7115+ * Well it seems all SMP boards in existence
7116+ * use ExtINT/LVT1 == LINT0 and
7117+ * NMI/LVT2 == LINT1 - the following check
7118+	 * will show us if this assumption is false.
7119+ * Until then we do not have to add baggage.
7120+ */
7121+ if ((m->mpc_irqtype == mp_ExtINT) &&
7122+ (m->mpc_destapiclint != 0))
7123+ BUG();
7124+ if ((m->mpc_irqtype == mp_NMI) &&
7125+ (m->mpc_destapiclint != 1))
7126+ BUG();
7127+}
7128+
7129+#ifdef CONFIG_X86_NUMAQ
7130+static void __init MP_translation_info (struct mpc_config_translation *m)
7131+{
7132+ printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
7133+
7134+ if (mpc_record >= MAX_MPC_ENTRY)
7135+ printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
7136+ else
7137+ translation_table[mpc_record] = m; /* stash this for later */
7138+ if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
7139+ node_set_online(m->trans_quad);
7140+}
7141+
7142+/*
7143+ * Read/parse the MPC oem tables
7144+ */
7145+
7146+static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
7147+ unsigned short oemsize)
7148+{
7149+ int count = sizeof (*oemtable); /* the header size */
7150+ unsigned char *oemptr = ((unsigned char *)oemtable)+count;
7151+
7152+ mpc_record = 0;
7153+ printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
7154+ if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
7155+ {
7156+ printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
7157+ oemtable->oem_signature[0],
7158+ oemtable->oem_signature[1],
7159+ oemtable->oem_signature[2],
7160+ oemtable->oem_signature[3]);
7161+ return;
7162+ }
7163+ if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
7164+ {
7165+ printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
7166+ return;
7167+ }
7168+ while (count < oemtable->oem_length) {
7169+ switch (*oemptr) {
7170+ case MP_TRANSLATION:
7171+ {
7172+ struct mpc_config_translation *m=
7173+ (struct mpc_config_translation *)oemptr;
7174+ MP_translation_info(m);
7175+ oemptr += sizeof(*m);
7176+ count += sizeof(*m);
7177+ ++mpc_record;
7178+ break;
7179+ }
7180+ default:
7181+ {
7182+ printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
7183+ return;
7184+ }
7185+ }
7186+ }
7187+}
7188+
7189+static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
7190+ char *productid)
7191+{
7192+ if (strncmp(oem, "IBM NUMA", 8))
7193+ printk("Warning! May not be a NUMA-Q system!\n");
7194+ if (mpc->mpc_oemptr)
7195+ smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
7196+ mpc->mpc_oemsize);
7197+}
7198+#endif /* CONFIG_X86_NUMAQ */
7199+
7200+/*
7201+ * Read/parse the MPC
7202+ */
7203+
7204+static int __init smp_read_mpc(struct mp_config_table *mpc)
7205+{
7206+ char str[16];
7207+ char oem[10];
7208+ int count=sizeof(*mpc);
7209+ unsigned char *mpt=((unsigned char *)mpc)+count;
7210+
7211+ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
7212+ printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
7213+ *(u32 *)mpc->mpc_signature);
7214+ return 0;
7215+ }
7216+ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
7217+ printk(KERN_ERR "SMP mptable: checksum error!\n");
7218+ return 0;
7219+ }
7220+ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
7221+ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
7222+ mpc->mpc_spec);
7223+ return 0;
7224+ }
7225+ if (!mpc->mpc_lapic) {
7226+ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
7227+ return 0;
7228+ }
7229+ memcpy(oem,mpc->mpc_oem,8);
7230+ oem[8]=0;
7231+ printk(KERN_INFO "OEM ID: %s ",oem);
7232+
7233+ memcpy(str,mpc->mpc_productid,12);
7234+ str[12]=0;
7235+ printk("Product ID: %s ",str);
7236+
7237+ mps_oem_check(mpc, oem, str);
7238+
7239+ printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
7240+
7241+ /*
7242+ * Save the local APIC address (it might be non-default) -- but only
7243+ * if we're not using ACPI.
7244+ */
7245+ if (!acpi_lapic)
7246+ mp_lapic_addr = mpc->mpc_lapic;
7247+
7248+ /*
7249+ * Now process the configuration blocks.
7250+ */
7251+ mpc_record = 0;
7252+ while (count < mpc->mpc_length) {
7253+ switch(*mpt) {
7254+ case MP_PROCESSOR:
7255+ {
7256+ struct mpc_config_processor *m=
7257+ (struct mpc_config_processor *)mpt;
7258+ /* ACPI may have already provided this data */
7259+ if (!acpi_lapic)
7260+ MP_processor_info(m);
7261+ mpt += sizeof(*m);
7262+ count += sizeof(*m);
7263+ break;
7264+ }
7265+ case MP_BUS:
7266+ {
7267+ struct mpc_config_bus *m=
7268+ (struct mpc_config_bus *)mpt;
7269+ MP_bus_info(m);
7270+ mpt += sizeof(*m);
7271+ count += sizeof(*m);
7272+ break;
7273+ }
7274+ case MP_IOAPIC:
7275+ {
7276+ struct mpc_config_ioapic *m=
7277+ (struct mpc_config_ioapic *)mpt;
7278+ MP_ioapic_info(m);
7279+ mpt+=sizeof(*m);
7280+ count+=sizeof(*m);
7281+ break;
7282+ }
7283+ case MP_INTSRC:
7284+ {
7285+ struct mpc_config_intsrc *m=
7286+ (struct mpc_config_intsrc *)mpt;
7287+
7288+ MP_intsrc_info(m);
7289+ mpt+=sizeof(*m);
7290+ count+=sizeof(*m);
7291+ break;
7292+ }
7293+ case MP_LINTSRC:
7294+ {
7295+ struct mpc_config_lintsrc *m=
7296+ (struct mpc_config_lintsrc *)mpt;
7297+ MP_lintsrc_info(m);
7298+ mpt+=sizeof(*m);
7299+ count+=sizeof(*m);
7300+ break;
7301+ }
7302+ default:
7303+ {
7304+ count = mpc->mpc_length;
7305+ break;
7306+ }
7307+ }
7308+ ++mpc_record;
7309+ }
7310+ clustered_apic_check();
7311+ if (!num_processors)
7312+ printk(KERN_ERR "SMP mptable: no processors registered!\n");
7313+ return num_processors;
7314+}
7315+
7316+static int __init ELCR_trigger(unsigned int irq)
7317+{
7318+ unsigned int port;
7319+
7320+ port = 0x4d0 + (irq >> 3);
7321+ return (inb(port) >> (irq & 7)) & 1;
7322+}
7323+
7324+static void __init construct_default_ioirq_mptable(int mpc_default_type)
7325+{
7326+ struct mpc_config_intsrc intsrc;
7327+ int i;
7328+ int ELCR_fallback = 0;
7329+
7330+ intsrc.mpc_type = MP_INTSRC;
7331+ intsrc.mpc_irqflag = 0; /* conforming */
7332+ intsrc.mpc_srcbus = 0;
7333+ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
7334+
7335+ intsrc.mpc_irqtype = mp_INT;
7336+
7337+ /*
7338+ * If true, we have an ISA/PCI system with no IRQ entries
7339+ * in the MP table. To prevent the PCI interrupts from being set up
7340+ * incorrectly, we try to use the ELCR. The sanity check to see if
7341+ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
7342+ * never be level sensitive, so we simply see if the ELCR agrees.
7343+ * If it does, we assume it's valid.
7344+ */
7345+ if (mpc_default_type == 5) {
7346+ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
7347+
7348+ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
7349+ printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
7350+ else {
7351+ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
7352+ ELCR_fallback = 1;
7353+ }
7354+ }
7355+
7356+ for (i = 0; i < 16; i++) {
7357+ switch (mpc_default_type) {
7358+ case 2:
7359+ if (i == 0 || i == 13)
7360+ continue; /* IRQ0 & IRQ13 not connected */
7361+ /* fall through */
7362+ default:
7363+ if (i == 2)
7364+ continue; /* IRQ2 is never connected */
7365+ }
7366+
7367+ if (ELCR_fallback) {
7368+ /*
7369+ * If the ELCR indicates a level-sensitive interrupt, we
7370+ * copy that information over to the MP table in the
7371+ * irqflag field (level sensitive, active high polarity).
7372+ */
7373+ if (ELCR_trigger(i))
7374+ intsrc.mpc_irqflag = 13;
7375+ else
7376+ intsrc.mpc_irqflag = 0;
7377+ }
7378+
7379+ intsrc.mpc_srcbusirq = i;
7380+ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
7381+ MP_intsrc_info(&intsrc);
7382+ }
7383+
7384+ intsrc.mpc_irqtype = mp_ExtINT;
7385+ intsrc.mpc_srcbusirq = 0;
7386+ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
7387+ MP_intsrc_info(&intsrc);
7388+}
7389+
7390+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
7391+{
7392+ struct mpc_config_processor processor;
7393+ struct mpc_config_bus bus;
7394+ struct mpc_config_ioapic ioapic;
7395+ struct mpc_config_lintsrc lintsrc;
7396+ int linttypes[2] = { mp_ExtINT, mp_NMI };
7397+ int i;
7398+
7399+ /*
7400+ * local APIC has default address
7401+ */
7402+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
7403+
7404+ /*
7405+ * 2 CPUs, numbered 0 & 1.
7406+ */
7407+ processor.mpc_type = MP_PROCESSOR;
7408+ /* Either an integrated APIC or a discrete 82489DX. */
7409+ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
7410+ processor.mpc_cpuflag = CPU_ENABLED;
7411+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
7412+ (boot_cpu_data.x86_model << 4) |
7413+ boot_cpu_data.x86_mask;
7414+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
7415+ processor.mpc_reserved[0] = 0;
7416+ processor.mpc_reserved[1] = 0;
7417+ for (i = 0; i < 2; i++) {
7418+ processor.mpc_apicid = i;
7419+ MP_processor_info(&processor);
7420+ }
7421+
7422+ bus.mpc_type = MP_BUS;
7423+ bus.mpc_busid = 0;
7424+ switch (mpc_default_type) {
7425+ default:
7426+ printk("???\n");
7427+ printk(KERN_ERR "Unknown standard configuration %d\n",
7428+ mpc_default_type);
7429+ /* fall through */
7430+ case 1:
7431+ case 5:
7432+ memcpy(bus.mpc_bustype, "ISA ", 6);
7433+ break;
7434+ case 2:
7435+ case 6:
7436+ case 3:
7437+ memcpy(bus.mpc_bustype, "EISA ", 6);
7438+ break;
7439+ case 4:
7440+ case 7:
7441+ memcpy(bus.mpc_bustype, "MCA ", 6);
7442+ }
7443+ MP_bus_info(&bus);
7444+ if (mpc_default_type > 4) {
7445+ bus.mpc_busid = 1;
7446+ memcpy(bus.mpc_bustype, "PCI ", 6);
7447+ MP_bus_info(&bus);
7448+ }
7449+
7450+ ioapic.mpc_type = MP_IOAPIC;
7451+ ioapic.mpc_apicid = 2;
7452+ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
7453+ ioapic.mpc_flags = MPC_APIC_USABLE;
7454+ ioapic.mpc_apicaddr = 0xFEC00000;
7455+ MP_ioapic_info(&ioapic);
7456+
7457+ /*
7458+ * We set up most of the low 16 IO-APIC pins according to MPS rules.
7459+ */
7460+ construct_default_ioirq_mptable(mpc_default_type);
7461+
7462+ lintsrc.mpc_type = MP_LINTSRC;
7463+ lintsrc.mpc_irqflag = 0; /* conforming */
7464+ lintsrc.mpc_srcbusid = 0;
7465+ lintsrc.mpc_srcbusirq = 0;
7466+ lintsrc.mpc_destapic = MP_APIC_ALL;
7467+ for (i = 0; i < 2; i++) {
7468+ lintsrc.mpc_irqtype = linttypes[i];
7469+ lintsrc.mpc_destapiclint = i;
7470+ MP_lintsrc_info(&lintsrc);
7471+ }
7472+}
7473+
7474+static struct intel_mp_floating *mpf_found;
7475+
7476+/*
7477+ * Scan the memory blocks for an SMP configuration block.
7478+ */
7479+void __init get_smp_config (void)
7480+{
7481+ struct intel_mp_floating *mpf = mpf_found;
7482+
7483+ /*
7484+ * ACPI supports both logical (e.g. Hyper-Threading) and physical
7485+ * processors, where MPS only supports physical.
7486+ */
7487+ if (acpi_lapic && acpi_ioapic) {
7488+ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
7489+ return;
7490+ }
7491+ else if (acpi_lapic)
7492+ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
7493+
7494+ printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
7495+ if (mpf->mpf_feature2 & (1<<7)) {
7496+ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
7497+ pic_mode = 1;
7498+ } else {
7499+ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
7500+ pic_mode = 0;
7501+ }
7502+
7503+ /*
7504+ * Now see if we need to read further.
7505+ */
7506+ if (mpf->mpf_feature1 != 0) {
7507+
7508+ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
7509+ construct_default_ISA_mptable(mpf->mpf_feature1);
7510+
7511+ } else if (mpf->mpf_physptr) {
7512+
7513+ /*
7514+ * Read the physical hardware table. Anything here will
7515+ * override the defaults.
7516+ */
7517+ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
7518+ smp_found_config = 0;
7519+ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
7520+ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
7521+ return;
7522+ }
7523+ /*
7524+ * If there are no explicit MP IRQ entries, then we are
7525+ * broken. We set up most of the low 16 IO-APIC pins to
7526+ * ISA defaults and hope it will work.
7527+ */
7528+ if (!mp_irq_entries) {
7529+ struct mpc_config_bus bus;
7530+
7531+ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
7532+
7533+ bus.mpc_type = MP_BUS;
7534+ bus.mpc_busid = 0;
7535+ memcpy(bus.mpc_bustype, "ISA ", 6);
7536+ MP_bus_info(&bus);
7537+
7538+ construct_default_ioirq_mptable(0);
7539+ }
7540+
7541+ } else
7542+ BUG();
7543+
7544+ printk(KERN_INFO "Processors: %d\n", num_processors);
7545+ /*
7546+ * Only use the first configuration found.
7547+ */
7548+}
7549+
7550+static int __init smp_scan_config (unsigned long base, unsigned long length)
7551+{
7552+ unsigned long *bp = isa_bus_to_virt(base);
7553+ struct intel_mp_floating *mpf;
7554+
7555+ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
7556+ if (sizeof(*mpf) != 16)
7557+ printk("Error: MPF size\n");
7558+
7559+ while (length > 0) {
7560+ mpf = (struct intel_mp_floating *)bp;
7561+ if ((*bp == SMP_MAGIC_IDENT) &&
7562+ (mpf->mpf_length == 1) &&
7563+ !mpf_checksum((unsigned char *)bp, 16) &&
7564+ ((mpf->mpf_specification == 1)
7565+ || (mpf->mpf_specification == 4)) ) {
7566+
7567+ smp_found_config = 1;
7568+#ifndef CONFIG_XEN
7569+ printk(KERN_INFO "found SMP MP-table at %08lx\n",
7570+ virt_to_phys(mpf));
7571+ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
7572+ if (mpf->mpf_physptr) {
7573+ /*
7574+			 * We cannot access the MPC table to compute its
7575+			 * size yet, as only a few megabytes from the
7576+			 * bottom are mapped at this point.
7577+			 * PC-9800 places its MPC table at the very end
7578+			 * of physical memory, so simply reserving
7579+			 * PAGE_SIZE from mpf->mpf_physptr would yield BUG()
7580+			 * in reserve_bootmem.
7581+ */
7582+ unsigned long size = PAGE_SIZE;
7583+ unsigned long end = max_low_pfn * PAGE_SIZE;
7584+ if (mpf->mpf_physptr + size > end)
7585+ size = end - mpf->mpf_physptr;
7586+ reserve_bootmem(mpf->mpf_physptr, size);
7587+ }
7588+#else
7589+ printk(KERN_INFO "found SMP MP-table at %08lx\n",
7590+ ((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
7591+#endif
7592+
7593+ mpf_found = mpf;
7594+ return 1;
7595+ }
7596+ bp += 4;
7597+ length -= 16;
7598+ }
7599+ return 0;
7600+}
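
The scan in smp_scan_config() above walks the region 16 bytes at a time and accepts a candidate only if it starts with the SMP_MAGIC_IDENT signature (the ASCII bytes "_MP_"), has mpf_length 1, passes an 8-bit checksum over the whole 16-byte block, and reports spec revision 1 or 4. The standalone userspace sketch below illustrates just the signature-plus-checksum test; the buffer, helper name and values are made up and this is not the kernel's mpf_checksum().

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Accept a 16-byte candidate only if it begins with "_MP_" and all of
     * its bytes sum to zero modulo 256. */
    static int mpf_candidate_ok(const uint8_t *p)
    {
        uint8_t sum = 0;
        int i;

        if (memcmp(p, "_MP_", 4) != 0)
            return 0;
        for (i = 0; i < 16; i++)
            sum += p[i];
        return sum == 0;
    }

    int main(void)
    {
        uint8_t block[16] = { '_', 'M', 'P', '_' };
        uint8_t sum = 0;
        int i;

        for (i = 0; i < 15; i++)
            sum += block[i];
        block[15] = (uint8_t)-sum;          /* make the checksum come out to zero */

        printf("candidate ok: %d\n", mpf_candidate_ok(block));
        return 0;
    }
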
7601+
7602+void __init find_smp_config (void)
7603+{
7604+#ifndef CONFIG_XEN
7605+ unsigned int address;
7606+#endif
7607+
7608+ /*
7609+ * FIXME: Linux assumes you have 640K of base ram..
7610+ * this continues the error...
7611+ *
7612+ * 1) Scan the bottom 1K for a signature
7613+ * 2) Scan the top 1K of base RAM
7614+ * 3) Scan the 64K of bios
7615+ */
7616+ if (smp_scan_config(0x0,0x400) ||
7617+ smp_scan_config(639*0x400,0x400) ||
7618+ smp_scan_config(0xF0000,0x10000))
7619+ return;
7620+ /*
7621+ * If it is an SMP machine we should know now, unless the
7622+ * configuration is in an EISA/MCA bus machine with an
7623+ * extended bios data area.
7624+ *
7625+	 * There is a real-mode segmented pointer to the 4K EBDA
7626+	 * area at 0x40E; calculate and scan it here.
7627+ *
7628+ * NOTE! There are Linux loaders that will corrupt the EBDA
7629+ * area, and as such this kind of SMP config may be less
7630+ * trustworthy, simply because the SMP table may have been
7631+ * stomped on during early boot. These loaders are buggy and
7632+ * should be fixed.
7633+ *
7634+ * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
7635+ */
7636+
7637+#ifndef CONFIG_XEN
7638+ address = get_bios_ebda();
7639+ if (address)
7640+ smp_scan_config(address, 0x400);
7641+#endif
7642+}
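
The comment in find_smp_config() above mentions the extended BIOS data area: the 16-bit word at physical address 0x40E holds a real-mode segment, and shifting it left by four gives the EBDA's physical base, which the non-Xen path then passes to smp_scan_config(). A tiny sketch of that arithmetic with a made-up segment value (not read from real memory):

    #include <stdio.h>

    int main(void)
    {
        unsigned short ebda_segment = 0x9fc0;   /* example BIOS data area word at 0x40E */
        unsigned long ebda_address = (unsigned long)ebda_segment << 4;

        printf("EBDA base: 0x%05lx\n", ebda_address);   /* 0x9fc00 */
        return 0;
    }
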
7643+
7644+int es7000_plat;
7645+
7646+/* --------------------------------------------------------------------------
7647+ ACPI-based MP Configuration
7648+ -------------------------------------------------------------------------- */
7649+
7650+#ifdef CONFIG_ACPI
7651+
7652+void __init mp_register_lapic_address (
7653+ u64 address)
7654+{
7655+#ifndef CONFIG_XEN
7656+ mp_lapic_addr = (unsigned long) address;
7657+
7658+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
7659+
7660+ if (boot_cpu_physical_apicid == -1U)
7661+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
7662+
7663+ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
7664+#endif
7665+}
7666+
7667+
7668+void __devinit mp_register_lapic (
7669+ u8 id,
7670+ u8 enabled)
7671+{
7672+ struct mpc_config_processor processor;
7673+ int boot_cpu = 0;
7674+
7675+ if (MAX_APICS - id <= 0) {
7676+ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
7677+ id, MAX_APICS);
7678+ return;
7679+ }
7680+
7681+ if (id == boot_cpu_physical_apicid)
7682+ boot_cpu = 1;
7683+
7684+#ifndef CONFIG_XEN
7685+ processor.mpc_type = MP_PROCESSOR;
7686+ processor.mpc_apicid = id;
7687+ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
7688+ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
7689+ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
7690+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
7691+ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
7692+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
7693+ processor.mpc_reserved[0] = 0;
7694+ processor.mpc_reserved[1] = 0;
7695+#endif
7696+
7697+ MP_processor_info(&processor);
7698+}
7699+
7700+#ifdef CONFIG_X86_IO_APIC
7701+
7702+#define MP_ISA_BUS 0
7703+#define MP_MAX_IOAPIC_PIN 127
7704+
7705+static struct mp_ioapic_routing {
7706+ int apic_id;
7707+ int gsi_base;
7708+ int gsi_end;
7709+ u32 pin_programmed[4];
7710+} mp_ioapic_routing[MAX_IO_APICS];
7711+
7712+
7713+static int mp_find_ioapic (
7714+ int gsi)
7715+{
7716+ int i = 0;
7717+
7718+ /* Find the IOAPIC that manages this GSI. */
7719+ for (i = 0; i < nr_ioapics; i++) {
7720+ if ((gsi >= mp_ioapic_routing[i].gsi_base)
7721+ && (gsi <= mp_ioapic_routing[i].gsi_end))
7722+ return i;
7723+ }
7724+
7725+ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
7726+
7727+ return -1;
7728+}
7729+
7730+
7731+void __init mp_register_ioapic (
7732+ u8 id,
7733+ u32 address,
7734+ u32 gsi_base)
7735+{
7736+ int idx = 0;
7737+ int tmpid;
7738+
7739+ if (nr_ioapics >= MAX_IO_APICS) {
7740+ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
7741+ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
7742+ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
7743+ }
7744+ if (!address) {
7745+ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
7746+ " found in MADT table, skipping!\n");
7747+ return;
7748+ }
7749+
7750+ idx = nr_ioapics++;
7751+
7752+ mp_ioapics[idx].mpc_type = MP_IOAPIC;
7753+ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
7754+ mp_ioapics[idx].mpc_apicaddr = address;
7755+
7756+#ifndef CONFIG_XEN
7757+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
7758+#endif
7759+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
7760+ && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
7761+ tmpid = io_apic_get_unique_id(idx, id);
7762+ else
7763+ tmpid = id;
7764+ if (tmpid == -1) {
7765+ nr_ioapics--;
7766+ return;
7767+ }
7768+ mp_ioapics[idx].mpc_apicid = tmpid;
7769+ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
7770+
7771+ /*
7772+ * Build basic GSI lookup table to facilitate gsi->io_apic lookups
7773+ * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
7774+ */
7775+ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
7776+ mp_ioapic_routing[idx].gsi_base = gsi_base;
7777+ mp_ioapic_routing[idx].gsi_end = gsi_base +
7778+ io_apic_get_redir_entries(idx);
7779+
7780+ printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
7781+ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
7782+ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
7783+ mp_ioapic_routing[idx].gsi_base,
7784+ mp_ioapic_routing[idx].gsi_end);
7785+
7786+ return;
7787+}
7788+
7789+
7790+void __init mp_override_legacy_irq (
7791+ u8 bus_irq,
7792+ u8 polarity,
7793+ u8 trigger,
7794+ u32 gsi)
7795+{
7796+ struct mpc_config_intsrc intsrc;
7797+ int ioapic = -1;
7798+ int pin = -1;
7799+
7800+ /*
7801+ * Convert 'gsi' to 'ioapic.pin'.
7802+ */
7803+ ioapic = mp_find_ioapic(gsi);
7804+ if (ioapic < 0)
7805+ return;
7806+ pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
7807+
7808+ /*
7809+ * TBD: This check is for faulty timer entries, where the override
7810+ * erroneously sets the trigger to level, resulting in a HUGE
7811+ * increase of timer interrupts!
7812+ */
7813+ if ((bus_irq == 0) && (trigger == 3))
7814+ trigger = 1;
7815+
7816+ intsrc.mpc_type = MP_INTSRC;
7817+ intsrc.mpc_irqtype = mp_INT;
7818+ intsrc.mpc_irqflag = (trigger << 2) | polarity;
7819+ intsrc.mpc_srcbus = MP_ISA_BUS;
7820+ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
7821+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
7822+ intsrc.mpc_dstirq = pin; /* INTIN# */
7823+
7824+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
7825+ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
7826+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
7827+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
7828+
7829+ mp_irqs[mp_irq_entries] = intsrc;
7830+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
7831+ panic("Max # of irq sources exceeded!\n");
7832+
7833+ return;
7834+}
7835+
7836+void __init mp_config_acpi_legacy_irqs (void)
7837+{
7838+ struct mpc_config_intsrc intsrc;
7839+ int i = 0;
7840+ int ioapic = -1;
7841+
7842+ /*
7843+ * Fabricate the legacy ISA bus (bus #31).
7844+ */
7845+ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
7846+ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
7847+
7848+ /*
7849+ * Older generations of ES7000 have no legacy identity mappings
7850+ */
7851+ if (es7000_plat == 1)
7852+ return;
7853+
7854+ /*
7855+ * Locate the IOAPIC that manages the ISA IRQs (0-15).
7856+ */
7857+ ioapic = mp_find_ioapic(0);
7858+ if (ioapic < 0)
7859+ return;
7860+
7861+ intsrc.mpc_type = MP_INTSRC;
7862+ intsrc.mpc_irqflag = 0; /* Conforming */
7863+ intsrc.mpc_srcbus = MP_ISA_BUS;
7864+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
7865+
7866+ /*
7867+	 * Use the default configuration for IRQs 0-15, unless
7868+	 * overridden by (MADT) interrupt source override entries.
7869+ */
7870+ for (i = 0; i < 16; i++) {
7871+ int idx;
7872+
7873+ for (idx = 0; idx < mp_irq_entries; idx++) {
7874+ struct mpc_config_intsrc *irq = mp_irqs + idx;
7875+
7876+ /* Do we already have a mapping for this ISA IRQ? */
7877+ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
7878+ break;
7879+
7880+ /* Do we already have a mapping for this IOAPIC pin */
7881+ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
7882+ (irq->mpc_dstirq == i))
7883+ break;
7884+ }
7885+
7886+ if (idx != mp_irq_entries) {
7887+ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
7888+ continue; /* IRQ already used */
7889+ }
7890+
7891+ intsrc.mpc_irqtype = mp_INT;
7892+ intsrc.mpc_srcbusirq = i; /* Identity mapped */
7893+ intsrc.mpc_dstirq = i;
7894+
7895+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
7896+ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
7897+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
7898+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
7899+ intsrc.mpc_dstirq);
7900+
7901+ mp_irqs[mp_irq_entries] = intsrc;
7902+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
7903+ panic("Max # of irq sources exceeded!\n");
7904+ }
7905+}
7906+
7907+#define MAX_GSI_NUM 4096
7908+
7909+int mp_register_gsi (u32 gsi, int triggering, int polarity)
7910+{
7911+ int ioapic = -1;
7912+ int ioapic_pin = 0;
7913+ int idx, bit = 0;
7914+ static int pci_irq = 16;
7915+ /*
7916+	 * Mapping between Global System Interrupts, which
7917+ * represent all possible interrupts, and IRQs
7918+ * assigned to actual devices.
7919+ */
7920+ static int gsi_to_irq[MAX_GSI_NUM];
7921+
7922+ /* Don't set up the ACPI SCI because it's already set up */
7923+ if (acpi_fadt.sci_int == gsi)
7924+ return gsi;
7925+
7926+ ioapic = mp_find_ioapic(gsi);
7927+ if (ioapic < 0) {
7928+ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
7929+ return gsi;
7930+ }
7931+
7932+ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
7933+
7934+ if (ioapic_renumber_irq)
7935+ gsi = ioapic_renumber_irq(ioapic, gsi);
7936+
7937+ /*
7938+ * Avoid pin reprogramming. PRTs typically include entries
7939+ * with redundant pin->gsi mappings (but unique PCI devices);
7940+ * we only program the IOAPIC on the first.
7941+ */
7942+ bit = ioapic_pin % 32;
7943+ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
7944+ if (idx > 3) {
7945+ printk(KERN_ERR "Invalid reference to IOAPIC pin "
7946+ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
7947+ ioapic_pin);
7948+ return gsi;
7949+ }
7950+ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
7951+ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
7952+ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
7953+ return gsi_to_irq[gsi];
7954+ }
7955+
7956+ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
7957+
7958+ if (triggering == ACPI_LEVEL_SENSITIVE) {
7959+ /*
7960+ * For PCI devices assign IRQs in order, avoiding gaps
7961+ * due to unused I/O APIC pins.
7962+ */
7963+ int irq = gsi;
7964+ if (gsi < MAX_GSI_NUM) {
7965+ /*
7966+ * Retain the VIA chipset work-around (gsi > 15), but
7967+			 * avoid a problem where the 8254 timer (IRQ0) is set up
7968+ * via an override (so it's not on pin 0 of the ioapic),
7969+ * and at the same time, the pin 0 interrupt is a PCI
7970+ * type. The gsi > 15 test could cause these two pins
7971+ * to be shared as IRQ0, and they are not shareable.
7972+ * So test for this condition, and if necessary, avoid
7973+ * the pin collision.
7974+ */
7975+ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
7976+ gsi = pci_irq++;
7977+ /*
7978+ * Don't assign IRQ used by ACPI SCI
7979+ */
7980+ if (gsi == acpi_fadt.sci_int)
7981+ gsi = pci_irq++;
7982+ gsi_to_irq[irq] = gsi;
7983+ } else {
7984+ printk(KERN_ERR "GSI %u is too high\n", gsi);
7985+ return gsi;
7986+ }
7987+ }
7988+
7989+ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
7990+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
7991+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
7992+ return gsi;
7993+}
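
mp_register_gsi() above tracks up to 128 I/O APIC pins in a 4 x 32-bit pin_programmed bitmap so that redundant PRT entries for the same pin do not reprogram it a second time. The self-contained sketch below mirrors only that bookkeeping; the function name and return values are illustrative, not kernel API.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pin_programmed[4];      /* covers pins 0..127 */

    /* Returns 1 the first time a pin is seen, 0 on repeats, -1 if out of range. */
    static int mark_pin_once(int pin)
    {
        int idx = pin / 32;
        int bit = pin % 32;

        if (idx > 3)
            return -1;
        if (pin_programmed[idx] & (1u << bit))
            return 0;
        pin_programmed[idx] |= 1u << bit;
        return 1;
    }

    int main(void)
    {
        printf("%d\n", mark_pin_once(9));    /* 1: first use, program the pin */
        printf("%d\n", mark_pin_once(9));    /* 0: already programmed, skip   */
        printf("%d\n", mark_pin_once(130));  /* -1: beyond the 128-pin table  */
        return 0;
    }
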
7994+
7995+#endif /* CONFIG_X86_IO_APIC */
7996+#endif /* CONFIG_ACPI */
7997Index: head-2008-11-25/arch/x86/kernel/pci-dma-xen.c
7998===================================================================
7999--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8000+++ head-2008-11-25/arch/x86/kernel/pci-dma-xen.c 2008-10-29 09:55:56.000000000 +0100
8001@@ -0,0 +1,409 @@
8002+/*
8003+ * Dynamic DMA mapping support.
8004+ *
8005+ * On i386 there is no hardware dynamic DMA address translation,
8006+ * so consistent alloc/free are merely page allocation/freeing.
8007+ * The rest of the dynamic DMA mapping interface is implemented
8008+ * in asm/pci.h.
8009+ */
8010+
8011+#include <linux/types.h>
8012+#include <linux/mm.h>
8013+#include <linux/string.h>
8014+#include <linux/pci.h>
8015+#include <linux/module.h>
8016+#include <linux/version.h>
8017+#include <asm/io.h>
8018+#include <xen/balloon.h>
8019+#include <xen/gnttab.h>
8020+#include <asm/swiotlb.h>
8021+#include <asm/tlbflush.h>
8022+#include <asm-i386/mach-xen/asm/swiotlb.h>
8023+#include <asm-i386/mach-xen/asm/gnttab_dma.h>
8024+#include <asm/bug.h>
8025+
8026+#ifdef __x86_64__
8027+#include <asm/proto.h>
8028+
8029+int iommu_merge __read_mostly = 0;
8030+EXPORT_SYMBOL(iommu_merge);
8031+
8032+dma_addr_t bad_dma_address __read_mostly;
8033+EXPORT_SYMBOL(bad_dma_address);
8034+
8035+/* This tells the BIO block layer to assume merging. Default to off
8036+ because we cannot guarantee merging later. */
8037+int iommu_bio_merge __read_mostly = 0;
8038+EXPORT_SYMBOL(iommu_bio_merge);
8039+
8040+int force_iommu __read_mostly= 0;
8041+
8042+__init int iommu_setup(char *p)
8043+{
8044+ return 1;
8045+}
8046+
8047+void __init pci_iommu_alloc(void)
8048+{
8049+#ifdef CONFIG_SWIOTLB
8050+ pci_swiotlb_init();
8051+#endif
8052+}
8053+
8054+static int __init pci_iommu_init(void)
8055+{
8056+ no_iommu_init();
8057+ return 0;
8058+}
8059+
8060+/* Must execute after PCI subsystem */
8061+fs_initcall(pci_iommu_init);
8062+#endif
8063+
8064+struct dma_coherent_mem {
8065+ void *virt_base;
8066+ u32 device_base;
8067+ int size;
8068+ int flags;
8069+ unsigned long *bitmap;
8070+};
8071+
8072+#define IOMMU_BUG_ON(test) \
8073+do { \
8074+ if (unlikely(test)) { \
8075+ printk(KERN_ALERT "Fatal DMA error! " \
8076+ "Please use 'swiotlb=force'\n"); \
8077+ BUG(); \
8078+ } \
8079+} while (0)
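
IOMMU_BUG_ON() above uses the common do { ... } while (0) wrapper so that a multi-statement macro expands to exactly one statement and stays safe inside an unbraced if/else. A minimal illustration with a hypothetical macro of the same shape:

    #include <stdio.h>

    /* Wrapped in do { } while (0) so the expansion is a single statement. */
    #define REPORT_IF(cond, msg)     \
    do {                             \
        if (cond)                    \
            printf("%s\n", msg);     \
    } while (0)

    int main(void)
    {
        int x = 5;

        if (x > 3)
            REPORT_IF(x > 4, "x exceeds 4");
        else
            printf("small x\n");
        return 0;
    }
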
8080+
8081+static int check_pages_physically_contiguous(unsigned long pfn,
8082+ unsigned int offset,
8083+ size_t length)
8084+{
8085+ unsigned long next_mfn;
8086+ int i;
8087+ int nr_pages;
8088+
8089+ next_mfn = pfn_to_mfn(pfn);
8090+ nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
8091+
8092+ for (i = 1; i < nr_pages; i++) {
8093+ if (pfn_to_mfn(++pfn) != ++next_mfn)
8094+ return 0;
8095+ }
8096+ return 1;
8097+}
8098+
8099+int range_straddles_page_boundary(paddr_t p, size_t size)
8100+{
8101+ unsigned long pfn = p >> PAGE_SHIFT;
8102+ unsigned int offset = p & ~PAGE_MASK;
8103+
8104+ return ((offset + size > PAGE_SIZE) &&
8105+ !check_pages_physically_contiguous(pfn, offset, size));
8106+}
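
range_straddles_page_boundary() above flags a buffer as needing special handling only when it both crosses a page boundary and sits on machine frames that are not physically contiguous, the case a Xen guest can hit because pseudo-physical pages may map to scattered machine frames. Below is a userspace re-creation of that test; the toy pfn-to-mfn table stands in for pfn_to_mfn() and every value is invented.

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    /* Pretend frames 0..7 are machine-contiguous and frame 8 is remapped. */
    static unsigned long toy_pfn_to_mfn(unsigned long pfn)
    {
        static const unsigned long mfn[] = { 100, 101, 102, 103, 104, 105, 106, 107, 500 };
        return mfn[pfn];
    }

    static int pages_contiguous(unsigned long pfn, unsigned int offset, size_t len)
    {
        unsigned long next = toy_pfn_to_mfn(pfn);
        int nr = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int i;

        for (i = 1; i < nr; i++)
            if (toy_pfn_to_mfn(++pfn) != ++next)
                return 0;
        return 1;
    }

    static int straddles(unsigned long paddr, size_t size)
    {
        unsigned long pfn = paddr >> PAGE_SHIFT;
        unsigned int offset = paddr & ~PAGE_MASK;

        return (offset + size > PAGE_SIZE) && !pages_contiguous(pfn, offset, size);
    }

    int main(void)
    {
        printf("%d\n", straddles(0x0ff0, 0x40));   /* crosses into frame 1, contiguous: 0 */
        printf("%d\n", straddles(0x7ff0, 0x40));   /* crosses into frame 8, scattered:  1 */
        return 0;
    }
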
8107+
8108+int
8109+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
8110+ enum dma_data_direction direction)
8111+{
8112+ int i, rc;
8113+
8114+ if (direction == DMA_NONE)
8115+ BUG();
8116+ WARN_ON(nents == 0 || sg[0].length == 0);
8117+
8118+ if (swiotlb) {
8119+ rc = swiotlb_map_sg(hwdev, sg, nents, direction);
8120+ } else {
8121+ for (i = 0; i < nents; i++ ) {
8122+ BUG_ON(!sg[i].page);
8123+ sg[i].dma_address =
8124+ gnttab_dma_map_page(sg[i].page) + sg[i].offset;
8125+ sg[i].dma_length = sg[i].length;
8126+ IOMMU_BUG_ON(address_needs_mapping(
8127+ hwdev, sg[i].dma_address));
8128+ IOMMU_BUG_ON(range_straddles_page_boundary(
8129+ page_to_pseudophys(sg[i].page) + sg[i].offset,
8130+ sg[i].length));
8131+ }
8132+ rc = nents;
8133+ }
8134+
8135+ flush_write_buffers();
8136+ return rc;
8137+}
8138+EXPORT_SYMBOL(dma_map_sg);
8139+
8140+void
8141+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
8142+ enum dma_data_direction direction)
8143+{
8144+ int i;
8145+
8146+ BUG_ON(direction == DMA_NONE);
8147+ if (swiotlb)
8148+ swiotlb_unmap_sg(hwdev, sg, nents, direction);
8149+ else {
8150+ for (i = 0; i < nents; i++ )
8151+ gnttab_dma_unmap_page(sg[i].dma_address);
8152+ }
8153+}
8154+EXPORT_SYMBOL(dma_unmap_sg);
8155+
8156+#ifdef CONFIG_HIGHMEM
8157+dma_addr_t
8158+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
8159+ size_t size, enum dma_data_direction direction)
8160+{
8161+ dma_addr_t dma_addr;
8162+
8163+ BUG_ON(direction == DMA_NONE);
8164+
8165+ if (swiotlb) {
8166+ dma_addr = swiotlb_map_page(
8167+ dev, page, offset, size, direction);
8168+ } else {
8169+ dma_addr = gnttab_dma_map_page(page) + offset;
8170+ IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
8171+ }
8172+
8173+ return dma_addr;
8174+}
8175+EXPORT_SYMBOL(dma_map_page);
8176+
8177+void
8178+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
8179+ enum dma_data_direction direction)
8180+{
8181+ BUG_ON(direction == DMA_NONE);
8182+ if (swiotlb)
8183+ swiotlb_unmap_page(dev, dma_address, size, direction);
8184+ else
8185+ gnttab_dma_unmap_page(dma_address);
8186+}
8187+EXPORT_SYMBOL(dma_unmap_page);
8188+#endif /* CONFIG_HIGHMEM */
8189+
8190+int
8191+dma_mapping_error(dma_addr_t dma_addr)
8192+{
8193+ if (swiotlb)
8194+ return swiotlb_dma_mapping_error(dma_addr);
8195+ return 0;
8196+}
8197+EXPORT_SYMBOL(dma_mapping_error);
8198+
8199+int
8200+dma_supported(struct device *dev, u64 mask)
8201+{
8202+ if (swiotlb)
8203+ return swiotlb_dma_supported(dev, mask);
8204+ /*
8205+ * By default we'll BUG when an infeasible DMA is requested, and
8206+ * request swiotlb=force (see IOMMU_BUG_ON).
8207+ */
8208+ return 1;
8209+}
8210+EXPORT_SYMBOL(dma_supported);
8211+
8212+void *dma_alloc_coherent(struct device *dev, size_t size,
8213+ dma_addr_t *dma_handle, gfp_t gfp)
8214+{
8215+ void *ret;
8216+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
8217+ unsigned int order = get_order(size);
8218+ unsigned long vstart;
8219+ u64 mask;
8220+
8221+ /* ignore region specifiers */
8222+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
8223+
8224+ if (mem) {
8225+ int page = bitmap_find_free_region(mem->bitmap, mem->size,
8226+ order);
8227+ if (page >= 0) {
8228+ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
8229+ ret = mem->virt_base + (page << PAGE_SHIFT);
8230+ memset(ret, 0, size);
8231+ return ret;
8232+ }
8233+ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
8234+ return NULL;
8235+ }
8236+
8237+ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
8238+ gfp |= GFP_DMA;
8239+
8240+ vstart = __get_free_pages(gfp, order);
8241+ ret = (void *)vstart;
8242+
8243+ if (dev != NULL && dev->coherent_dma_mask)
8244+ mask = dev->coherent_dma_mask;
8245+ else
8246+ mask = 0xffffffff;
8247+
8248+ if (ret != NULL) {
8249+ if (xen_create_contiguous_region(vstart, order,
8250+ fls64(mask)) != 0) {
8251+ free_pages(vstart, order);
8252+ return NULL;
8253+ }
8254+ memset(ret, 0, size);
8255+ *dma_handle = virt_to_bus(ret);
8256+ }
8257+ return ret;
8258+}
8259+EXPORT_SYMBOL(dma_alloc_coherent);
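
dma_alloc_coherent() above sizes the request with get_order() and turns the device's coherent DMA mask into an address-bit count via fls64() before asking Xen for a machine-contiguous region. The loops below are userspace re-implementations for illustration only; the in-kernel helpers are optimized and are not written this way.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Smallest power-of-two page order that holds 'size' bytes. */
    static int toy_get_order(size_t size)
    {
        int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    /* Position of the highest set bit, counted from 1 (0 for an all-zero mask). */
    static int toy_fls64(uint64_t x)
    {
        int bits = 0;

        while (x) {
            bits++;
            x >>= 1;
        }
        return bits;
    }

    int main(void)
    {
        printf("order for 8 KiB: %d\n", toy_get_order(8 * 1024));        /* 1 */
        printf("order for 9 KiB: %d\n", toy_get_order(9 * 1024));        /* 2 */
        printf("bits for 32-bit mask: %d\n", toy_fls64(0xffffffffULL));  /* 32 */
        return 0;
    }
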
8260+
8261+void dma_free_coherent(struct device *dev, size_t size,
8262+ void *vaddr, dma_addr_t dma_handle)
8263+{
8264+ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
8265+ int order = get_order(size);
8266+
8267+ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
8268+ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
8269+
8270+ bitmap_release_region(mem->bitmap, page, order);
8271+ } else {
8272+ xen_destroy_contiguous_region((unsigned long)vaddr, order);
8273+ free_pages((unsigned long)vaddr, order);
8274+ }
8275+}
8276+EXPORT_SYMBOL(dma_free_coherent);
8277+
8278+#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
8279+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
8280+ dma_addr_t device_addr, size_t size, int flags)
8281+{
8282+ void __iomem *mem_base;
8283+ int pages = size >> PAGE_SHIFT;
8284+ int bitmap_size = (pages + 31)/32;
8285+
8286+ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
8287+ goto out;
8288+ if (!size)
8289+ goto out;
8290+ if (dev->dma_mem)
8291+ goto out;
8292+
8293+ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
8294+
8295+ mem_base = ioremap(bus_addr, size);
8296+ if (!mem_base)
8297+ goto out;
8298+
8299+ dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
8300+ if (!dev->dma_mem)
8301+ goto out;
8302+ memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
8303+ dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
8304+ if (!dev->dma_mem->bitmap)
8305+ goto free1_out;
8306+ memset(dev->dma_mem->bitmap, 0, bitmap_size);
8307+
8308+ dev->dma_mem->virt_base = mem_base;
8309+ dev->dma_mem->device_base = device_addr;
8310+ dev->dma_mem->size = pages;
8311+ dev->dma_mem->flags = flags;
8312+
8313+ if (flags & DMA_MEMORY_MAP)
8314+ return DMA_MEMORY_MAP;
8315+
8316+ return DMA_MEMORY_IO;
8317+
8318+ free1_out:
8319+ kfree(dev->dma_mem->bitmap);
8320+ out:
8321+ return 0;
8322+}
8323+EXPORT_SYMBOL(dma_declare_coherent_memory);
8324+
8325+void dma_release_declared_memory(struct device *dev)
8326+{
8327+ struct dma_coherent_mem *mem = dev->dma_mem;
8328+
8329+ if(!mem)
8330+ return;
8331+ dev->dma_mem = NULL;
8332+ iounmap(mem->virt_base);
8333+ kfree(mem->bitmap);
8334+ kfree(mem);
8335+}
8336+EXPORT_SYMBOL(dma_release_declared_memory);
8337+
8338+void *dma_mark_declared_memory_occupied(struct device *dev,
8339+ dma_addr_t device_addr, size_t size)
8340+{
8341+ struct dma_coherent_mem *mem = dev->dma_mem;
8342+ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
8343+ int pos, err;
8344+
8345+ if (!mem)
8346+ return ERR_PTR(-EINVAL);
8347+
8348+ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
8349+ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
8350+ if (err != 0)
8351+ return ERR_PTR(err);
8352+ return mem->virt_base + (pos << PAGE_SHIFT);
8353+}
8354+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
8355+#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
8356+
8357+dma_addr_t
8358+dma_map_single(struct device *dev, void *ptr, size_t size,
8359+ enum dma_data_direction direction)
8360+{
8361+ dma_addr_t dma;
8362+
8363+ if (direction == DMA_NONE)
8364+ BUG();
8365+ WARN_ON(size == 0);
8366+
8367+ if (swiotlb) {
8368+ dma = swiotlb_map_single(dev, ptr, size, direction);
8369+ } else {
8370+ dma = gnttab_dma_map_page(virt_to_page(ptr)) +
8371+ offset_in_page(ptr);
8372+ IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
8373+ IOMMU_BUG_ON(address_needs_mapping(dev, dma));
8374+ }
8375+
8376+ flush_write_buffers();
8377+ return dma;
8378+}
8379+EXPORT_SYMBOL(dma_map_single);
8380+
8381+void
8382+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
8383+ enum dma_data_direction direction)
8384+{
8385+ if (direction == DMA_NONE)
8386+ BUG();
8387+ if (swiotlb)
8388+ swiotlb_unmap_single(dev, dma_addr, size, direction);
8389+ else
8390+ gnttab_dma_unmap_page(dma_addr);
8391+}
8392+EXPORT_SYMBOL(dma_unmap_single);
8393+
8394+void
8395+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
8396+ enum dma_data_direction direction)
8397+{
8398+ if (swiotlb)
8399+ swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
8400+}
8401+EXPORT_SYMBOL(dma_sync_single_for_cpu);
8402+
8403+void
8404+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
8405+ enum dma_data_direction direction)
8406+{
8407+ if (swiotlb)
8408+ swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
8409+}
8410+EXPORT_SYMBOL(dma_sync_single_for_device);
8411Index: head-2008-11-25/arch/x86/kernel/process_32-xen.c
8412===================================================================
8413--- /dev/null 1970-01-01 00:00:00.000000000 +0000
8414+++ head-2008-11-25/arch/x86/kernel/process_32-xen.c 2008-07-21 11:00:32.000000000 +0200
8415@@ -0,0 +1,877 @@
8416+/*
8417+ * linux/arch/i386/kernel/process.c
8418+ *
8419+ * Copyright (C) 1995 Linus Torvalds
8420+ *
8421+ * Pentium III FXSR, SSE support
8422+ * Gareth Hughes <gareth@valinux.com>, May 2000
8423+ */
8424+
8425+/*
8426+ * This file handles the architecture-dependent parts of process handling..
8427+ */
8428+
8429+#include <stdarg.h>
8430+
8431+#include <linux/cpu.h>
8432+#include <linux/errno.h>
8433+#include <linux/sched.h>
8434+#include <linux/fs.h>
8435+#include <linux/kernel.h>
8436+#include <linux/mm.h>
8437+#include <linux/elfcore.h>
8438+#include <linux/smp.h>
8439+#include <linux/smp_lock.h>
8440+#include <linux/stddef.h>
8441+#include <linux/slab.h>
8442+#include <linux/vmalloc.h>
8443+#include <linux/user.h>
8444+#include <linux/a.out.h>
8445+#include <linux/interrupt.h>
8446+#include <linux/utsname.h>
8447+#include <linux/delay.h>
8448+#include <linux/reboot.h>
8449+#include <linux/init.h>
8450+#include <linux/mc146818rtc.h>
8451+#include <linux/module.h>
8452+#include <linux/kallsyms.h>
8453+#include <linux/ptrace.h>
8454+#include <linux/random.h>
8455+
8456+#include <asm/uaccess.h>
8457+#include <asm/pgtable.h>
8458+#include <asm/system.h>
8459+#include <asm/io.h>
8460+#include <asm/ldt.h>
8461+#include <asm/processor.h>
8462+#include <asm/i387.h>
8463+#include <asm/desc.h>
8464+#include <asm/vm86.h>
8465+#ifdef CONFIG_MATH_EMULATION
8466+#include <asm/math_emu.h>
8467+#endif
8468+
8469+#include <xen/interface/physdev.h>
8470+#include <xen/interface/vcpu.h>
8471+#include <xen/cpu_hotplug.h>
8472+
8473+#include <linux/err.h>
8474+
8475+#include <asm/tlbflush.h>
8476+#include <asm/cpu.h>
8477+
8478+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
8479+
8480+static int hlt_counter;
8481+
8482+unsigned long boot_option_idle_override = 0;
8483+EXPORT_SYMBOL(boot_option_idle_override);
8484+
8485+/*
8486+ * Return saved PC of a blocked thread.
8487+ */
8488+unsigned long thread_saved_pc(struct task_struct *tsk)
8489+{
8490+ return ((unsigned long *)tsk->thread.esp)[3];
8491+}
8492+
8493+/*
8494+ * Power management idle function, if any.
8495+ */
8496+void (*pm_idle)(void);
8497+EXPORT_SYMBOL(pm_idle);
8498+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
8499+
8500+void disable_hlt(void)
8501+{
8502+ hlt_counter++;
8503+}
8504+
8505+EXPORT_SYMBOL(disable_hlt);
8506+
8507+void enable_hlt(void)
8508+{
8509+ hlt_counter--;
8510+}
8511+
8512+EXPORT_SYMBOL(enable_hlt);
8513+
8514+/*
8515+ * On SMP it's slightly faster (but much more power-consuming!)
8516+ * to poll the ->work.need_resched flag instead of waiting for the
8517+ * cross-CPU IPI to arrive. Use this option with caution.
8518+ */
8519+static void poll_idle (void)
8520+{
8521+ local_irq_enable();
8522+
8523+ asm volatile(
8524+ "2:"
8525+ "testl %0, %1;"
8526+ "rep; nop;"
8527+ "je 2b;"
8528+ : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
8529+}
8530+
8531+static void xen_idle(void)
8532+{
8533+ local_irq_disable();
8534+
8535+ if (need_resched())
8536+ local_irq_enable();
8537+ else {
8538+ current_thread_info()->status &= ~TS_POLLING;
8539+ smp_mb__after_clear_bit();
8540+ safe_halt();
8541+ current_thread_info()->status |= TS_POLLING;
8542+ }
8543+}
8544+#ifdef CONFIG_APM_MODULE
8545+EXPORT_SYMBOL(default_idle);
8546+#endif
8547+
8548+#ifdef CONFIG_HOTPLUG_CPU
8549+extern cpumask_t cpu_initialized;
8550+static inline void play_dead(void)
8551+{
8552+ idle_task_exit();
8553+ local_irq_disable();
8554+ cpu_clear(smp_processor_id(), cpu_initialized);
8555+ preempt_enable_no_resched();
8556+ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
8557+ cpu_bringup();
8558+}
8559+#else
8560+static inline void play_dead(void)
8561+{
8562+ BUG();
8563+}
8564+#endif /* CONFIG_HOTPLUG_CPU */
8565+
8566+/*
8567+ * The idle thread. There's no useful work to be
8568+ * done, so just try to conserve power and have a
8569+ * low exit latency (ie sit in a loop waiting for
8570+ * somebody to say that they'd like to reschedule)
8571+ */
8572+void cpu_idle(void)
8573+{
8574+ int cpu = smp_processor_id();
8575+
8576+ current_thread_info()->status |= TS_POLLING;
8577+
8578+ /* endless idle loop with no priority at all */
8579+ while (1) {
8580+ while (!need_resched()) {
8581+ void (*idle)(void);
8582+
8583+ if (__get_cpu_var(cpu_idle_state))
8584+ __get_cpu_var(cpu_idle_state) = 0;
8585+
8586+ rmb();
8587+ idle = xen_idle; /* no alternatives */
8588+
8589+ if (cpu_is_offline(cpu))
8590+ play_dead();
8591+
8592+ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
8593+ idle();
8594+ }
8595+ preempt_enable_no_resched();
8596+ schedule();
8597+ preempt_disable();
8598+ }
8599+}
8600+
8601+void cpu_idle_wait(void)
8602+{
8603+ unsigned int cpu, this_cpu = get_cpu();
8604+ cpumask_t map;
8605+
8606+ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
8607+ put_cpu();
8608+
8609+ cpus_clear(map);
8610+ for_each_online_cpu(cpu) {
8611+ per_cpu(cpu_idle_state, cpu) = 1;
8612+ cpu_set(cpu, map);
8613+ }
8614+
8615+ __get_cpu_var(cpu_idle_state) = 0;
8616+
8617+ wmb();
8618+ do {
8619+ ssleep(1);
8620+ for_each_online_cpu(cpu) {
8621+ if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
8622+ cpu_clear(cpu, map);
8623+ }
8624+ cpus_and(map, map, cpu_online_map);
8625+ } while (!cpus_empty(map));
8626+}
8627+EXPORT_SYMBOL_GPL(cpu_idle_wait);
8628+
8629+void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
8630+{
8631+}
8632+
8633+static int __init idle_setup (char *str)
8634+{
8635+ if (!strncmp(str, "poll", 4)) {
8636+ printk("using polling idle threads.\n");
8637+ pm_idle = poll_idle;
8638+ }
8639+
8640+ boot_option_idle_override = 1;
8641+ return 1;
8642+}
8643+
8644+__setup("idle=", idle_setup);
8645+
8646+void show_regs(struct pt_regs * regs)
8647+{
8648+ unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
8649+
8650+ printk("\n");
8651+ printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
8652+ printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
8653+ print_symbol("EIP is at %s\n", regs->eip);
8654+
8655+ if (user_mode_vm(regs))
8656+ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
8657+ printk(" EFLAGS: %08lx %s (%s %.*s)\n",
8658+ regs->eflags, print_tainted(), system_utsname.release,
8659+ (int)strcspn(system_utsname.version, " "),
8660+ system_utsname.version);
8661+ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
8662+ regs->eax,regs->ebx,regs->ecx,regs->edx);
8663+ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
8664+ regs->esi, regs->edi, regs->ebp);
8665+ printk(" DS: %04x ES: %04x\n",
8666+ 0xffff & regs->xds,0xffff & regs->xes);
8667+
8668+ cr0 = read_cr0();
8669+ cr2 = read_cr2();
8670+ cr3 = read_cr3();
8671+ cr4 = read_cr4_safe();
8672+ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
8673+ show_trace(NULL, regs, &regs->esp);
8674+}
8675+
8676+/*
8677+ * This gets run with %ebx containing the
8678+ * function to call, and %edx containing
8679+ * the "args".
8680+ */
8681+extern void kernel_thread_helper(void);
8682+__asm__(".section .text\n"
8683+ ".align 4\n"
8684+ "kernel_thread_helper:\n\t"
8685+ "movl %edx,%eax\n\t"
8686+ "pushl %edx\n\t"
8687+ "call *%ebx\n\t"
8688+ "pushl %eax\n\t"
8689+ "call do_exit\n"
8690+ ".previous");
8691+
8692+/*
8693+ * Create a kernel thread
8694+ */
8695+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
8696+{
8697+ struct pt_regs regs;
8698+
8699+ memset(&regs, 0, sizeof(regs));
8700+
8701+ regs.ebx = (unsigned long) fn;
8702+ regs.edx = (unsigned long) arg;
8703+
8704+ regs.xds = __USER_DS;
8705+ regs.xes = __USER_DS;
8706+ regs.orig_eax = -1;
8707+ regs.eip = (unsigned long) kernel_thread_helper;
8708+ regs.xcs = GET_KERNEL_CS();
8709+ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
8710+
8711+ /* Ok, create the new process.. */
8712+ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
8713+}
8714+EXPORT_SYMBOL(kernel_thread);
8715+
8716+/*
8717+ * Free current thread data structures etc..
8718+ */
8719+void exit_thread(void)
8720+{
8721+ /* The process may have allocated an io port bitmap... nuke it. */
8722+ if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
8723+ struct task_struct *tsk = current;
8724+ struct thread_struct *t = &tsk->thread;
8725+ struct physdev_set_iobitmap set_iobitmap;
8726+ memset(&set_iobitmap, 0, sizeof(set_iobitmap));
8727+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
8728+ &set_iobitmap));
8729+ kfree(t->io_bitmap_ptr);
8730+ t->io_bitmap_ptr = NULL;
8731+ clear_thread_flag(TIF_IO_BITMAP);
8732+ }
8733+}
8734+
8735+void flush_thread(void)
8736+{
8737+ struct task_struct *tsk = current;
8738+
8739+ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
8740+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
8741+ clear_tsk_thread_flag(tsk, TIF_DEBUG);
8742+ /*
8743+ * Forget coprocessor state..
8744+ */
8745+ clear_fpu(tsk);
8746+ clear_used_math();
8747+}
8748+
8749+void release_thread(struct task_struct *dead_task)
8750+{
8751+ BUG_ON(dead_task->mm);
8752+ release_vm86_irqs(dead_task);
8753+}
8754+
8755+/*
8756+ * This gets called before we allocate a new thread and copy
8757+ * the current task into it.
8758+ */
8759+void prepare_to_copy(struct task_struct *tsk)
8760+{
8761+ unlazy_fpu(tsk);
8762+}
8763+
8764+int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
8765+ unsigned long unused,
8766+ struct task_struct * p, struct pt_regs * regs)
8767+{
8768+ struct pt_regs * childregs;
8769+ struct task_struct *tsk;
8770+ int err;
8771+
8772+ childregs = task_pt_regs(p);
8773+ *childregs = *regs;
8774+ childregs->eax = 0;
8775+ childregs->esp = esp;
8776+
8777+ p->thread.esp = (unsigned long) childregs;
8778+ p->thread.esp0 = (unsigned long) (childregs+1);
8779+
8780+ p->thread.eip = (unsigned long) ret_from_fork;
8781+
8782+ savesegment(fs,p->thread.fs);
8783+ savesegment(gs,p->thread.gs);
8784+
8785+ tsk = current;
8786+ if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
8787+ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
8788+ if (!p->thread.io_bitmap_ptr) {
8789+ p->thread.io_bitmap_max = 0;
8790+ return -ENOMEM;
8791+ }
8792+ memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
8793+ IO_BITMAP_BYTES);
8794+ set_tsk_thread_flag(p, TIF_IO_BITMAP);
8795+ }
8796+
8797+ /*
8798+ * Set a new TLS for the child thread?
8799+ */
8800+ if (clone_flags & CLONE_SETTLS) {
8801+ struct desc_struct *desc;
8802+ struct user_desc info;
8803+ int idx;
8804+
8805+ err = -EFAULT;
8806+ if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
8807+ goto out;
8808+ err = -EINVAL;
8809+ if (LDT_empty(&info))
8810+ goto out;
8811+
8812+ idx = info.entry_number;
8813+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
8814+ goto out;
8815+
8816+ desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
8817+ desc->a = LDT_entry_a(&info);
8818+ desc->b = LDT_entry_b(&info);
8819+ }
8820+
8821+ p->thread.iopl = current->thread.iopl;
8822+
8823+ err = 0;
8824+ out:
8825+ if (err && p->thread.io_bitmap_ptr) {
8826+ kfree(p->thread.io_bitmap_ptr);
8827+ p->thread.io_bitmap_max = 0;
8828+ }
8829+ return err;
8830+}
8831+
8832+/*
8833+ * fill in the user structure for a core dump..
8834+ */
8835+void dump_thread(struct pt_regs * regs, struct user * dump)
8836+{
8837+ int i;
8838+
8839+/* changed the size calculations - should hopefully work better. lbt */
8840+ dump->magic = CMAGIC;
8841+ dump->start_code = 0;
8842+ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
8843+ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
8844+ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
8845+ dump->u_dsize -= dump->u_tsize;
8846+ dump->u_ssize = 0;
8847+ for (i = 0; i < 8; i++)
8848+ dump->u_debugreg[i] = current->thread.debugreg[i];
8849+
8850+ if (dump->start_stack < TASK_SIZE)
8851+ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
8852+
8853+ dump->regs.ebx = regs->ebx;
8854+ dump->regs.ecx = regs->ecx;
8855+ dump->regs.edx = regs->edx;
8856+ dump->regs.esi = regs->esi;
8857+ dump->regs.edi = regs->edi;
8858+ dump->regs.ebp = regs->ebp;
8859+ dump->regs.eax = regs->eax;
8860+ dump->regs.ds = regs->xds;
8861+ dump->regs.es = regs->xes;
8862+ savesegment(fs,dump->regs.fs);
8863+ savesegment(gs,dump->regs.gs);
8864+ dump->regs.orig_eax = regs->orig_eax;
8865+ dump->regs.eip = regs->eip;
8866+ dump->regs.cs = regs->xcs;
8867+ dump->regs.eflags = regs->eflags;
8868+ dump->regs.esp = regs->esp;
8869+ dump->regs.ss = regs->xss;
8870+
8871+ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
8872+}
8873+EXPORT_SYMBOL(dump_thread);
8874+
8875+/*
8876+ * Capture the user space registers if the task is not running (in user space)
8877+ */
8878+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
8879+{
8880+ struct pt_regs ptregs = *task_pt_regs(tsk);
8881+ ptregs.xcs &= 0xffff;
8882+ ptregs.xds &= 0xffff;
8883+ ptregs.xes &= 0xffff;
8884+ ptregs.xss &= 0xffff;
8885+
8886+ elf_core_copy_regs(regs, &ptregs);
8887+
8888+ return 1;
8889+}
8890+
8891+static noinline void __switch_to_xtra(struct task_struct *next_p)
8892+{
8893+ struct thread_struct *next;
8894+
8895+ next = &next_p->thread;
8896+
8897+ if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
8898+ set_debugreg(next->debugreg[0], 0);
8899+ set_debugreg(next->debugreg[1], 1);
8900+ set_debugreg(next->debugreg[2], 2);
8901+ set_debugreg(next->debugreg[3], 3);
8902+ /* no 4 and 5 */
8903+ set_debugreg(next->debugreg[6], 6);
8904+ set_debugreg(next->debugreg[7], 7);
8905+ }
8906+}
8907+
8908+/*
8909+ * This function selects if the context switch from prev to next
8910+ * has to tweak the TSC disable bit in the cr4.
8911+ */
8912+static inline void disable_tsc(struct task_struct *prev_p,
8913+ struct task_struct *next_p)
8914+{
8915+ struct thread_info *prev, *next;
8916+
8917+ /*
8918+ * gcc should eliminate the ->thread_info dereference if
8919+ * has_secure_computing returns 0 at compile time (SECCOMP=n).
8920+ */
8921+ prev = task_thread_info(prev_p);
8922+ next = task_thread_info(next_p);
8923+
8924+ if (has_secure_computing(prev) || has_secure_computing(next)) {
8925+ /* slow path here */
8926+ if (has_secure_computing(prev) &&
8927+ !has_secure_computing(next)) {
8928+ write_cr4(read_cr4() & ~X86_CR4_TSD);
8929+ } else if (!has_secure_computing(prev) &&
8930+ has_secure_computing(next))
8931+ write_cr4(read_cr4() | X86_CR4_TSD);
8932+ }
8933+}
8934+
8935+/*
8936+ * switch_to(x,y) should switch tasks from x to y.
8937+ *
8938+ * We fsave/fwait so that an exception goes off at the right time
8939+ * (as a call from the fsave or fwait in effect) rather than to
8940+ * the wrong process. Lazy FP saving no longer makes any sense
8941+ * with modern CPUs, and this simplifies a lot of things (SMP
8942+ * and UP become the same).
8943+ *
8944+ * NOTE! We used to use the x86 hardware context switching. The
8945+ * reason for not using it any more becomes apparent when you
8946+ * try to recover gracefully from saved state that is no longer
8947+ * valid (stale segment register values in particular). With the
8948+ * hardware task-switch, there is no way to fix up bad state in
8949+ * a reasonable manner.
8950+ *
8951+ * The fact that Intel documents the hardware task-switching to
8952+ * be slow is a fairly red herring - this code is not noticeably
8953+ * faster. However, there _is_ some room for improvement here,
8954+ * so the performance issues may eventually be a valid point.
8955+ * More important, however, is the fact that this allows us much
8956+ * more flexibility.
8957+ *
8958+ * The return value (in %eax) will be the "prev" task after
8959+ * the task-switch, and shows up in ret_from_fork in entry.S,
8960+ * for example.
8961+ */
8962+struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
8963+{
8964+ struct thread_struct *prev = &prev_p->thread,
8965+ *next = &next_p->thread;
8966+ int cpu = smp_processor_id();
8967+#ifndef CONFIG_X86_NO_TSS
8968+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
8969+#endif
8970+#if CONFIG_XEN_COMPAT > 0x030002
8971+ struct physdev_set_iopl iopl_op;
8972+ struct physdev_set_iobitmap iobmp_op;
8973+#else
8974+ struct physdev_op _pdo[2], *pdo = _pdo;
8975+#define iopl_op pdo->u.set_iopl
8976+#define iobmp_op pdo->u.set_iobitmap
8977+#endif
8978+ multicall_entry_t _mcl[8], *mcl = _mcl;
8979+
8980+ /* XEN NOTE: FS/GS saved in switch_mm(), not here. */
8981+
8982+ /*
8983+ * This is basically '__unlazy_fpu', except that we queue a
8984+ * multicall to indicate FPU task switch, rather than
8985+ * synchronously trapping to Xen.
8986+ */
8987+ if (prev_p->thread_info->status & TS_USEDFPU) {
8988+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
8989+ mcl->op = __HYPERVISOR_fpu_taskswitch;
8990+ mcl->args[0] = 1;
8991+ mcl++;
8992+ }
8993+#if 0 /* lazy fpu sanity check */
8994+ else BUG_ON(!(read_cr0() & 8));
8995+#endif
8996+
8997+ /*
8998+ * Reload esp0.
8999+ * This is load_esp0(tss, next) with a multicall.
9000+ */
9001+ mcl->op = __HYPERVISOR_stack_switch;
9002+ mcl->args[0] = __KERNEL_DS;
9003+ mcl->args[1] = next->esp0;
9004+ mcl++;
9005+
9006+ /*
9007+ * Load the per-thread Thread-Local Storage descriptor.
9008+ * This is load_TLS(next, cpu) with multicalls.
9009+ */
9010+#define C(i) do { \
9011+ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
9012+ next->tls_array[i].b != prev->tls_array[i].b)) { \
9013+ mcl->op = __HYPERVISOR_update_descriptor; \
9014+ *(u64 *)&mcl->args[0] = virt_to_machine( \
9015+ &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
9016+ *(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i]; \
9017+ mcl++; \
9018+ } \
9019+} while (0)
9020+ C(0); C(1); C(2);
9021+#undef C
9022+
9023+ if (unlikely(prev->iopl != next->iopl)) {
9024+ iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
9025+#if CONFIG_XEN_COMPAT > 0x030002
9026+ mcl->op = __HYPERVISOR_physdev_op;
9027+ mcl->args[0] = PHYSDEVOP_set_iopl;
9028+ mcl->args[1] = (unsigned long)&iopl_op;
9029+#else
9030+ mcl->op = __HYPERVISOR_physdev_op_compat;
9031+ pdo->cmd = PHYSDEVOP_set_iopl;
9032+ mcl->args[0] = (unsigned long)pdo++;
9033+#endif
9034+ mcl++;
9035+ }
9036+
9037+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
9038+ set_xen_guest_handle(iobmp_op.bitmap,
9039+ (char *)next->io_bitmap_ptr);
9040+ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
9041+#if CONFIG_XEN_COMPAT > 0x030002
9042+ mcl->op = __HYPERVISOR_physdev_op;
9043+ mcl->args[0] = PHYSDEVOP_set_iobitmap;
9044+ mcl->args[1] = (unsigned long)&iobmp_op;
9045+#else
9046+ mcl->op = __HYPERVISOR_physdev_op_compat;
9047+ pdo->cmd = PHYSDEVOP_set_iobitmap;
9048+ mcl->args[0] = (unsigned long)pdo++;
9049+#endif
9050+ mcl++;
9051+ }
9052+
9053+#if CONFIG_XEN_COMPAT <= 0x030002
9054+ BUG_ON(pdo > _pdo + ARRAY_SIZE(_pdo));
9055+#endif
9056+ BUG_ON(mcl > _mcl + ARRAY_SIZE(_mcl));
9057+ if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL)))
9058+ BUG();
9059+
9060+ /*
9061+ * Restore %fs and %gs if needed.
9062+ *
9063+ * Glibc normally makes %fs be zero, and %gs is one of
9064+ * the TLS segments.
9065+ */
9066+ if (unlikely(next->fs))
9067+ loadsegment(fs, next->fs);
9068+
9069+ if (next->gs)
9070+ loadsegment(gs, next->gs);
9071+
9072+ /*
9073+ * Now maybe handle debug registers
9074+ */
9075+ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
9076+ __switch_to_xtra(next_p);
9077+
9078+ disable_tsc(prev_p, next_p);
9079+
9080+ return prev_p;
9081+}
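
__switch_to() above avoids trapping into Xen once per operation: the FPU switch, the kernel stack switch, TLS descriptor updates and any iopl/io-bitmap changes are queued into a local multicall_entry_t array and submitted together through one HYPERVISOR_multicall_check(). The sketch below shows only that batching pattern, with printf placeholders standing in for hypercalls; nothing here is the Xen interface.

    #include <stdio.h>

    struct toy_call {
        const char *op;
        unsigned long arg;
    };

    #define MAX_BATCH 8

    static struct toy_call batch[MAX_BATCH];
    static int nbatch;

    static void queue_call(const char *op, unsigned long arg)
    {
        if (nbatch < MAX_BATCH) {
            batch[nbatch].op = op;
            batch[nbatch].arg = arg;
            nbatch++;
        }
    }

    /* One "trap" services every queued request. */
    static void flush_batch(void)
    {
        int i;

        for (i = 0; i < nbatch; i++)
            printf("multicall[%d]: %s(%#lx)\n", i, batch[i].op, batch[i].arg);
        nbatch = 0;
    }

    int main(void)
    {
        queue_call("fpu_taskswitch", 1);
        queue_call("stack_switch", 0xc1000000UL);
        queue_call("update_descriptor", 3);
        flush_batch();
        return 0;
    }
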
9082+
9083+asmlinkage int sys_fork(struct pt_regs regs)
9084+{
9085+ return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
9086+}
9087+
9088+asmlinkage int sys_clone(struct pt_regs regs)
9089+{
9090+ unsigned long clone_flags;
9091+ unsigned long newsp;
9092+ int __user *parent_tidptr, *child_tidptr;
9093+
9094+ clone_flags = regs.ebx;
9095+ newsp = regs.ecx;
9096+ parent_tidptr = (int __user *)regs.edx;
9097+ child_tidptr = (int __user *)regs.edi;
9098+ if (!newsp)
9099+ newsp = regs.esp;
9100+ return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
9101+}
9102+
9103+/*
9104+ * This is trivial, and on the face of it looks like it
9105+ * could equally well be done in user mode.
9106+ *
9107+ * Not so, for quite unobvious reasons - register pressure.
9108+ * In user mode vfork() cannot have a stack frame, and if
9109+ * done by calling the "clone()" system call directly, you
9110+ * do not have enough call-clobbered registers to hold all
9111+ * the information you need.
9112+ */
9113+asmlinkage int sys_vfork(struct pt_regs regs)
9114+{
9115+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
9116+}
9117+
9118+/*
9119+ * sys_execve() executes a new program.
9120+ */
9121+asmlinkage int sys_execve(struct pt_regs regs)
9122+{
9123+ int error;
9124+ char * filename;
9125+
9126+ filename = getname((char __user *) regs.ebx);
9127+ error = PTR_ERR(filename);
9128+ if (IS_ERR(filename))
9129+ goto out;
9130+ error = do_execve(filename,
9131+ (char __user * __user *) regs.ecx,
9132+ (char __user * __user *) regs.edx,
9133+ &regs);
9134+ if (error == 0) {
9135+ task_lock(current);
9136+ current->ptrace &= ~PT_DTRACE;
9137+ task_unlock(current);
9138+ /* Make sure we don't return using sysenter.. */
9139+ set_thread_flag(TIF_IRET);
9140+ }
9141+ putname(filename);
9142+out:
9143+ return error;
9144+}
9145+
9146+#define top_esp (THREAD_SIZE - sizeof(unsigned long))
9147+#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
9148+
9149+unsigned long get_wchan(struct task_struct *p)
9150+{
9151+ unsigned long ebp, esp, eip;
9152+ unsigned long stack_page;
9153+ int count = 0;
9154+ if (!p || p == current || p->state == TASK_RUNNING)
9155+ return 0;
9156+ stack_page = (unsigned long)task_stack_page(p);
9157+ esp = p->thread.esp;
9158+ if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
9159+ return 0;
9160+ /* include/asm-i386/system.h:switch_to() pushes ebp last. */
9161+ ebp = *(unsigned long *) esp;
9162+ do {
9163+ if (ebp < stack_page || ebp > top_ebp+stack_page)
9164+ return 0;
9165+ eip = *(unsigned long *) (ebp+4);
9166+ if (!in_sched_functions(eip))
9167+ return eip;
9168+ ebp = *(unsigned long *) ebp;
9169+ } while (count++ < 16);
9170+ return 0;
9171+}
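
get_wchan() above walks saved frame pointers: the switched-out task's esp points at a frame whose ebp was pushed last, each saved ebp links to the caller's frame, the return eip sits one word above it, and the walk stops at the first eip outside the scheduler (or after 16 frames). The toy model below replays that walk over a fake in-memory "stack"; indices stand in for addresses and every value is made up.

    #include <stdio.h>

    int main(void)
    {
        /* Each frame is two words: [saved ebp][return eip]; address = index * 4. */
        unsigned long stack[16] = { 0 };
        unsigned long ebp, eip;
        int count = 0;

        stack[2] = 6 * 4;       /* inner frame: saved ebp points at the outer frame */
        stack[3] = 0x1111;      /* inner frame: return eip */
        stack[6] = 0;           /* outer frame: end of the chain */
        stack[7] = 0x2222;      /* outer frame: return eip */

        ebp = 2 * 4;
        while (ebp && count++ < 16) {
            eip = stack[ebp / 4 + 1];
            printf("frame at %lu, return eip 0x%lx\n", ebp, eip);
            ebp = stack[ebp / 4];
        }
        return 0;
    }
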
9172+
9173+/*
9174+ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
9175+ */
9176+static int get_free_idx(void)
9177+{
9178+ struct thread_struct *t = &current->thread;
9179+ int idx;
9180+
9181+ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
9182+ if (desc_empty(t->tls_array + idx))
9183+ return idx + GDT_ENTRY_TLS_MIN;
9184+ return -ESRCH;
9185+}
9186+
9187+/*
9188+ * Set a given TLS descriptor:
9189+ */
9190+asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
9191+{
9192+ struct thread_struct *t = &current->thread;
9193+ struct user_desc info;
9194+ struct desc_struct *desc;
9195+ int cpu, idx;
9196+
9197+ if (copy_from_user(&info, u_info, sizeof(info)))
9198+ return -EFAULT;
9199+ idx = info.entry_number;
9200+
9201+ /*
9202+ * index -1 means the kernel should try to find and
9203+ * allocate an empty descriptor:
9204+ */
9205+ if (idx == -1) {
9206+ idx = get_free_idx();
9207+ if (idx < 0)
9208+ return idx;
9209+ if (put_user(idx, &u_info->entry_number))
9210+ return -EFAULT;
9211+ }
9212+
9213+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
9214+ return -EINVAL;
9215+
9216+ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
9217+
9218+ /*
9219+ * We must not get preempted while modifying the TLS.
9220+ */
9221+ cpu = get_cpu();
9222+
9223+ if (LDT_empty(&info)) {
9224+ desc->a = 0;
9225+ desc->b = 0;
9226+ } else {
9227+ desc->a = LDT_entry_a(&info);
9228+ desc->b = LDT_entry_b(&info);
9229+ }
9230+ load_TLS(t, cpu);
9231+
9232+ put_cpu();
9233+
9234+ return 0;
9235+}
9236+
9237+/*
9238+ * Get the current Thread-Local Storage area:
9239+ */
9240+
9241+#define GET_BASE(desc) ( \
9242+ (((desc)->a >> 16) & 0x0000ffff) | \
9243+ (((desc)->b << 16) & 0x00ff0000) | \
9244+ ( (desc)->b & 0xff000000) )
9245+
9246+#define GET_LIMIT(desc) ( \
9247+ ((desc)->a & 0x0ffff) | \
9248+ ((desc)->b & 0xf0000) )
9249+
9250+#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
9251+#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
9252+#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
9253+#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
9254+#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
9255+#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
9256+
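
The GET_* macros above unpack a segment descriptor from its two 32-bit halves: base bits 15..0 come from the top half of word a, bits 23..16 from the low byte of word b, bits 31..24 from the top byte of b, and the 20-bit limit is split between the low 16 bits of a and bits 16..19 of b. A userspace sketch of the base/limit extraction with a made-up descriptor value:

    #include <stdio.h>

    static unsigned int get_base(unsigned int a, unsigned int b)
    {
        return ((a >> 16) & 0x0000ffff) |   /* base 15..0  */
               ((b << 16) & 0x00ff0000) |   /* base 23..16 */
               ( b        & 0xff000000);    /* base 31..24 */
    }

    static unsigned int get_limit(unsigned int a, unsigned int b)
    {
        return (a & 0x0ffff) | (b & 0xf0000);
    }

    int main(void)
    {
        /* Descriptor encoding base 0x12345678 and limit 0xabcde (example values). */
        unsigned int a = (0x5678u << 16) | 0xbcde;
        unsigned int b = 0x12000000u | 0x000a0000u | 0x34u;

        printf("base  = 0x%08x\n", get_base(a, b));    /* 0x12345678 */
        printf("limit = 0x%05x\n", get_limit(a, b));   /* 0xabcde */
        return 0;
    }
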
9257+asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
9258+{
9259+ struct user_desc info;
9260+ struct desc_struct *desc;
9261+ int idx;
9262+
9263+ if (get_user(idx, &u_info->entry_number))
9264+ return -EFAULT;
9265+ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
9266+ return -EINVAL;
9267+
9268+ memset(&info, 0, sizeof(info));
9269+
9270+ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
9271+
9272+ info.entry_number = idx;
9273+ info.base_addr = GET_BASE(desc);
9274+ info.limit = GET_LIMIT(desc);
9275+ info.seg_32bit = GET_32BIT(desc);
9276+ info.contents = GET_CONTENTS(desc);
9277+ info.read_exec_only = !GET_WRITABLE(desc);
9278+ info.limit_in_pages = GET_LIMIT_PAGES(desc);
9279+ info.seg_not_present = !GET_PRESENT(desc);
9280+ info.useable = GET_USEABLE(desc);
9281+
9282+ if (copy_to_user(u_info, &info, sizeof(info)))
9283+ return -EFAULT;
9284+ return 0;
9285+}
9286+
9287+unsigned long arch_align_stack(unsigned long sp)
9288+{
9289+ if (randomize_va_space)
9290+ sp -= get_random_int() % 8192;
9291+ return sp & ~0xf;
9292+}
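
arch_align_stack() above perturbs the initial user stack pointer by up to 8 KiB when address-space randomization is enabled and then rounds it down to a 16-byte boundary. A standalone sketch of that arithmetic, with rand() standing in for the kernel's get_random_int():

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long toy_align_stack(unsigned long sp, int randomize)
    {
        if (randomize)
            sp -= (unsigned long)(rand() % 8192);   /* knock off up to 8 KiB - 1 */
        return sp & ~0xfUL;                         /* round down to 16 bytes */
    }

    int main(void)
    {
        unsigned long sp = 0xbfff0000UL;

        printf("0x%lx -> 0x%lx\n", sp, toy_align_stack(sp, 1));
        return 0;
    }
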
9293Index: head-2008-11-25/arch/x86/kernel/quirks-xen.c
9294===================================================================
9295--- /dev/null 1970-01-01 00:00:00.000000000 +0000
9296+++ head-2008-11-25/arch/x86/kernel/quirks-xen.c 2008-01-28 12:24:19.000000000 +0100
9297@@ -0,0 +1,47 @@
9298+/*
9299+ * This file contains work-arounds for x86 and x86_64 platform bugs.
9300+ */
9301+#include <linux/pci.h>
9302+#include <linux/irq.h>
9303+
9304+#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
9305+
9306+static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
9307+{
9308+ u8 config, rev;
9309+ u32 word;
9310+
9311+ /* BIOS may enable hardware IRQ balancing for
9312+	 * E7520/E7320/E7525 (revision ID 0x9 and below)
9313+ * based platforms.
9314+ * Disable SW irqbalance/affinity on those platforms.
9315+ */
9316+ pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
9317+ if (rev > 0x9)
9318+ return;
9319+
9320+ printk(KERN_INFO "Intel E7520/7320/7525 detected.");
9321+
9322+ /* enable access to config space*/
9323+ pci_read_config_byte(dev, 0xf4, &config);
9324+ pci_write_config_byte(dev, 0xf4, config|0x2);
9325+
9326+ /* read xTPR register */
9327+ raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
9328+
9329+ if (!(word & (1 << 13))) {
9330+ struct xen_platform_op op;
9331+ printk(KERN_INFO "Disabling irq balancing and affinity\n");
9332+ op.cmd = XENPF_platform_quirk;
9333+ op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
9334+ WARN_ON(HYPERVISOR_platform_op(&op));
9335+ }
9336+
9337+ /* put back the original value for config space*/
9338+ if (!(config & 0x2))
9339+ pci_write_config_byte(dev, 0xf4, config);
9340+}
9341+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
9342+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
9343+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
9344+#endif
9345Index: head-2008-11-25/arch/x86/kernel/setup_32-xen.c
9346===================================================================
9347--- /dev/null 1970-01-01 00:00:00.000000000 +0000
9348+++ head-2008-11-25/arch/x86/kernel/setup_32-xen.c 2008-04-22 15:41:51.000000000 +0200
9349@@ -0,0 +1,1919 @@
9350+/*
9351+ * linux/arch/i386/kernel/setup.c
9352+ *
9353+ * Copyright (C) 1995 Linus Torvalds
9354+ *
9355+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
9356+ *
9357+ * Memory region support
9358+ * David Parsons <orc@pell.chi.il.us>, July-August 1999
9359+ *
9360+ * Added E820 sanitization routine (removes overlapping memory regions);
9361+ * Brian Moyle <bmoyle@mvista.com>, February 2001
9362+ *
9363+ * Moved CPU detection code to cpu/${cpu}.c
9364+ * Patrick Mochel <mochel@osdl.org>, March 2002
9365+ *
9366+ * Provisions for empty E820 memory regions (reported by certain BIOSes).
9367+ * Alex Achenbach <xela@slit.de>, December 2002.
9368+ *
9369+ */
9370+
9371+/*
9372+ * This file handles the architecture-dependent parts of initialization
9373+ */
9374+
9375+#include <linux/sched.h>
9376+#include <linux/mm.h>
9377+#include <linux/mmzone.h>
9378+#include <linux/screen_info.h>
9379+#include <linux/ioport.h>
9380+#include <linux/acpi.h>
9381+#include <linux/apm_bios.h>
9382+#include <linux/initrd.h>
9383+#include <linux/bootmem.h>
9384+#include <linux/seq_file.h>
9385+#include <linux/platform_device.h>
9386+#include <linux/console.h>
9387+#include <linux/mca.h>
9388+#include <linux/root_dev.h>
9389+#include <linux/highmem.h>
9390+#include <linux/module.h>
9391+#include <linux/efi.h>
9392+#include <linux/init.h>
9393+#include <linux/edd.h>
9394+#include <linux/nodemask.h>
9395+#include <linux/kernel.h>
9396+#include <linux/percpu.h>
9397+#include <linux/notifier.h>
9398+#include <linux/kexec.h>
9399+#include <linux/crash_dump.h>
9400+#include <linux/dmi.h>
9401+#include <linux/pfn.h>
9402+
9403+#include <video/edid.h>
9404+
9405+#include <asm/apic.h>
9406+#include <asm/e820.h>
9407+#include <asm/mpspec.h>
9408+#include <asm/setup.h>
9409+#include <asm/arch_hooks.h>
9410+#include <asm/sections.h>
9411+#include <asm/io_apic.h>
9412+#include <asm/ist.h>
9413+#include <asm/io.h>
9414+#include <asm/hypervisor.h>
9415+#include <xen/interface/physdev.h>
9416+#include <xen/interface/memory.h>
9417+#include <xen/features.h>
9418+#include <xen/firmware.h>
9419+#include <xen/xencons.h>
9420+#include <setup_arch.h>
9421+#include <bios_ebda.h>
9422+
9423+#ifdef CONFIG_XEN
9424+#include <xen/interface/kexec.h>
9425+#endif
9426+
9427+/* Forward Declaration. */
9428+void __init find_max_pfn(void);
9429+
9430+static int xen_panic_event(struct notifier_block *, unsigned long, void *);
9431+static struct notifier_block xen_panic_block = {
9432+ xen_panic_event, NULL, 0 /* try to go last */
9433+};
9434+
9435+extern char hypercall_page[PAGE_SIZE];
9436+EXPORT_SYMBOL(hypercall_page);
9437+
9438+int disable_pse __devinitdata = 0;
9439+
9440+/*
9441+ * Machine setup..
9442+ */
9443+
9444+#ifdef CONFIG_EFI
9445+int efi_enabled = 0;
9446+EXPORT_SYMBOL(efi_enabled);
9447+#endif
9448+
9449+/* cpu data as detected by the assembly code in head.S */
9450+struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
9451+/* common cpu data for all cpus */
9452+struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
9453+EXPORT_SYMBOL(boot_cpu_data);
9454+
9455+unsigned long mmu_cr4_features;
9456+
9457+#ifdef CONFIG_ACPI
9458+ int acpi_disabled = 0;
9459+#else
9460+ int acpi_disabled = 1;
9461+#endif
9462+EXPORT_SYMBOL(acpi_disabled);
9463+
9464+#ifdef CONFIG_ACPI
9465+int __initdata acpi_force = 0;
9466+extern acpi_interrupt_flags acpi_sci_flags;
9467+#endif
9468+
9469+/* for MCA, but anyone else can use it if they want */
9470+unsigned int machine_id;
9471+#ifdef CONFIG_MCA
9472+EXPORT_SYMBOL(machine_id);
9473+#endif
9474+unsigned int machine_submodel_id;
9475+unsigned int BIOS_revision;
9476+unsigned int mca_pentium_flag;
9477+
9478+/* For PCI or other memory-mapped resources */
9479+unsigned long pci_mem_start = 0x10000000;
9480+#ifdef CONFIG_PCI
9481+EXPORT_SYMBOL(pci_mem_start);
9482+#endif
9483+
9484+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
9485+int bootloader_type;
9486+
9487+/* user-defined highmem size */
9488+static unsigned int highmem_pages = -1;
9489+
9490+/*
9491+ * Setup options
9492+ */
9493+struct drive_info_struct { char dummy[32]; } drive_info;
9494+#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
9495+ defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
9496+EXPORT_SYMBOL(drive_info);
9497+#endif
9498+struct screen_info screen_info;
9499+EXPORT_SYMBOL(screen_info);
9500+struct apm_info apm_info;
9501+EXPORT_SYMBOL(apm_info);
9502+struct sys_desc_table_struct {
9503+ unsigned short length;
9504+ unsigned char table[0];
9505+};
9506+struct edid_info edid_info;
9507+EXPORT_SYMBOL_GPL(edid_info);
9508+#ifndef CONFIG_XEN
9509+#define copy_edid() (edid_info = EDID_INFO)
9510+#endif
9511+struct ist_info ist_info;
9512+#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
9513+ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
9514+EXPORT_SYMBOL(ist_info);
9515+#endif
9516+struct e820map e820;
9517+#ifdef CONFIG_XEN
9518+struct e820map machine_e820;
9519+#endif
9520+
9521+extern void early_cpu_init(void);
9522+extern void generic_apic_probe(char *);
9523+extern int root_mountflags;
9524+
9525+unsigned long saved_videomode;
9526+
9527+#define RAMDISK_IMAGE_START_MASK 0x07FF
9528+#define RAMDISK_PROMPT_FLAG 0x8000
9529+#define RAMDISK_LOAD_FLAG 0x4000
9530+
9531+static char command_line[COMMAND_LINE_SIZE];
9532+
9533+unsigned char __initdata boot_params[PARAM_SIZE];
9534+
9535+static struct resource data_resource = {
9536+ .name = "Kernel data",
9537+ .start = 0,
9538+ .end = 0,
9539+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
9540+};
9541+
9542+static struct resource code_resource = {
9543+ .name = "Kernel code",
9544+ .start = 0,
9545+ .end = 0,
9546+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
9547+};
9548+
9549+static struct resource system_rom_resource = {
9550+ .name = "System ROM",
9551+ .start = 0xf0000,
9552+ .end = 0xfffff,
9553+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
9554+};
9555+
9556+static struct resource extension_rom_resource = {
9557+ .name = "Extension ROM",
9558+ .start = 0xe0000,
9559+ .end = 0xeffff,
9560+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
9561+};
9562+
9563+static struct resource adapter_rom_resources[] = { {
9564+ .name = "Adapter ROM",
9565+ .start = 0xc8000,
9566+ .end = 0,
9567+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
9568+}, {
9569+ .name = "Adapter ROM",
9570+ .start = 0,
9571+ .end = 0,
9572+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
9573+}, {
9574+ .name = "Adapter ROM",
9575+ .start = 0,
9576+ .end = 0,
9577+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
9578+}, {
9579+ .name = "Adapter ROM",
9580+ .start = 0,
9581+ .end = 0,
9582+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
9583+}, {
9584+ .name = "Adapter ROM",
9585+ .start = 0,
9586+ .end = 0,
9587+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
9588+}, {
9589+ .name = "Adapter ROM",
9590+ .start = 0,
9591+ .end = 0,
9592+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
9593+} };
9594+
9595+#define ADAPTER_ROM_RESOURCES \
9596+ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
9597+
9598+static struct resource video_rom_resource = {
9599+ .name = "Video ROM",
9600+ .start = 0xc0000,
9601+ .end = 0xc7fff,
9602+ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
9603+};
9604+
9605+static struct resource video_ram_resource = {
9606+ .name = "Video RAM area",
9607+ .start = 0xa0000,
9608+ .end = 0xbffff,
9609+ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
9610+};
9611+
9612+static struct resource standard_io_resources[] = { {
9613+ .name = "dma1",
9614+ .start = 0x0000,
9615+ .end = 0x001f,
9616+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
9617+}, {
9618+ .name = "pic1",
9619+ .start = 0x0020,
9620+ .end = 0x0021,
9621+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
9622+}, {
9623+ .name = "timer0",
9624+ .start = 0x0040,
9625+ .end = 0x0043,
9626+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
9627+}, {
9628+ .name = "timer1",
9629+ .start = 0x0050,
9630+ .end = 0x0053,
9631+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
9632+}, {
9633+ .name = "keyboard",
9634+ .start = 0x0060,
9635+ .end = 0x006f,
9636+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
9637+}, {
9638+ .name = "dma page reg",
9639+ .start = 0x0080,
9640+ .end = 0x008f,
9641+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
9642+}, {
9643+ .name = "pic2",
9644+ .start = 0x00a0,
9645+ .end = 0x00a1,
9646+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
9647+}, {
9648+ .name = "dma2",
9649+ .start = 0x00c0,
9650+ .end = 0x00df,
9651+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
9652+}, {
9653+ .name = "fpu",
9654+ .start = 0x00f0,
9655+ .end = 0x00ff,
9656+ .flags = IORESOURCE_BUSY | IORESOURCE_IO
9657+} };
9658+
9659+#define STANDARD_IO_RESOURCES \
9660+ (sizeof standard_io_resources / sizeof standard_io_resources[0])
9661+
9662+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
9663+
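+/*
+ * A valid option ROM checksums to zero over its declared length: sum
+ * every byte (mod 256) and accept the image only if the result is 0.
+ */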
9664+static int __init romchecksum(unsigned char *rom, unsigned long length)
9665+{
9666+ unsigned char *p, sum = 0;
9667+
9668+ for (p = rom; p < rom + length; p++)
9669+ sum += *p;
9670+ return sum == 0;
9671+}
9672+
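+/*
+ * Scan the legacy BIOS ROM areas (video, system, extension and adapter
+ * ROMs), claiming regions that carry a valid 0xaa55 signature and
+ * checksum so they appear under /proc/iomem. Only dom0 sees real ROMs.
+ */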
9673+static void __init probe_roms(void)
9674+{
9675+ unsigned long start, length, upper;
9676+ unsigned char *rom;
9677+ int i;
9678+
9679+#ifdef CONFIG_XEN
9680+ /* Nothing to do if not running in dom0. */
9681+ if (!is_initial_xendomain())
9682+ return;
9683+#endif
9684+
9685+ /* video rom */
9686+ upper = adapter_rom_resources[0].start;
9687+ for (start = video_rom_resource.start; start < upper; start += 2048) {
9688+ rom = isa_bus_to_virt(start);
9689+ if (!romsignature(rom))
9690+ continue;
9691+
9692+ video_rom_resource.start = start;
9693+
9694+ /* 0 < length <= 0x7f * 512, historically */
9695+ length = rom[2] * 512;
9696+
9697+ /* if checksum okay, trust length byte */
9698+ if (length && romchecksum(rom, length))
9699+ video_rom_resource.end = start + length - 1;
9700+
9701+ request_resource(&iomem_resource, &video_rom_resource);
9702+ break;
9703+ }
9704+
9705+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
9706+ if (start < upper)
9707+ start = upper;
9708+
9709+ /* system rom */
9710+ request_resource(&iomem_resource, &system_rom_resource);
9711+ upper = system_rom_resource.start;
9712+
9713+ /* check for extension rom (ignore length byte!) */
9714+ rom = isa_bus_to_virt(extension_rom_resource.start);
9715+ if (romsignature(rom)) {
9716+ length = extension_rom_resource.end - extension_rom_resource.start + 1;
9717+ if (romchecksum(rom, length)) {
9718+ request_resource(&iomem_resource, &extension_rom_resource);
9719+ upper = extension_rom_resource.start;
9720+ }
9721+ }
9722+
9723+ /* check for adapter roms on 2k boundaries */
9724+ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
9725+ rom = isa_bus_to_virt(start);
9726+ if (!romsignature(rom))
9727+ continue;
9728+
9729+ /* 0 < length <= 0x7f * 512, historically */
9730+ length = rom[2] * 512;
9731+
9732+ /* but accept any length that fits if checksum okay */
9733+ if (!length || start + length > upper || !romchecksum(rom, length))
9734+ continue;
9735+
9736+ adapter_rom_resources[i].start = start;
9737+ adapter_rom_resources[i].end = start + length - 1;
9738+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
9739+
9740+ start = adapter_rom_resources[i++].end & ~2047UL;
9741+ }
9742+}
9743+
9744+/*
9745+ * Point at the empty zero page to start with. We map the real shared_info
9746+ * page as soon as fixmap is up and running.
9747+ */
9748+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
9749+EXPORT_SYMBOL(HYPERVISOR_shared_info);
9750+
9751+unsigned long *phys_to_machine_mapping;
9752+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
9753+EXPORT_SYMBOL(phys_to_machine_mapping);
9754+
9755+/* Raw start-of-day parameters from the hypervisor. */
9756+start_info_t *xen_start_info;
9757+EXPORT_SYMBOL(xen_start_info);
9758+
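+/*
+ * Append an entry to the kernel's e820 map. When EFI is enabled the
+ * firmware memory map is authoritative instead, so the entry is dropped.
+ */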
9759+void __init add_memory_region(unsigned long long start,
9760+ unsigned long long size, int type)
9761+{
9762+ int x;
9763+
9764+ if (!efi_enabled) {
9765+ x = e820.nr_map;
9766+
9767+ if (x == E820MAX) {
9768+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
9769+ return;
9770+ }
9771+
9772+ e820.map[x].addr = start;
9773+ e820.map[x].size = size;
9774+ e820.map[x].type = type;
9775+ e820.nr_map++;
9776+ }
9777+} /* add_memory_region */
9778+
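+/*
+ * Truncate the memory map (EFI or e820) so that no usable RAM extends
+ * beyond 'size' bytes; this is how mem=/memmap= limits are enforced.
+ * Under Xen the final entry may instead be grown up to the requested
+ * size (see the CONFIG_XEN block below).
+ */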
9779+static void __init limit_regions(unsigned long long size)
9780+{
9781+ unsigned long long current_addr = 0;
9782+ int i;
9783+
9784+ if (efi_enabled) {
9785+ efi_memory_desc_t *md;
9786+ void *p;
9787+
9788+ for (p = memmap.map, i = 0; p < memmap.map_end;
9789+ p += memmap.desc_size, i++) {
9790+ md = p;
9791+ current_addr = md->phys_addr + (md->num_pages << 12);
9792+ if (md->type == EFI_CONVENTIONAL_MEMORY) {
9793+ if (current_addr >= size) {
9794+ md->num_pages -=
9795+ (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
9796+ memmap.nr_map = i + 1;
9797+ return;
9798+ }
9799+ }
9800+ }
9801+ }
9802+ for (i = 0; i < e820.nr_map; i++) {
9803+ current_addr = e820.map[i].addr + e820.map[i].size;
9804+ if (current_addr < size)
9805+ continue;
9806+
9807+ if (e820.map[i].type != E820_RAM)
9808+ continue;
9809+
9810+ if (e820.map[i].addr >= size) {
9811+ /*
9812+ * This region starts past the end of the
9813+ * requested size, skip it completely.
9814+ */
9815+ e820.nr_map = i;
9816+ } else {
9817+ e820.nr_map = i + 1;
9818+ e820.map[i].size -= current_addr - size;
9819+ }
9820+ return;
9821+ }
9822+#ifdef CONFIG_XEN
9823+ if (i==e820.nr_map && current_addr < size) {
9824+ /*
9825+ * The e820 map finished before our requested size so
9826+ * extend the final entry to the requested address.
9827+ */
9828+ --i;
9829+ if (e820.map[i].type == E820_RAM)
9830+ e820.map[i].size -= current_addr - size;
9831+ else
9832+ add_memory_region(current_addr, size - current_addr, E820_RAM);
9833+ }
9834+#endif
9835+}
9836+
9837+#define E820_DEBUG 1
9838+
9839+static void __init print_memory_map(char *who)
9840+{
9841+ int i;
9842+
9843+ for (i = 0; i < e820.nr_map; i++) {
9844+ printk(" %s: %016Lx - %016Lx ", who,
9845+ e820.map[i].addr,
9846+ e820.map[i].addr + e820.map[i].size);
9847+ switch (e820.map[i].type) {
9848+ case E820_RAM: printk("(usable)\n");
9849+ break;
9850+ case E820_RESERVED:
9851+ printk("(reserved)\n");
9852+ break;
9853+ case E820_ACPI:
9854+ printk("(ACPI data)\n");
9855+ break;
9856+ case E820_NVS:
9857+ printk("(ACPI NVS)\n");
9858+ break;
9859+ default: printk("type %lu\n", e820.map[i].type);
9860+ break;
9861+ }
9862+ }
9863+}
9864+
9865+/*
9866+ * Sanitize the BIOS e820 map.
9867+ *
9868+ * Some e820 responses include overlapping entries. The following
9869+ * replaces the original e820 map with a new one, removing overlaps.
9870+ *
9871+ */
9872+struct change_member {
9873+ struct e820entry *pbios; /* pointer to original bios entry */
9874+ unsigned long long addr; /* address for this change point */
9875+};
9876+static struct change_member change_point_list[2*E820MAX] __initdata;
9877+static struct change_member *change_point[2*E820MAX] __initdata;
9878+static struct e820entry *overlap_list[E820MAX] __initdata;
9879+static struct e820entry new_bios[E820MAX] __initdata;
9880+
9881+int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
9882+{
9883+ struct change_member *change_tmp;
9884+ unsigned long current_type, last_type;
9885+ unsigned long long last_addr;
9886+ int chgidx, still_changing;
9887+ int overlap_entries;
9888+ int new_bios_entry;
9889+ int old_nr, new_nr, chg_nr;
9890+ int i;
9891+
9892+ /*
9893+ Visually we're performing the following (1,2,3,4 = memory types)...
9894+
9895+ Sample memory map (w/overlaps):
9896+ ____22__________________
9897+ ______________________4_
9898+ ____1111________________
9899+ _44_____________________
9900+ 11111111________________
9901+ ____________________33__
9902+ ___________44___________
9903+ __________33333_________
9904+ ______________22________
9905+ ___________________2222_
9906+ _________111111111______
9907+ _____________________11_
9908+ _________________4______
9909+
9910+ Sanitized equivalent (no overlap):
9911+ 1_______________________
9912+ _44_____________________
9913+ ___1____________________
9914+ ____22__________________
9915+ ______11________________
9916+ _________1______________
9917+ __________3_____________
9918+ ___________44___________
9919+ _____________33_________
9920+ _______________2________
9921+ ________________1_______
9922+ _________________4______
9923+ ___________________2____
9924+ ____________________33__
9925+ ______________________4_
9926+ */
9927+
9928+ /* if there's only one memory region, don't bother */
9929+ if (*pnr_map < 2)
9930+ return -1;
9931+
9932+ old_nr = *pnr_map;
9933+
9934+ /* bail out if we find any unreasonable addresses in bios map */
9935+ for (i=0; i<old_nr; i++)
9936+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
9937+ return -1;
9938+
9939+ /* create pointers for initial change-point information (for sorting) */
9940+ for (i=0; i < 2*old_nr; i++)
9941+ change_point[i] = &change_point_list[i];
9942+
9943+ /* record all known change-points (starting and ending addresses),
9944+ omitting those that are for empty memory regions */
9945+ chgidx = 0;
9946+ for (i=0; i < old_nr; i++) {
9947+ if (biosmap[i].size != 0) {
9948+ change_point[chgidx]->addr = biosmap[i].addr;
9949+ change_point[chgidx++]->pbios = &biosmap[i];
9950+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
9951+ change_point[chgidx++]->pbios = &biosmap[i];
9952+ }
9953+ }
9954+ chg_nr = chgidx; /* true number of change-points */
9955+
9956+ /* sort change-point list by memory addresses (low -> high) */
9957+ still_changing = 1;
9958+ while (still_changing) {
9959+ still_changing = 0;
9960+ for (i=1; i < chg_nr; i++) {
9961+ /* if <current_addr> > <last_addr>, swap */
9962+ /* or, if current=<start_addr> & last=<end_addr>, swap */
9963+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
9964+ ((change_point[i]->addr == change_point[i-1]->addr) &&
9965+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
9966+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
9967+ )
9968+ {
9969+ change_tmp = change_point[i];
9970+ change_point[i] = change_point[i-1];
9971+ change_point[i-1] = change_tmp;
9972+ still_changing=1;
9973+ }
9974+ }
9975+ }
9976+
9977+ /* create a new bios memory map, removing overlaps */
9978+ overlap_entries=0; /* number of entries in the overlap table */
9979+ new_bios_entry=0; /* index for creating new bios map entries */
9980+ last_type = 0; /* start with undefined memory type */
9981+ last_addr = 0; /* start with 0 as last starting address */
9982+ /* loop through change-points, determining affect on the new bios map */
9983+ for (chgidx=0; chgidx < chg_nr; chgidx++)
9984+ {
9985+ /* keep track of all overlapping bios entries */
9986+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
9987+ {
9988+ /* add map entry to overlap list (> 1 entry implies an overlap) */
9989+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
9990+ }
9991+ else
9992+ {
9993+ /* remove entry from list (order independent, so swap with last) */
9994+ for (i=0; i<overlap_entries; i++)
9995+ {
9996+ if (overlap_list[i] == change_point[chgidx]->pbios)
9997+ overlap_list[i] = overlap_list[overlap_entries-1];
9998+ }
9999+ overlap_entries--;
10000+ }
10001+ /* if there are overlapping entries, decide which "type" to use */
10002+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
10003+ current_type = 0;
10004+ for (i=0; i<overlap_entries; i++)
10005+ if (overlap_list[i]->type > current_type)
10006+ current_type = overlap_list[i]->type;
10007+ /* continue building up new bios map based on this information */
10008+ if (current_type != last_type) {
10009+ if (last_type != 0) {
10010+ new_bios[new_bios_entry].size =
10011+ change_point[chgidx]->addr - last_addr;
10012+ /* move forward only if the new size was non-zero */
10013+ if (new_bios[new_bios_entry].size != 0)
10014+ if (++new_bios_entry >= E820MAX)
10015+ break; /* no more space left for new bios entries */
10016+ }
10017+ if (current_type != 0) {
10018+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
10019+ new_bios[new_bios_entry].type = current_type;
10020+ last_addr=change_point[chgidx]->addr;
10021+ }
10022+ last_type = current_type;
10023+ }
10024+ }
10025+ new_nr = new_bios_entry; /* retain count for new bios entries */
10026+
10027+ /* copy new bios mapping into original location */
10028+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
10029+ *pnr_map = new_nr;
10030+
10031+ return 0;
10032+}
10033+
10034+/*
10035+ * Copy the BIOS e820 map into a safe place.
10036+ *
10037+ * Sanity-check it while we're at it..
10038+ *
10039+ * If we're lucky and live on a modern system, the setup code
10040+ * will have given us a memory map that we can use to properly
10041+ * set up memory. If we aren't, we'll fake a memory map.
10042+ *
10043+ * We check to see that the memory map contains at least 2 elements
10044+ * before we'll use it, because the detection code in setup.S may
10045+ * not be perfect and most every PC known to man has two memory
10046+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
10047+ * thinkpad 560x, for example, does not cooperate with the memory
10048+ * detection code.)
10049+ */
10050+int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
10051+{
10052+#ifndef CONFIG_XEN
10053+ /* Only one memory region (or negative)? Ignore it */
10054+ if (nr_map < 2)
10055+ return -1;
10056+#else
10057+ BUG_ON(nr_map < 1);
10058+#endif
10059+
10060+ do {
10061+ unsigned long long start = biosmap->addr;
10062+ unsigned long long size = biosmap->size;
10063+ unsigned long long end = start + size;
10064+ unsigned long type = biosmap->type;
10065+
10066+ /* Overflow in 64 bits? Ignore the memory map. */
10067+ if (start > end)
10068+ return -1;
10069+
10070+#ifndef CONFIG_XEN
10071+ /*
10072+ * Some BIOSes claim RAM in the 640k - 1M region.
10073+ * Not right. Fix it up.
10074+ */
10075+ if (type == E820_RAM) {
10076+ if (start < 0x100000ULL && end > 0xA0000ULL) {
10077+ if (start < 0xA0000ULL)
10078+ add_memory_region(start, 0xA0000ULL-start, type);
10079+ if (end <= 0x100000ULL)
10080+ continue;
10081+ start = 0x100000ULL;
10082+ size = end - start;
10083+ }
10084+ }
10085+#endif
10086+ add_memory_region(start, size, type);
10087+ } while (biosmap++,--nr_map);
10088+
10089+#ifdef CONFIG_XEN
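+	/*
+	 * Dom0 additionally fetches the host (machine) memory map from the
+	 * hypervisor so that I/O resources can later be registered against
+	 * real physical addresses; an unprivileged domain simply mirrors
+	 * its own pseudo-physical e820.
+	 */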
10090+ if (is_initial_xendomain()) {
10091+ struct xen_memory_map memmap;
10092+
10093+ memmap.nr_entries = E820MAX;
10094+ set_xen_guest_handle(memmap.buffer, machine_e820.map);
10095+
10096+ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
10097+ BUG();
10098+ machine_e820.nr_map = memmap.nr_entries;
10099+ } else
10100+ machine_e820 = e820;
10101+#endif
10102+
10103+ return 0;
10104+}
10105+
10106+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
10107+struct edd edd;
10108+#ifdef CONFIG_EDD_MODULE
10109+EXPORT_SYMBOL(edd);
10110+#endif
10111+#ifndef CONFIG_XEN
10112+/**
10113+ * copy_edd() - Copy the BIOS EDD information
10114+ * from boot_params into a safe place.
10115+ *
10116+ */
10117+static inline void copy_edd(void)
10118+{
10119+ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
10120+ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
10121+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
10122+ edd.edd_info_nr = EDD_NR;
10123+}
10124+#endif
10125+#else
10126+static inline void copy_edd(void)
10127+{
10128+}
10129+#endif
10130+
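+/*
+ * Early parsing of the Xen-provided command line: memory layout options
+ * (mem=, memmap=, highmem=, vmalloc=), ACPI/APIC switches and the
+ * kexec/crash-dump parameters are handled here, before the generic
+ * option parser runs. An unparsed copy is kept for /proc/cmdline.
+ */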
10131+static void __init parse_cmdline_early (char ** cmdline_p)
10132+{
10133+ char c = ' ', *to = command_line, *from = saved_command_line;
10134+ int len = 0, max_cmdline;
10135+ int userdef = 0;
10136+
10137+ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
10138+ max_cmdline = COMMAND_LINE_SIZE;
10139+ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
10140+ /* Save unparsed command line copy for /proc/cmdline */
10141+ saved_command_line[max_cmdline-1] = '\0';
10142+
10143+ for (;;) {
10144+ if (c != ' ')
10145+ goto next_char;
10146+ /*
10147+ * "mem=nopentium" disables the 4MB page tables.
10148+ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
10149+ * to <mem>, overriding the bios size.
10150+ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
10151+ * <start> to <start>+<mem>, overriding the bios size.
10152+ *
10153+ * HPA tells me bootloaders need to parse mem=, so no new
10154+ * option should be mem= [also see Documentation/i386/boot.txt]
10155+ */
10156+ if (!memcmp(from, "mem=", 4)) {
10157+ if (to != command_line)
10158+ to--;
10159+ if (!memcmp(from+4, "nopentium", 9)) {
10160+ from += 9+4;
10161+ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
10162+ disable_pse = 1;
10163+ } else {
10164+ /* If the user specifies memory size, we
10165+ * limit the BIOS-provided memory map to
10166+ * that size. exactmap can be used to specify
10167+ * the exact map. mem=number can be used to
10168+ * trim the existing memory map.
10169+ */
10170+ unsigned long long mem_size;
10171+
10172+ mem_size = memparse(from+4, &from);
10173+ limit_regions(mem_size);
10174+ userdef=1;
10175+ }
10176+ }
10177+
10178+ else if (!memcmp(from, "memmap=", 7)) {
10179+ if (to != command_line)
10180+ to--;
10181+ if (!memcmp(from+7, "exactmap", 8)) {
10182+#ifdef CONFIG_CRASH_DUMP
10183+ /* If we are doing a crash dump, we
10184+ * still need to know the real mem
10185+ * size before original memory map is
10186+ * reset.
10187+ */
10188+ find_max_pfn();
10189+ saved_max_pfn = max_pfn;
10190+#endif
10191+ from += 8+7;
10192+ e820.nr_map = 0;
10193+ userdef = 1;
10194+ } else {
10195+ /* If the user specifies memory size, we
10196+ * limit the BIOS-provided memory map to
10197+ * that size. exactmap can be used to specify
10198+ * the exact map. mem=number can be used to
10199+ * trim the existing memory map.
10200+ */
10201+ unsigned long long start_at, mem_size;
10202+
10203+ mem_size = memparse(from+7, &from);
10204+ if (*from == '@') {
10205+ start_at = memparse(from+1, &from);
10206+ add_memory_region(start_at, mem_size, E820_RAM);
10207+ } else if (*from == '#') {
10208+ start_at = memparse(from+1, &from);
10209+ add_memory_region(start_at, mem_size, E820_ACPI);
10210+ } else if (*from == '$') {
10211+ start_at = memparse(from+1, &from);
10212+ add_memory_region(start_at, mem_size, E820_RESERVED);
10213+ } else {
10214+ limit_regions(mem_size);
10215+ userdef=1;
10216+ }
10217+ }
10218+ }
10219+
10220+ else if (!memcmp(from, "noexec=", 7))
10221+ noexec_setup(from + 7);
10222+
10223+
10224+#ifdef CONFIG_X86_MPPARSE
10225+ /*
10226+ * If the BIOS enumerates physical processors before logical,
10227+ * maxcpus=N at enumeration-time can be used to disable HT.
10228+ */
10229+ else if (!memcmp(from, "maxcpus=", 8)) {
10230+ extern unsigned int maxcpus;
10231+
10232+ maxcpus = simple_strtoul(from + 8, NULL, 0);
10233+ }
10234+#endif
10235+
10236+#ifdef CONFIG_ACPI
10237+ /* "acpi=off" disables both ACPI table parsing and interpreter */
10238+ else if (!memcmp(from, "acpi=off", 8)) {
10239+ disable_acpi();
10240+ }
10241+
10242+ /* acpi=force to over-ride black-list */
10243+ else if (!memcmp(from, "acpi=force", 10)) {
10244+ acpi_force = 1;
10245+ acpi_ht = 1;
10246+ acpi_disabled = 0;
10247+ }
10248+
10249+ /* acpi=strict disables out-of-spec workarounds */
10250+ else if (!memcmp(from, "acpi=strict", 11)) {
10251+ acpi_strict = 1;
10252+ }
10253+
10254+ /* Limit ACPI just to boot-time to enable HT */
10255+ else if (!memcmp(from, "acpi=ht", 7)) {
10256+ if (!acpi_force)
10257+ disable_acpi();
10258+ acpi_ht = 1;
10259+ }
10260+
10261+ /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
10262+ else if (!memcmp(from, "pci=noacpi", 10)) {
10263+ acpi_disable_pci();
10264+ }
10265+ /* "acpi=noirq" disables ACPI interrupt routing */
10266+ else if (!memcmp(from, "acpi=noirq", 10)) {
10267+ acpi_noirq_set();
10268+ }
10269+
10270+ else if (!memcmp(from, "acpi_sci=edge", 13))
10271+ acpi_sci_flags.trigger = 1;
10272+
10273+ else if (!memcmp(from, "acpi_sci=level", 14))
10274+ acpi_sci_flags.trigger = 3;
10275+
10276+ else if (!memcmp(from, "acpi_sci=high", 13))
10277+ acpi_sci_flags.polarity = 1;
10278+
10279+ else if (!memcmp(from, "acpi_sci=low", 12))
10280+ acpi_sci_flags.polarity = 3;
10281+
10282+#ifdef CONFIG_X86_IO_APIC
10283+ else if (!memcmp(from, "acpi_skip_timer_override", 24))
10284+ acpi_skip_timer_override = 1;
10285+
10286+ if (!memcmp(from, "disable_timer_pin_1", 19))
10287+ disable_timer_pin_1 = 1;
10288+ if (!memcmp(from, "enable_timer_pin_1", 18))
10289+ disable_timer_pin_1 = -1;
10290+
10291+ /* disable IO-APIC */
10292+ else if (!memcmp(from, "noapic", 6))
10293+ disable_ioapic_setup();
10294+#endif /* CONFIG_X86_IO_APIC */
10295+#endif /* CONFIG_ACPI */
10296+
10297+#ifdef CONFIG_X86_LOCAL_APIC
10298+ /* enable local APIC */
10299+ else if (!memcmp(from, "lapic", 5))
10300+ lapic_enable();
10301+
10302+ /* disable local APIC */
10303+ else if (!memcmp(from, "nolapic", 6))
10304+ lapic_disable();
10305+#endif /* CONFIG_X86_LOCAL_APIC */
10306+
10307+#ifdef CONFIG_KEXEC
10308+ /* crashkernel=size@addr specifies the location to reserve for
10309+ * a crash kernel. By reserving this memory we guarantee
10310+	 * that linux never sets it up as a DMA target.
10311+ * Useful for holding code to do something appropriate
10312+ * after a kernel panic.
10313+ */
10314+ else if (!memcmp(from, "crashkernel=", 12)) {
10315+#ifndef CONFIG_XEN
10316+ unsigned long size, base;
10317+ size = memparse(from+12, &from);
10318+ if (*from == '@') {
10319+ base = memparse(from+1, &from);
10320+ /* FIXME: Do I want a sanity check
10321+ * to validate the memory range?
10322+ */
10323+ crashk_res.start = base;
10324+ crashk_res.end = base + size - 1;
10325+ }
10326+#else
10327+ printk("Ignoring crashkernel command line, "
10328+ "parameter will be supplied by xen\n");
10329+#endif
10330+ }
10331+#endif
10332+#ifdef CONFIG_PROC_VMCORE
10333+ /* elfcorehdr= specifies the location of elf core header
10334+ * stored by the crashed kernel.
10335+ */
10336+ else if (!memcmp(from, "elfcorehdr=", 11))
10337+ elfcorehdr_addr = memparse(from+11, &from);
10338+#endif
10339+
10340+ /*
10341+ * highmem=size forces highmem to be exactly 'size' bytes.
10342+ * This works even on boxes that have no highmem otherwise.
10343+ * This also works to reduce highmem size on bigger boxes.
10344+ */
10345+ else if (!memcmp(from, "highmem=", 8))
10346+ highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
10347+
10348+ /*
10349+ * vmalloc=size forces the vmalloc area to be exactly 'size'
10350+ * bytes. This can be used to increase (or decrease) the
10351+ * vmalloc area - the default is 128m.
10352+ */
10353+ else if (!memcmp(from, "vmalloc=", 8))
10354+ __VMALLOC_RESERVE = memparse(from+8, &from);
10355+
10356+ next_char:
10357+ c = *(from++);
10358+ if (!c)
10359+ break;
10360+ if (COMMAND_LINE_SIZE <= ++len)
10361+ break;
10362+ *(to++) = c;
10363+ }
10364+ *to = '\0';
10365+ *cmdline_p = command_line;
10366+ if (userdef) {
10367+ printk(KERN_INFO "user-defined physical RAM map:\n");
10368+ print_memory_map("user");
10369+ }
10370+}
10371+
10372+/*
10373+ * Callback for efi_memory_walk.
10374+ */
10375+static int __init
10376+efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
10377+{
10378+ unsigned long *max_pfn = arg, pfn;
10379+
10380+ if (start < end) {
10381+ pfn = PFN_UP(end -1);
10382+ if (pfn > *max_pfn)
10383+ *max_pfn = pfn;
10384+ }
10385+ return 0;
10386+}
10387+
10388+static int __init
10389+efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
10390+{
10391+ memory_present(0, start, end);
10392+ return 0;
10393+}
10394+
10395+/*
10396+ * This function checks if any part of the range <start,end> is mapped
10397+ * with type.
10398+ */
10399+int
10400+e820_any_mapped(u64 start, u64 end, unsigned type)
10401+{
10402+ int i;
10403+
10404+#ifndef CONFIG_XEN
10405+ for (i = 0; i < e820.nr_map; i++) {
10406+ const struct e820entry *ei = &e820.map[i];
10407+#else
10408+ if (!is_initial_xendomain())
10409+ return 0;
10410+ for (i = 0; i < machine_e820.nr_map; ++i) {
10411+ const struct e820entry *ei = &machine_e820.map[i];
10412+#endif
10413+
10414+ if (type && ei->type != type)
10415+ continue;
10416+ if (ei->addr >= end || ei->addr + ei->size <= start)
10417+ continue;
10418+ return 1;
10419+ }
10420+ return 0;
10421+}
10422+EXPORT_SYMBOL_GPL(e820_any_mapped);
10423+
10424+ /*
10425+ * This function checks if the entire range <start,end> is mapped with type.
10426+ *
10427+ * Note: this function only works correctly if the e820 table is sorted and
10428+ * non-overlapping, which is the case.
10429+ */
10430+int __init
10431+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
10432+{
10433+ u64 start = s;
10434+ u64 end = e;
10435+ int i;
10436+
10437+#ifndef CONFIG_XEN
10438+ for (i = 0; i < e820.nr_map; i++) {
10439+ struct e820entry *ei = &e820.map[i];
10440+#else
10441+ if (!is_initial_xendomain())
10442+ return 0;
10443+ for (i = 0; i < machine_e820.nr_map; ++i) {
10444+ const struct e820entry *ei = &machine_e820.map[i];
10445+#endif
10446+ if (type && ei->type != type)
10447+ continue;
10448+		/* does this entry overlap (at least partly) the range being checked? */
10449+ if (ei->addr >= end || ei->addr + ei->size <= start)
10450+ continue;
10451+		/* if the entry covers the start of <start,end>, advance start
10452+		 * past it: everything up to that point is known to be mapped
10453+		 */
10454+ if (ei->addr <= start)
10455+ start = ei->addr + ei->size;
10456+ /* if start is now at or beyond end, we're done, full
10457+ * coverage */
10458+ if (start >= end)
10459+ return 1; /* we're done */
10460+ }
10461+ return 0;
10462+}
10463+
10464+/*
10465+ * Find the highest page frame number we have available
10466+ */
10467+void __init find_max_pfn(void)
10468+{
10469+ int i;
10470+
10471+ max_pfn = 0;
10472+ if (efi_enabled) {
10473+ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
10474+ efi_memmap_walk(efi_memory_present_wrapper, NULL);
10475+ return;
10476+ }
10477+
10478+ for (i = 0; i < e820.nr_map; i++) {
10479+ unsigned long start, end;
10480+ /* RAM? */
10481+ if (e820.map[i].type != E820_RAM)
10482+ continue;
10483+ start = PFN_UP(e820.map[i].addr);
10484+ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
10485+ if (start >= end)
10486+ continue;
10487+ if (end > max_pfn)
10488+ max_pfn = end;
10489+ memory_present(0, start, end);
10490+ }
10491+}
10492+
10493+/*
10494+ * Determine low and high memory ranges:
10495+ */
10496+unsigned long __init find_max_low_pfn(void)
10497+{
10498+ unsigned long max_low_pfn;
10499+
10500+ max_low_pfn = max_pfn;
10501+ if (max_low_pfn > MAXMEM_PFN) {
10502+ if (highmem_pages == -1)
10503+ highmem_pages = max_pfn - MAXMEM_PFN;
10504+ if (highmem_pages + MAXMEM_PFN < max_pfn)
10505+ max_pfn = MAXMEM_PFN + highmem_pages;
10506+ if (highmem_pages + MAXMEM_PFN > max_pfn) {
10507+ printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
10508+ highmem_pages = 0;
10509+ }
10510+ max_low_pfn = MAXMEM_PFN;
10511+#ifndef CONFIG_HIGHMEM
10512+ /* Maximum memory usable is what is directly addressable */
10513+ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
10514+ MAXMEM>>20);
10515+ if (max_pfn > MAX_NONPAE_PFN)
10516+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
10517+ else
10518+ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
10519+ max_pfn = MAXMEM_PFN;
10520+#else /* !CONFIG_HIGHMEM */
10521+#ifndef CONFIG_X86_PAE
10522+ if (max_pfn > MAX_NONPAE_PFN) {
10523+ max_pfn = MAX_NONPAE_PFN;
10524+ printk(KERN_WARNING "Warning only 4GB will be used.\n");
10525+ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
10526+ }
10527+#endif /* !CONFIG_X86_PAE */
10528+#endif /* !CONFIG_HIGHMEM */
10529+ } else {
10530+ if (highmem_pages == -1)
10531+ highmem_pages = 0;
10532+#ifdef CONFIG_HIGHMEM
10533+ if (highmem_pages >= max_pfn) {
10534+ printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
10535+ highmem_pages = 0;
10536+ }
10537+ if (highmem_pages) {
10538+ if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
10539+ printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
10540+ highmem_pages = 0;
10541+ }
10542+ max_low_pfn -= highmem_pages;
10543+ }
10544+#else
10545+ if (highmem_pages)
10546+ printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
10547+#endif
10548+ }
10549+ return max_low_pfn;
10550+}
10551+
10552+/*
10553+ * Free all available memory for boot time allocation. Used
10554+ * as a callback function by efi_memory_walk()
10555+ */
10556+
10557+static int __init
10558+free_available_memory(unsigned long start, unsigned long end, void *arg)
10559+{
10560+ /* check max_low_pfn */
10561+ if (start >= (max_low_pfn << PAGE_SHIFT))
10562+ return 0;
10563+ if (end >= (max_low_pfn << PAGE_SHIFT))
10564+ end = max_low_pfn << PAGE_SHIFT;
10565+ if (start < end)
10566+ free_bootmem(start, end - start);
10567+
10568+ return 0;
10569+}
10570+/*
10571+ * Register fully available low RAM pages with the bootmem allocator.
10572+ */
10573+static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
10574+{
10575+ int i;
10576+
10577+ if (efi_enabled) {
10578+ efi_memmap_walk(free_available_memory, NULL);
10579+ return;
10580+ }
10581+ for (i = 0; i < e820.nr_map; i++) {
10582+ unsigned long curr_pfn, last_pfn, size;
10583+ /*
10584+ * Reserve usable low memory
10585+ */
10586+ if (e820.map[i].type != E820_RAM)
10587+ continue;
10588+ /*
10589+ * We are rounding up the start address of usable memory:
10590+ */
10591+ curr_pfn = PFN_UP(e820.map[i].addr);
10592+ if (curr_pfn >= max_low_pfn)
10593+ continue;
10594+ /*
10595+ * ... and at the end of the usable range downwards:
10596+ */
10597+ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
10598+
10599+#ifdef CONFIG_XEN
10600+ /*
10601+ * Truncate to the number of actual pages currently
10602+ * present.
10603+ */
10604+ if (last_pfn > xen_start_info->nr_pages)
10605+ last_pfn = xen_start_info->nr_pages;
10606+#endif
10607+
10608+ if (last_pfn > max_low_pfn)
10609+ last_pfn = max_low_pfn;
10610+
10611+ /*
10612+ * .. finally, did all the rounding and playing
10613+ * around just make the area go away?
10614+ */
10615+ if (last_pfn <= curr_pfn)
10616+ continue;
10617+
10618+ size = last_pfn - curr_pfn;
10619+ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
10620+ }
10621+}
10622+
10623+#ifndef CONFIG_XEN
10624+/*
10625+ * workaround for Dell systems that neglect to reserve EBDA
10626+ */
10627+static void __init reserve_ebda_region(void)
10628+{
10629+ unsigned int addr;
10630+ addr = get_bios_ebda();
10631+ if (addr)
10632+ reserve_bootmem(addr, PAGE_SIZE);
10633+}
10634+#endif
10635+
10636+#ifndef CONFIG_NEED_MULTIPLE_NODES
10637+void __init setup_bootmem_allocator(void);
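+/*
+ * Establish the basic physical memory layout: the first usable pfn sits
+ * just above the initial page tables handed over by Xen, max_pfn comes
+ * from the e820 map, and the low/high memory split is derived from it
+ * before the bootmem allocator is brought up.
+ */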
10638+static unsigned long __init setup_memory(void)
10639+{
10640+ /*
10641+ * partially used pages are not usable - thus
10642+ * we are rounding upwards:
10643+ */
10644+ min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
10645+ xen_start_info->nr_pt_frames;
10646+
10647+ find_max_pfn();
10648+
10649+ max_low_pfn = find_max_low_pfn();
10650+
10651+#ifdef CONFIG_HIGHMEM
10652+ highstart_pfn = highend_pfn = max_pfn;
10653+ if (max_pfn > max_low_pfn) {
10654+ highstart_pfn = max_low_pfn;
10655+ }
10656+ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
10657+ pages_to_mb(highend_pfn - highstart_pfn));
10658+#endif
10659+ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
10660+ pages_to_mb(max_low_pfn));
10661+
10662+ setup_bootmem_allocator();
10663+
10664+ return max_low_pfn;
10665+}
10666+
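+/*
+ * Split low memory into ZONE_DMA (below MAX_DMA_ADDRESS) and ZONE_NORMAL;
+ * anything above max_low_pfn goes to ZONE_HIGHMEM when configured.
+ */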
10667+void __init zone_sizes_init(void)
10668+{
10669+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
10670+ unsigned int max_dma, low;
10671+
10672+ max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
10673+ low = max_low_pfn;
10674+
10675+ if (low < max_dma)
10676+ zones_size[ZONE_DMA] = low;
10677+ else {
10678+ zones_size[ZONE_DMA] = max_dma;
10679+ zones_size[ZONE_NORMAL] = low - max_dma;
10680+#ifdef CONFIG_HIGHMEM
10681+ zones_size[ZONE_HIGHMEM] = highend_pfn - low;
10682+#endif
10683+ }
10684+ free_area_init(zones_size);
10685+}
10686+#else
10687+extern unsigned long __init setup_memory(void);
10688+extern void zone_sizes_init(void);
10689+#endif /* !CONFIG_NEED_MULTIPLE_NODES */
10690+
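+/*
+ * Bring up the boot-time allocator: hand it all usable low memory, then
+ * re-reserve the kernel image and the bootmem bitmap itself, plus (on
+ * native) the legacy BIOS regions that must never be handed out.
+ */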
10691+void __init setup_bootmem_allocator(void)
10692+{
10693+ unsigned long bootmap_size;
10694+ /*
10695+ * Initialize the boot-time allocator (with low memory only):
10696+ */
10697+ bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
10698+
10699+ register_bootmem_low_pages(max_low_pfn);
10700+
10701+ /*
10702+ * Reserve the bootmem bitmap itself as well. We do this in two
10703+ * steps (first step was init_bootmem()) because this catches
10704+ * the (very unlikely) case of us accidentally initializing the
10705+ * bootmem allocator with an invalid RAM area.
10706+ */
10707+ reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
10708+ bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
10709+
10710+#ifndef CONFIG_XEN
10711+ /*
10712+ * reserve physical page 0 - it's a special BIOS page on many boxes,
10713+ * enabling clean reboots, SMP operation, laptop functions.
10714+ */
10715+ reserve_bootmem(0, PAGE_SIZE);
10716+
10717+ /* reserve EBDA region, it's a 4K region */
10718+ reserve_ebda_region();
10719+
10720+ /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
10721+ PCI prefetch into it (errata #56). Usually the page is reserved anyways,
10722+ unless you have no PS/2 mouse plugged in. */
10723+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
10724+ boot_cpu_data.x86 == 6)
10725+ reserve_bootmem(0xa0000 - 4096, 4096);
10726+
10727+#ifdef CONFIG_SMP
10728+ /*
10729+ * But first pinch a few for the stack/trampoline stuff
10730+ * FIXME: Don't need the extra page at 4K, but need to fix
10731+ * trampoline before removing it. (see the GDT stuff)
10732+ */
10733+ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
10734+#endif
10735+#ifdef CONFIG_ACPI_SLEEP
10736+ /*
10737+ * Reserve low memory region for sleep support.
10738+ */
10739+ acpi_reserve_bootmem();
10740+#endif
10741+#endif /* !CONFIG_XEN */
10742+
10743+#ifdef CONFIG_BLK_DEV_INITRD
10744+ if (xen_start_info->mod_start) {
10745+ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
10746+ /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
10747+ initrd_start = INITRD_START + PAGE_OFFSET;
10748+ initrd_end = initrd_start+INITRD_SIZE;
10749+ initrd_below_start_ok = 1;
10750+ }
10751+ else {
10752+ printk(KERN_ERR "initrd extends beyond end of memory "
10753+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
10754+ INITRD_START + INITRD_SIZE,
10755+ max_low_pfn << PAGE_SHIFT);
10756+ initrd_start = 0;
10757+ }
10758+ }
10759+#endif
10760+#ifdef CONFIG_KEXEC
10761+#ifdef CONFIG_XEN
10762+ xen_machine_kexec_setup_resources();
10763+#else
10764+ if (crashk_res.start != crashk_res.end)
10765+ reserve_bootmem(crashk_res.start,
10766+ crashk_res.end - crashk_res.start + 1);
10767+#endif
10768+#endif
10769+}
10770+
10771+/*
10772+ * The node 0 pgdat is initialized before all of these because
10773+ * it's needed for bootmem. node>0 pgdats have their virtual
10774+ * space allocated before the pagetables are in place to access
10775+ * them, so they can't be cleared then.
10776+ *
10777+ * This should all compile down to nothing when NUMA is off.
10778+ */
10779+void __init remapped_pgdat_init(void)
10780+{
10781+ int nid;
10782+
10783+ for_each_online_node(nid) {
10784+ if (nid != 0)
10785+ memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
10786+ }
10787+}
10788+
10789+/*
10790+ * Request address space for all standard RAM and ROM resources
10791+ * and also for regions reported as reserved by the e820.
10792+ */
10793+static void __init
10794+legacy_init_iomem_resources(struct e820entry *e820, int nr_map,
10795+ struct resource *code_resource,
10796+ struct resource *data_resource)
10797+{
10798+ int i;
10799+
10800+ probe_roms();
10801+
10802+ for (i = 0; i < nr_map; i++) {
10803+ struct resource *res;
10804+#ifndef CONFIG_RESOURCES_64BIT
10805+ if (e820[i].addr + e820[i].size > 0x100000000ULL)
10806+ continue;
10807+#endif
10808+ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
10809+ switch (e820[i].type) {
10810+ case E820_RAM: res->name = "System RAM"; break;
10811+ case E820_ACPI: res->name = "ACPI Tables"; break;
10812+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
10813+ default: res->name = "reserved";
10814+ }
10815+ res->start = e820[i].addr;
10816+ res->end = res->start + e820[i].size - 1;
10817+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
10818+ if (request_resource(&iomem_resource, res)) {
10819+ kfree(res);
10820+ continue;
10821+ }
10822+ if (e820[i].type == E820_RAM) {
10823+ /*
10824+ * We don't know which RAM region contains kernel data,
10825+ * so we try it repeatedly and let the resource manager
10826+ * test it.
10827+ */
10828+#ifndef CONFIG_XEN
10829+ request_resource(res, code_resource);
10830+ request_resource(res, data_resource);
10831+#endif
10832+#ifdef CONFIG_KEXEC
10833+ if (crashk_res.start != crashk_res.end)
10834+ request_resource(res, &crashk_res);
10835+#ifdef CONFIG_XEN
10836+ xen_machine_kexec_register_resources(res);
10837+#endif
10838+#endif
10839+ }
10840+ }
10841+}
10842+
10843+/*
10844+ * Locate an unused range of the physical address space below 4G which
10845+ * can be used for PCI mappings.
10846+ */
10847+static void __init
10848+e820_setup_gap(struct e820entry *e820, int nr_map)
10849+{
10850+ unsigned long gapstart, gapsize, round;
10851+ unsigned long long last;
10852+ int i;
10853+
10854+ /*
10855+	 * Search for the biggest gap in the low 32 bits of the e820
10856+ * memory space.
10857+ */
10858+ last = 0x100000000ull;
10859+ gapstart = 0x10000000;
10860+ gapsize = 0x400000;
10861+ i = nr_map;
10862+ while (--i >= 0) {
10863+ unsigned long long start = e820[i].addr;
10864+ unsigned long long end = start + e820[i].size;
10865+
10866+ /*
10867+ * Since "last" is at most 4GB, we know we'll
10868+ * fit in 32 bits if this condition is true
10869+ */
10870+ if (last > end) {
10871+ unsigned long gap = last - end;
10872+
10873+ if (gap > gapsize) {
10874+ gapsize = gap;
10875+ gapstart = end;
10876+ }
10877+ }
10878+ if (start < last)
10879+ last = start;
10880+ }
10881+
10882+ /*
10883+ * See how much we want to round up: start off with
10884+ * rounding to the next 1MB area.
10885+ */
10886+ round = 0x100000;
10887+ while ((gapsize >> 4) > round)
10888+ round += round;
10889+ /* Fun with two's complement */
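+	/* For power-of-two 'round', -round == ~(round - 1), so the AND
+	   aligns downwards; adding 'round' first guarantees the result
+	   lands on a round-aligned address above gapstart. */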
10890+ pci_mem_start = (gapstart + round) & -round;
10891+
10892+ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
10893+ pci_mem_start, gapstart, gapsize);
10894+}
10895+
10896+/*
10897+ * Request address space for all standard resources
10898+ *
10899+ * This is called just before pcibios_init(), which is also a
10900+ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
10901+ */
10902+static int __init request_standard_resources(void)
10903+{
10904+ int i;
10905+
10906+ /* Nothing to do if not running in dom0. */
10907+ if (!is_initial_xendomain())
10908+ return 0;
10909+
10910+ printk("Setting up standard PCI resources\n");
10911+#ifdef CONFIG_XEN
10912+ legacy_init_iomem_resources(machine_e820.map, machine_e820.nr_map,
10913+ &code_resource, &data_resource);
10914+#else
10915+ if (efi_enabled)
10916+ efi_initialize_iomem_resources(&code_resource, &data_resource);
10917+ else
10918+ legacy_init_iomem_resources(e820.map, e820.nr_map,
10919+ &code_resource, &data_resource);
10920+#endif
10921+
10922+ /* EFI systems may still have VGA */
10923+ request_resource(&iomem_resource, &video_ram_resource);
10924+
10925+ /* request I/O space for devices used on all i[345]86 PCs */
10926+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
10927+ request_resource(&ioport_resource, &standard_io_resources[i]);
10928+ return 0;
10929+}
10930+
10931+subsys_initcall(request_standard_resources);
10932+
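+/*
+ * Pick the window used for PCI resource allocation: dom0 works from the
+ * host e820, while other domains use their own pseudo-physical map.
+ */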
10933+static void __init register_memory(void)
10934+{
10935+#ifdef CONFIG_XEN
10936+ if (is_initial_xendomain())
10937+ e820_setup_gap(machine_e820.map, machine_e820.nr_map);
10938+ else
10939+#endif
10940+ e820_setup_gap(e820.map, e820.nr_map);
10941+}
10942+
10943+#ifdef CONFIG_MCA
10944+static void set_mca_bus(int x)
10945+{
10946+ MCA_bus = x;
10947+}
10948+#else
10949+static void set_mca_bus(int x) { }
10950+#endif
10951+
10952+/*
10953+ * Determine if we were loaded by an EFI loader. If so, then we have also been
10954+ * passed the efi memmap, systab, etc., so we should use these data structures
10955+ * for initialization. Note, the efi init code path is determined by the
10956+ * global efi_enabled. This allows the same kernel image to be used on existing
10957+ * systems (with a traditional BIOS) as well as on EFI systems.
10958+ */
10959+void __init setup_arch(char **cmdline_p)
10960+{
10961+ int i, j, k, fpp;
10962+ struct physdev_set_iopl set_iopl;
10963+ unsigned long max_low_pfn;
10964+ unsigned long p2m_pages;
10965+
10966+ /* Force a quick death if the kernel panics (not domain 0). */
10967+ extern int panic_timeout;
10968+ if (!panic_timeout && !is_initial_xendomain())
10969+ panic_timeout = 1;
10970+
10971+ /* Register a call for panic conditions. */
10972+ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
10973+
10974+ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
10975+ VMASST_TYPE_4gb_segments));
10976+ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
10977+ VMASST_TYPE_writable_pagetables));
10978+
10979+ memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
10980+ pre_setup_arch_hook();
10981+ early_cpu_init();
10982+	/* loop through change-points, determining the effect on the new bios map */
10983+ prefill_possible_map();
10984+#endif
10985+
10986+ /*
10987+ * FIXME: This isn't an official loader_type right
10988+ * now but does currently work with elilo.
10989+ * If we were configured as an EFI kernel, check to make
10990+ * sure that we were loaded correctly from elilo and that
10991+ * the system table is valid. If not, then initialize normally.
10992+ */
10993+#ifdef CONFIG_EFI
10994+ if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
10995+ efi_enabled = 1;
10996+#endif
10997+
10998+ /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
10999+ properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
11000+ */
11001+ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
11002+ drive_info = DRIVE_INFO;
11003+ screen_info = SCREEN_INFO;
11004+ copy_edid();
11005+ apm_info.bios = APM_BIOS_INFO;
11006+ ist_info = IST_INFO;
11007+ saved_videomode = VIDEO_MODE;
11008+ if( SYS_DESC_TABLE.length != 0 ) {
11009+ set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
11010+ machine_id = SYS_DESC_TABLE.table[0];
11011+ machine_submodel_id = SYS_DESC_TABLE.table[1];
11012+ BIOS_revision = SYS_DESC_TABLE.table[2];
11013+ }
11014+ bootloader_type = LOADER_TYPE;
11015+
11016+ if (is_initial_xendomain()) {
11017+ const struct dom0_vga_console_info *info =
11018+ (void *)((char *)xen_start_info +
11019+ xen_start_info->console.dom0.info_off);
11020+
11021+ dom0_init_screen_info(info,
11022+ xen_start_info->console.dom0.info_size);
11023+ xen_start_info->console.domU.mfn = 0;
11024+ xen_start_info->console.domU.evtchn = 0;
11025+ } else
11026+ screen_info.orig_video_isVGA = 0;
11027+
11028+#ifdef CONFIG_BLK_DEV_RAM
11029+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
11030+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
11031+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
11032+#endif
11033+
11034+ ARCH_SETUP
11035+ if (efi_enabled)
11036+ efi_init();
11037+ else {
11038+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
11039+ print_memory_map(machine_specific_memory_setup());
11040+ }
11041+
11042+ copy_edd();
11043+
11044+ if (!MOUNT_ROOT_RDONLY)
11045+ root_mountflags &= ~MS_RDONLY;
11046+ init_mm.start_code = (unsigned long) _text;
11047+ init_mm.end_code = (unsigned long) _etext;
11048+ init_mm.end_data = (unsigned long) _edata;
11049+ init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
11050+ xen_start_info->nr_pt_frames) << PAGE_SHIFT;
11051+
11052+ code_resource.start = virt_to_phys(_text);
11053+ code_resource.end = virt_to_phys(_etext)-1;
11054+ data_resource.start = virt_to_phys(_etext);
11055+ data_resource.end = virt_to_phys(_edata)-1;
11056+
11057+ parse_cmdline_early(cmdline_p);
11058+
11059+#ifdef CONFIG_EARLY_PRINTK
11060+ {
11061+ char *s = strstr(*cmdline_p, "earlyprintk=");
11062+ if (s) {
11063+ setup_early_printk(strchr(s, '=') + 1);
11064+ printk("early console enabled\n");
11065+ }
11066+ }
11067+#endif
11068+
11069+ max_low_pfn = setup_memory();
11070+
11071+ /*
11072+ * NOTE: before this point _nobody_ is allowed to allocate
11073+ * any memory using the bootmem allocator. Although the
11074+	 * allocator is now initialised, only the first 8Mb of the kernel
11075+ * virtual address space has been mapped. All allocations before
11076+ * paging_init() has completed must use the alloc_bootmem_low_pages()
11077+ * variant (which allocates DMA'able memory) and care must be taken
11078+ * not to exceed the 8Mb limit.
11079+ */
11080+
11081+#ifdef CONFIG_SMP
11082+ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
11083+#endif
11084+ paging_init();
11085+ remapped_pgdat_init();
11086+ sparse_init();
11087+ zone_sizes_init();
11088+
11089+#ifdef CONFIG_X86_FIND_SMP_CONFIG
11090+ /*
11091+ * Find and reserve possible boot-time SMP configuration:
11092+ */
11093+ find_smp_config();
11094+#endif
11095+
11096+ p2m_pages = max_pfn;
11097+ if (xen_start_info->nr_pages > max_pfn) {
11098+ /*
11099+ * the max_pfn was shrunk (probably by mem= or highmem=
11100+ * kernel parameter); shrink reservation with the HV
11101+ */
11102+ struct xen_memory_reservation reservation = {
11103+ .address_bits = 0,
11104+ .extent_order = 0,
11105+ .domid = DOMID_SELF
11106+ };
11107+ unsigned int difference;
11108+ int ret;
11109+
11110+ difference = xen_start_info->nr_pages - max_pfn;
11111+
11112+ set_xen_guest_handle(reservation.extent_start,
11113+ ((unsigned long *)xen_start_info->mfn_list) + max_pfn);
11114+ reservation.nr_extents = difference;
11115+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
11116+ &reservation);
11117+ BUG_ON (ret != difference);
11118+ }
11119+ else if (max_pfn > xen_start_info->nr_pages)
11120+ p2m_pages = xen_start_info->nr_pages;
11121+
11122+ /* Make sure we have a correctly sized P->M table. */
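+	/* Slots beyond the initially populated range are left at ~0,
+	 * i.e. no machine frame is assigned to them yet. */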
11123+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
11124+ phys_to_machine_mapping = alloc_bootmem_low_pages(
11125+ max_pfn * sizeof(unsigned long));
11126+ memset(phys_to_machine_mapping, ~0,
11127+ max_pfn * sizeof(unsigned long));
11128+ memcpy(phys_to_machine_mapping,
11129+ (unsigned long *)xen_start_info->mfn_list,
11130+ p2m_pages * sizeof(unsigned long));
11131+ free_bootmem(
11132+ __pa(xen_start_info->mfn_list),
11133+ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
11134+ sizeof(unsigned long))));
11135+
11136+ /*
11137+		 * Initialise the list of frames that holds the list of frames
11138+		 * making up the p2m table. Used by save/restore.
11139+ */
11140+ pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
11141+
11142+ fpp = PAGE_SIZE/sizeof(unsigned long);
11143+ for (i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++) {
11144+ if ((j % fpp) == 0) {
11145+ k++;
11146+ BUG_ON(k>=16);
11147+ pfn_to_mfn_frame_list[k] =
11148+ alloc_bootmem_low_pages(PAGE_SIZE);
11149+ pfn_to_mfn_frame_list_list[k] =
11150+ virt_to_mfn(pfn_to_mfn_frame_list[k]);
11151+ j=0;
11152+ }
11153+ pfn_to_mfn_frame_list[k][j] =
11154+ virt_to_mfn(&phys_to_machine_mapping[i]);
11155+ }
11156+ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
11157+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
11158+ virt_to_mfn(pfn_to_mfn_frame_list_list);
11159+ }
11160+
11161+ /* Mark all ISA DMA channels in-use - using them wouldn't work. */
11162+ for (i = 0; i < MAX_DMA_CHANNELS; ++i)
11163+ if (i != 4 && request_dma(i, "xen") != 0)
11164+ BUG();
11165+
11166+ /*
11167+ * NOTE: at this point the bootmem allocator is fully available.
11168+ */
11169+
11170+ if (is_initial_xendomain())
11171+ dmi_scan_machine();
11172+
11173+#ifdef CONFIG_X86_GENERICARCH
11174+ generic_apic_probe(*cmdline_p);
11175+#endif
11176+ if (efi_enabled)
11177+ efi_map_memmap();
11178+
11179+ set_iopl.iopl = 1;
11180+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
11181+
11182+#ifdef CONFIG_ACPI
11183+ if (!is_initial_xendomain()) {
11184+ printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
11185+ acpi_disabled = 1;
11186+ acpi_ht = 0;
11187+ }
11188+
11189+ /*
11190+ * Parse the ACPI tables for possible boot-time SMP configuration.
11191+ */
11192+ acpi_boot_table_init();
11193+#endif
11194+
11195+#ifdef CONFIG_X86_IO_APIC
11196+ check_acpi_pci(); /* Checks more than just ACPI actually */
11197+#endif
11198+
11199+#ifdef CONFIG_ACPI
11200+ acpi_boot_init();
11201+
11202+#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
11203+ if (def_to_bigsmp)
11204+ printk(KERN_WARNING "More than 8 CPUs detected and "
11205+ "CONFIG_X86_PC cannot handle it.\nUse "
11206+ "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
11207+#endif
11208+#endif
11209+#ifdef CONFIG_X86_LOCAL_APIC
11210+ if (smp_found_config)
11211+ get_smp_config();
11212+#endif
11213+
11214+ register_memory();
11215+
11216+ if (is_initial_xendomain()) {
11217+#ifdef CONFIG_VT
11218+#if defined(CONFIG_VGA_CONSOLE)
11219+ if (!efi_enabled ||
11220+ (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
11221+ conswitchp = &vga_con;
11222+#elif defined(CONFIG_DUMMY_CONSOLE)
11223+ conswitchp = &dummy_con;
11224+#endif
11225+#endif
11226+ } else {
11227+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
11228+ conswitchp = &dummy_con;
11229+#endif
11230+ }
11231+ tsc_init();
11232+}
11233+
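+/*
+ * Panic handler: crash the domain via the hypervisor so the failure is
+ * visible outside the guest; control never returns here.
+ */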
11234+static int
11235+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
11236+{
11237+ HYPERVISOR_shutdown(SHUTDOWN_crash);
11238+ /* we're never actually going to get here... */
11239+ return NOTIFY_DONE;
11240+}
11241+
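+/*
+ * Register the PC speaker platform device; only dom0 has access to the
+ * real hardware, so other domains skip it.
+ */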
11242+static __init int add_pcspkr(void)
11243+{
11244+ struct platform_device *pd;
11245+ int ret;
11246+
11247+ if (!is_initial_xendomain())
11248+ return 0;
11249+
11250+ pd = platform_device_alloc("pcspkr", -1);
11251+ if (!pd)
11252+ return -ENOMEM;
11253+
11254+ ret = platform_device_add(pd);
11255+ if (ret)
11256+ platform_device_put(pd);
11257+
11258+ return ret;
11259+}
11260+device_initcall(add_pcspkr);
11261+
11262+/*
11263+ * Local Variables:
11264+ * mode:c
11265+ * c-file-style:"k&r"
11266+ * c-basic-offset:8
11267+ * End:
11268+ */
11269Index: head-2008-11-25/arch/x86/kernel/smp_32-xen.c
11270===================================================================
11271--- /dev/null 1970-01-01 00:00:00.000000000 +0000
11272+++ head-2008-11-25/arch/x86/kernel/smp_32-xen.c 2007-12-10 08:47:31.000000000 +0100
11273@@ -0,0 +1,605 @@
11274+/*
11275+ * Intel SMP support routines.
11276+ *
11277+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
11278+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
11279+ *
11280+ * This code is released under the GNU General Public License version 2 or
11281+ * later.
11282+ */
11283+
11284+#include <linux/init.h>
11285+
11286+#include <linux/mm.h>
11287+#include <linux/delay.h>
11288+#include <linux/spinlock.h>
11289+#include <linux/smp_lock.h>
11290+#include <linux/kernel_stat.h>
11291+#include <linux/mc146818rtc.h>
11292+#include <linux/cache.h>
11293+#include <linux/interrupt.h>
11294+#include <linux/cpu.h>
11295+#include <linux/module.h>
11296+
11297+#include <asm/mtrr.h>
11298+#include <asm/tlbflush.h>
11299+#if 0
11300+#include <mach_apic.h>
11301+#endif
11302+#include <xen/evtchn.h>
11303+
11304+/*
11305+ * Some notes on x86 processor bugs affecting SMP operation:
11306+ *
11307+ * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
11308+ * The Linux implications for SMP are handled as follows:
11309+ *
11310+ * Pentium III / [Xeon]
11311+ * None of the E1AP-E3AP errata are visible to the user.
11312+ *
11313+ * E1AP. see PII A1AP
11314+ * E2AP. see PII A2AP
11315+ * E3AP. see PII A3AP
11316+ *
11317+ * Pentium II / [Xeon]
11318+ * None of the A1AP-A3AP errata are visible to the user.
11319+ *
11320+ * A1AP. see PPro 1AP
11321+ * A2AP. see PPro 2AP
11322+ * A3AP. see PPro 7AP
11323+ *
11324+ * Pentium Pro
11325+ * None of 1AP-9AP errata are visible to the normal user,
11326+ * except occasional delivery of 'spurious interrupt' as trap #15.
11327+ * This is very rare and a non-problem.
11328+ *
11329+ * 1AP. Linux maps APIC as non-cacheable
11330+ * 2AP. worked around in hardware
11331+ * 3AP. fixed in C0 and above steppings microcode update.
11332+ * Linux does not use excessive STARTUP_IPIs.
11333+ * 4AP. worked around in hardware
11334+ * 5AP. symmetric IO mode (normal Linux operation) not affected.
11335+ * 'noapic' mode has vector 0xf filled out properly.
11336+ * 6AP. 'noapic' mode might be affected - fixed in later steppings
11337+ * 7AP. We do not assume writes to the LVT deasserting IRQs
11338+ * 8AP. We do not enable low power mode (deep sleep) during MP bootup
11339+ * 9AP. We do not use mixed mode
11340+ *
11341+ * Pentium
11342+ * There is a marginal case where REP MOVS on 100MHz SMP
11343+ * machines with B stepping processors can fail. XXX should provide
11344+ * an L1cache=Writethrough or L1cache=off option.
11345+ *
11346+ * B stepping CPUs may hang. There are hardware workarounds
11347+ * for this. We warn about it in case your board doesn't have the
11348+ * workarounds. Basically that's so I can tell anyone with a B stepping
11349+ * CPU and SMP problems "tough".
11350+ *
11351+ * Specific items [From Pentium Processor Specification Update]
11352+ *
11353+ * 1AP. Linux doesn't use remote read
11354+ * 2AP. Linux doesn't trust APIC errors
11355+ * 3AP. We work around this
11356+ * 4AP. Linux never generated 3 interrupts of the same priority
11357+ * to cause a lost local interrupt.
11358+ * 5AP. Remote read is never used
11359+ * 6AP. not affected - worked around in hardware
11360+ * 7AP. not affected - worked around in hardware
11361+ * 8AP. worked around in hardware - we get explicit CS errors if not
11362+ * 9AP. only 'noapic' mode affected. Might generate spurious
11363+ * interrupts, we log only the first one and count the
11364+ * rest silently.
11365+ * 10AP. not affected - worked around in hardware
11366+ * 11AP. Linux reads the APIC between writes to avoid this, as per
11367+ * the documentation. Make sure you preserve this as it affects
11368+ * the C stepping chips too.
11369+ * 12AP. not affected - worked around in hardware
11370+ * 13AP. not affected - worked around in hardware
11371+ * 14AP. we always deassert INIT during bootup
11372+ * 15AP. not affected - worked around in hardware
11373+ * 16AP. not affected - worked around in hardware
11374+ * 17AP. not affected - worked around in hardware
11375+ * 18AP. not affected - worked around in hardware
11376+ * 19AP. not affected - worked around in BIOS
11377+ *
11378+ * If this sounds worrying, believe me: these bugs are either ___RARE___,
11379+ * or are signal timing bugs worked around in hardware and there's
11380+ * about nothing of note with C stepping upwards.
11381+ */
11382+
11383+DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
11384+
11385+/*
11386+ * the following functions deal with sending IPIs between CPUs.
11387+ *
11388+ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
11389+ */
11390+
11391+static inline int __prepare_ICR (unsigned int shortcut, int vector)
11392+{
11393+ unsigned int icr = shortcut | APIC_DEST_LOGICAL;
11394+
11395+ switch (vector) {
11396+ default:
11397+ icr |= APIC_DM_FIXED | vector;
11398+ break;
11399+ case NMI_VECTOR:
11400+ icr |= APIC_DM_NMI;
11401+ break;
11402+ }
11403+ return icr;
11404+}
11405+
11406+static inline int __prepare_ICR2 (unsigned int mask)
11407+{
11408+ return SET_APIC_DEST_FIELD(mask);
11409+}
11410+
11411+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
11412+
11413+static inline void __send_IPI_one(unsigned int cpu, int vector)
11414+{
11415+ int irq = per_cpu(ipi_to_irq, cpu)[vector];
11416+ BUG_ON(irq < 0);
11417+ notify_remote_via_irq(irq);
11418+}
11419+
11420+void __send_IPI_shortcut(unsigned int shortcut, int vector)
11421+{
11422+ int cpu;
11423+
11424+ switch (shortcut) {
11425+ case APIC_DEST_SELF:
11426+ __send_IPI_one(smp_processor_id(), vector);
11427+ break;
11428+ case APIC_DEST_ALLBUT:
11429+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
11430+ if (cpu == smp_processor_id())
11431+ continue;
11432+ if (cpu_isset(cpu, cpu_online_map)) {
11433+ __send_IPI_one(cpu, vector);
11434+ }
11435+ }
11436+ break;
11437+ default:
11438+ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
11439+ vector);
11440+ break;
11441+ }
11442+}
11443+
11444+void fastcall send_IPI_self(int vector)
11445+{
11446+ __send_IPI_shortcut(APIC_DEST_SELF, vector);
11447+}
11448+
11449+/*
11450+ * This is only used on smaller machines.
11451+ */
11452+void send_IPI_mask_bitmask(cpumask_t mask, int vector)
11453+{
11454+ unsigned long flags;
11455+ unsigned int cpu;
11456+
11457+ local_irq_save(flags);
11458+ WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
11459+
11460+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
11461+ if (cpu_isset(cpu, mask)) {
11462+ __send_IPI_one(cpu, vector);
11463+ }
11464+ }
11465+
11466+ local_irq_restore(flags);
11467+}
11468+
11469+void send_IPI_mask_sequence(cpumask_t mask, int vector)
11470+{
11471+
11472+ send_IPI_mask_bitmask(mask, vector);
11473+}
11474+
11475+#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
11476+
11477+#if 0 /* XEN */
11478+/*
11479+ * Smarter SMP flushing macros.
11480+ * c/o Linus Torvalds.
11481+ *
11482+ * These mean you can really definitely utterly forget about
11483+ * writing to user space from interrupts. (It's not allowed anyway).
11484+ *
11485+ * Optimizations Manfred Spraul <manfred@colorfullife.com>
11486+ */
11487+
11488+static cpumask_t flush_cpumask;
11489+static struct mm_struct * flush_mm;
11490+static unsigned long flush_va;
11491+static DEFINE_SPINLOCK(tlbstate_lock);
11492+#define FLUSH_ALL 0xffffffff
11493+
11494+/*
11495+ * We cannot call mmdrop() because we are in interrupt context,
11496+ * instead update mm->cpu_vm_mask.
11497+ *
11498+ * We need to reload %cr3 since the page tables may be going
11499+ * away from under us..
11500+ */
11501+static inline void leave_mm (unsigned long cpu)
11502+{
11503+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
11504+ BUG();
11505+ cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
11506+ load_cr3(swapper_pg_dir);
11507+}
11508+
11509+/*
11510+ *
11511+ * The flush IPI assumes that a thread switch happens in this order:
11512+ * [cpu0: the cpu that switches]
11513+ * 1) switch_mm() either 1a) or 1b)
11514+ * 1a) thread switch to a different mm
11515+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
11516+ * Stop ipi delivery for the old mm. This is not synchronized with
11517+ * the other cpus, but smp_invalidate_interrupt ignores flush ipis
11518+ * for the wrong mm, and in the worst case we perform a superfluous
11519+ * tlb flush.
11520+ * 1a2) set cpu_tlbstate to TLBSTATE_OK
11521+ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
11522+ * was in lazy tlb mode.
11523+ * 1a3) update cpu_tlbstate[].active_mm
11524+ * Now cpu0 accepts tlb flushes for the new mm.
11525+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
11526+ * Now the other cpus will send tlb flush ipis.
11527+ * 1a4) change cr3.
11528+ * 1b) thread switch without mm change
11529+ * cpu_tlbstate[].active_mm is correct, cpu0 already handles
11530+ * flush ipis.
11531+ * 1b1) set cpu_tlbstate to TLBSTATE_OK
11532+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
11533+ * Atomically set the bit [other cpus will start sending flush ipis],
11534+ * and test the bit.
11535+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
11536+ * 2) switch %%esp, ie current
11537+ *
11538+ * The interrupt must handle 2 special cases:
11539+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
11540+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
11541+ * runs in kernel space, the cpu could load tlb entries for user space
11542+ * pages.
11543+ *
11544+ * The good news is that cpu_tlbstate is local to each cpu, no
11545+ * write/read ordering problems.
11546+ */
11547+
11548+/*
11549+ * TLB flush IPI:
11550+ *
11551+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
11552+ * 2) Leave the mm if we are in the lazy tlb mode.
11553+ */
11554+
11555+irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
11556+ struct pt_regs *regs)
11557+{
11558+ unsigned long cpu;
11559+
11560+ cpu = get_cpu();
11561+
11562+ if (!cpu_isset(cpu, flush_cpumask))
11563+ goto out;
11564+ /*
11565+ * This was a BUG() but until someone can quote me the
11566+ * line from the intel manual that guarantees an IPI to
11567+ * multiple CPUs is retried _only_ on the erroring CPUs
11568+ * it's staying as a return
11569+ *
11570+ * BUG();
11571+ */
11572+
11573+ if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
11574+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
11575+ if (flush_va == FLUSH_ALL)
11576+ local_flush_tlb();
11577+ else
11578+ __flush_tlb_one(flush_va);
11579+ } else
11580+ leave_mm(cpu);
11581+ }
11582+ smp_mb__before_clear_bit();
11583+ cpu_clear(cpu, flush_cpumask);
11584+ smp_mb__after_clear_bit();
11585+out:
11586+ put_cpu_no_resched();
11587+
11588+ return IRQ_HANDLED;
11589+}
11590+
11591+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
11592+ unsigned long va)
11593+{
11594+ /*
11595+ * A couple of (to be removed) sanity checks:
11596+ *
11597+ * - current CPU must not be in mask
11598+ * - mask must exist :)
11599+ */
11600+ BUG_ON(cpus_empty(cpumask));
11601+ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
11602+ BUG_ON(!mm);
11603+
11604+ /* If a CPU which we ran on has gone down, OK. */
11605+ cpus_and(cpumask, cpumask, cpu_online_map);
11606+ if (cpus_empty(cpumask))
11607+ return;
11608+
11609+ /*
11610+ * I'm not happy about this global shared spinlock in the
11611+ * MM hot path, but we'll see how contended it is.
11612+ * Temporarily this turns IRQs off, so that lockups are
11613+ * detected by the NMI watchdog.
11614+ */
11615+ spin_lock(&tlbstate_lock);
11616+
11617+ flush_mm = mm;
11618+ flush_va = va;
11619+#if NR_CPUS <= BITS_PER_LONG
11620+ atomic_set_mask(cpumask, &flush_cpumask);
11621+#else
11622+ {
11623+ int k;
11624+ unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
11625+ unsigned long *cpu_mask = (unsigned long *)&cpumask;
11626+ for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
11627+ atomic_set_mask(cpu_mask[k], &flush_mask[k]);
11628+ }
11629+#endif
11630+ /*
11631+ * We have to send the IPI only to
11632+ * CPUs affected.
11633+ */
11634+ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
11635+
11636+ while (!cpus_empty(flush_cpumask))
11637+ /* nothing. lockup detection does not belong here */
11638+ mb();
11639+
11640+ flush_mm = NULL;
11641+ flush_va = 0;
11642+ spin_unlock(&tlbstate_lock);
11643+}
11644+
11645+void flush_tlb_current_task(void)
11646+{
11647+ struct mm_struct *mm = current->mm;
11648+ cpumask_t cpu_mask;
11649+
11650+ preempt_disable();
11651+ cpu_mask = mm->cpu_vm_mask;
11652+ cpu_clear(smp_processor_id(), cpu_mask);
11653+
11654+ local_flush_tlb();
11655+ if (!cpus_empty(cpu_mask))
11656+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
11657+ preempt_enable();
11658+}
11659+
11660+void flush_tlb_mm (struct mm_struct * mm)
11661+{
11662+ cpumask_t cpu_mask;
11663+
11664+ preempt_disable();
11665+ cpu_mask = mm->cpu_vm_mask;
11666+ cpu_clear(smp_processor_id(), cpu_mask);
11667+
11668+ if (current->active_mm == mm) {
11669+ if (current->mm)
11670+ local_flush_tlb();
11671+ else
11672+ leave_mm(smp_processor_id());
11673+ }
11674+ if (!cpus_empty(cpu_mask))
11675+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
11676+
11677+ preempt_enable();
11678+}
11679+
11680+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
11681+{
11682+ struct mm_struct *mm = vma->vm_mm;
11683+ cpumask_t cpu_mask;
11684+
11685+ preempt_disable();
11686+ cpu_mask = mm->cpu_vm_mask;
11687+ cpu_clear(smp_processor_id(), cpu_mask);
11688+
11689+ if (current->active_mm == mm) {
11690+ if(current->mm)
11691+ __flush_tlb_one(va);
11692+ else
11693+ leave_mm(smp_processor_id());
11694+ }
11695+
11696+ if (!cpus_empty(cpu_mask))
11697+ flush_tlb_others(cpu_mask, mm, va);
11698+
11699+ preempt_enable();
11700+}
11701+EXPORT_SYMBOL(flush_tlb_page);
11702+
11703+static void do_flush_tlb_all(void* info)
11704+{
11705+ unsigned long cpu = smp_processor_id();
11706+
11707+ __flush_tlb_all();
11708+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
11709+ leave_mm(cpu);
11710+}
11711+
11712+void flush_tlb_all(void)
11713+{
11714+ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
11715+}
11716+
11717+#endif /* XEN */
11718+
11719+/*
11720+ * this function sends a 'reschedule' IPI to another CPU.
11721+ * it goes straight through and wastes no time serializing
11722+ * anything. Worst case is that we lose a reschedule ...
11723+ */
11724+void smp_send_reschedule(int cpu)
11725+{
11726+ WARN_ON(cpu_is_offline(cpu));
11727+ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
11728+}
11729+
11730+/*
11731+ * Structure and data for smp_call_function(). This is designed to minimise
11732+ * static memory requirements. It also looks cleaner.
11733+ */
11734+static DEFINE_SPINLOCK(call_lock);
11735+
11736+struct call_data_struct {
11737+ void (*func) (void *info);
11738+ void *info;
11739+ atomic_t started;
11740+ atomic_t finished;
11741+ int wait;
11742+};
11743+
11744+void lock_ipi_call_lock(void)
11745+{
11746+ spin_lock_irq(&call_lock);
11747+}
11748+
11749+void unlock_ipi_call_lock(void)
11750+{
11751+ spin_unlock_irq(&call_lock);
11752+}
11753+
11754+static struct call_data_struct *call_data;
11755+
11756+/**
11757+ * smp_call_function(): Run a function on all other CPUs.
11758+ * @func: The function to run. This must be fast and non-blocking.
11759+ * @info: An arbitrary pointer to pass to the function.
11760+ * @nonatomic: currently unused.
11761+ * @wait: If true, wait (atomically) until function has completed on other CPUs.
11762+ *
11763+ * Returns 0 on success, else a negative status code. Does not return until
11764+ * remote CPUs are nearly ready to execute <<func>>, or have already executed it.
11765+ *
11766+ * You must not call this function with disabled interrupts or from a
11767+ * hardware interrupt handler or from a bottom half handler.
11768+ */
11769+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
11770+ int wait)
11771+{
11772+ struct call_data_struct data;
11773+ int cpus;
11774+
11775+ /* Holding any lock stops cpus from going down. */
11776+ spin_lock(&call_lock);
11777+ cpus = num_online_cpus() - 1;
11778+ if (!cpus) {
11779+ spin_unlock(&call_lock);
11780+ return 0;
11781+ }
11782+
11783+ /* Can deadlock when called with interrupts disabled */
11784+ WARN_ON(irqs_disabled());
11785+
11786+ data.func = func;
11787+ data.info = info;
11788+ atomic_set(&data.started, 0);
11789+ data.wait = wait;
11790+ if (wait)
11791+ atomic_set(&data.finished, 0);
11792+
11793+ call_data = &data;
11794+ mb();
11795+
11796+ /* Send a message to all other CPUs and wait for them to respond */
11797+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
11798+
11799+ /* Wait for response */
11800+ while (atomic_read(&data.started) != cpus)
11801+ cpu_relax();
11802+
11803+ if (wait)
11804+ while (atomic_read(&data.finished) != cpus)
11805+ cpu_relax();
11806+ spin_unlock(&call_lock);
11807+
11808+ return 0;
11809+}
11810+EXPORT_SYMBOL(smp_call_function);
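
For reference, a minimal sketch of how a caller might use the smp_call_function()
interface documented above; the helper names and the counter being incremented are
hypothetical and not part of this patch:

#include <linux/smp.h>
#include <asm/atomic.h>

/* Hypothetical payload: each remote CPU bumps the shared counter. */
static void count_online_cpu(void *info)
{
	atomic_inc((atomic_t *)info);
}

static int count_other_cpus(void)
{
	atomic_t seen = ATOMIC_INIT(0);

	/* nonatomic is unused; wait=1 blocks until every CPU has run func. */
	if (smp_call_function(count_online_cpu, &seen, 0, 1))
		return -1;
	return atomic_read(&seen);	/* excludes the calling CPU */
}
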
11811+
11812+static void stop_this_cpu (void * dummy)
11813+{
11814+ /*
11815+ * Remove this CPU:
11816+ */
11817+ cpu_clear(smp_processor_id(), cpu_online_map);
11818+ local_irq_disable();
11819+ disable_all_local_evtchn();
11820+ if (cpu_data[smp_processor_id()].hlt_works_ok)
11821+ for(;;) halt();
11822+ for (;;);
11823+}
11824+
11825+/*
11826+ * this function calls the 'stop' function on all other CPUs in the system.
11827+ */
11828+
11829+void smp_send_stop(void)
11830+{
11831+ smp_call_function(stop_this_cpu, NULL, 1, 0);
11832+
11833+ local_irq_disable();
11834+ disable_all_local_evtchn();
11835+ local_irq_enable();
11836+}
11837+
11838+/*
11839+ * Reschedule call back. Nothing to do,
11840+ * all the work is done automatically when
11841+ * we return from the interrupt.
11842+ */
11843+irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
11844+ struct pt_regs *regs)
11845+{
11846+
11847+ return IRQ_HANDLED;
11848+}
11849+
11850+#include <linux/kallsyms.h>
11851+irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
11852+ struct pt_regs *regs)
11853+{
11854+ void (*func) (void *info) = call_data->func;
11855+ void *info = call_data->info;
11856+ int wait = call_data->wait;
11857+
11858+ /*
11859+ * Notify initiating CPU that I've grabbed the data and am
11860+ * about to execute the function
11861+ */
11862+ mb();
11863+ atomic_inc(&call_data->started);
11864+ /*
11865+ * At this point the info structure may be out of scope unless wait==1
11866+ */
11867+ irq_enter();
11868+ (*func)(info);
11869+ irq_exit();
11870+
11871+ if (wait) {
11872+ mb();
11873+ atomic_inc(&call_data->finished);
11874+ }
11875+
11876+ return IRQ_HANDLED;
11877+}
11878+
11879Index: head-2008-11-25/arch/x86/kernel/time_32-xen.c
11880===================================================================
11881--- /dev/null 1970-01-01 00:00:00.000000000 +0000
11882+++ head-2008-11-25/arch/x86/kernel/time_32-xen.c 2008-09-01 12:07:31.000000000 +0200
11883@@ -0,0 +1,1209 @@
11884+/*
11885+ * linux/arch/i386/kernel/time.c
11886+ *
11887+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
11888+ *
11889+ * This file contains the PC-specific time handling details:
11890+ * reading the RTC at bootup, etc..
11891+ * 1994-07-02 Alan Modra
11892+ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
11893+ * 1995-03-26 Markus Kuhn
11894+ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
11895+ * precision CMOS clock update
11896+ * 1996-05-03 Ingo Molnar
11897+ * fixed time warps in do_[slow|fast]_gettimeoffset()
11898+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
11899+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
11900+ * 1998-09-05 (Various)
11901+ * More robust do_fast_gettimeoffset() algorithm implemented
11902+ * (works with APM, Cyrix 6x86MX and Centaur C6),
11903+ * monotonic gettimeofday() with fast_get_timeoffset(),
11904+ * drift-proof precision TSC calibration on boot
11905+ * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
11906+ * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
11907+ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
11908+ * 1998-12-16 Andrea Arcangeli
11909+ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
11910+ * because it was not accounting lost_ticks.
11911+ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
11912+ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
11913+ * serialize accesses to xtime/lost_ticks).
11914+ */
11915+
11916+#include <linux/errno.h>
11917+#include <linux/sched.h>
11918+#include <linux/kernel.h>
11919+#include <linux/param.h>
11920+#include <linux/string.h>
11921+#include <linux/mm.h>
11922+#include <linux/interrupt.h>
11923+#include <linux/time.h>
11924+#include <linux/delay.h>
11925+#include <linux/init.h>
11926+#include <linux/smp.h>
11927+#include <linux/module.h>
11928+#include <linux/sysdev.h>
11929+#include <linux/bcd.h>
11930+#include <linux/efi.h>
11931+#include <linux/mca.h>
11932+#include <linux/sysctl.h>
11933+#include <linux/percpu.h>
11934+#include <linux/kernel_stat.h>
11935+#include <linux/posix-timers.h>
11936+#include <linux/cpufreq.h>
11937+
11938+#include <asm/io.h>
11939+#include <asm/smp.h>
11940+#include <asm/irq.h>
11941+#include <asm/msr.h>
11942+#include <asm/delay.h>
11943+#include <asm/mpspec.h>
11944+#include <asm/uaccess.h>
11945+#include <asm/processor.h>
11946+#include <asm/timer.h>
11947+#include <asm/sections.h>
11948+
11949+#include "mach_time.h"
11950+
11951+#include <linux/timex.h>
11952+
11953+#include <asm/hpet.h>
11954+
11955+#include <asm/arch_hooks.h>
11956+
11957+#include <xen/evtchn.h>
11958+#include <xen/interface/vcpu.h>
11959+
11960+#if defined (__i386__)
11961+#include <asm/i8259.h>
11962+#endif
11963+
11964+int pit_latch_buggy; /* extern */
11965+
11966+#if defined(__x86_64__)
11967+unsigned long vxtime_hz = PIT_TICK_RATE;
11968+struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
11969+volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
11970+unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
11971+struct timespec __xtime __section_xtime;
11972+struct timezone __sys_tz __section_sys_tz;
11973+#endif
11974+
11975+unsigned int cpu_khz; /* Detected as we calibrate the TSC */
11976+EXPORT_SYMBOL(cpu_khz);
11977+
11978+extern unsigned long wall_jiffies;
11979+
11980+DEFINE_SPINLOCK(rtc_lock);
11981+EXPORT_SYMBOL(rtc_lock);
11982+
11983+extern struct init_timer_opts timer_tsc_init;
11984+extern struct timer_opts timer_tsc;
11985+#define timer_none timer_tsc
11986+
11987+/* These are periodically updated in shared_info, and then copied here. */
11988+struct shadow_time_info {
11989+ u64 tsc_timestamp; /* TSC at last update of time vals. */
11990+ u64 system_timestamp; /* Time, in nanosecs, since boot. */
11991+ u32 tsc_to_nsec_mul;
11992+ u32 tsc_to_usec_mul;
11993+ int tsc_shift;
11994+ u32 version;
11995+};
11996+static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
11997+static struct timespec shadow_tv;
11998+static u32 shadow_tv_version;
11999+
12000+static struct timeval monotonic_tv;
12001+static spinlock_t monotonic_lock = SPIN_LOCK_UNLOCKED;
12002+
12003+/* Keep track of last time we did processing/updating of jiffies and xtime. */
12004+static u64 processed_system_time; /* System time (ns) at last processing. */
12005+static DEFINE_PER_CPU(u64, processed_system_time);
12006+
12007+/* How much CPU time was spent blocked and how much was 'stolen'? */
12008+static DEFINE_PER_CPU(u64, processed_stolen_time);
12009+static DEFINE_PER_CPU(u64, processed_blocked_time);
12010+
12011+/* Current runstate of each CPU (updated automatically by the hypervisor). */
12012+static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
12013+
12014+/* Must be signed, as it's compared with s64 quantities which can be -ve. */
12015+#define NS_PER_TICK (1000000000LL/HZ)
12016+
12017+static void __clock_was_set(void *unused)
12018+{
12019+ clock_was_set();
12020+}
12021+static DECLARE_WORK(clock_was_set_work, __clock_was_set, NULL);
12022+
12023+/*
12024+ * GCC 4.3 can turn loops over an induction variable into division. We do
12025+ * not support arbitrary 64-bit division, and so must break the induction.
12026+ */
12027+#define clobber_induction_variable(v) asm ( "" : "+r" (v) )
12028+
12029+static inline void __normalize_time(time_t *sec, s64 *nsec)
12030+{
12031+ while (*nsec >= NSEC_PER_SEC) {
12032+ clobber_induction_variable(*nsec);
12033+ (*nsec) -= NSEC_PER_SEC;
12034+ (*sec)++;
12035+ }
12036+ while (*nsec < 0) {
12037+ clobber_induction_variable(*nsec);
12038+ (*nsec) += NSEC_PER_SEC;
12039+ (*sec)--;
12040+ }
12041+}
12042+
12043+/* Does this guest OS track Xen time, or set its wall clock independently? */
12044+static int independent_wallclock = 0;
12045+static int __init __independent_wallclock(char *str)
12046+{
12047+ independent_wallclock = 1;
12048+ return 1;
12049+}
12050+__setup("independent_wallclock", __independent_wallclock);
12051+
12052+/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
12053+static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
12054+static int __init __permitted_clock_jitter(char *str)
12055+{
12056+ permitted_clock_jitter = simple_strtoul(str, NULL, 0);
12057+ return 1;
12058+}
12059+__setup("permitted_clock_jitter=", __permitted_clock_jitter);
12060+
12061+#if 0
12062+static void delay_tsc(unsigned long loops)
12063+{
12064+ unsigned long bclock, now;
12065+
12066+ rdtscl(bclock);
12067+ do {
12068+ rep_nop();
12069+ rdtscl(now);
12070+ } while ((now - bclock) < loops);
12071+}
12072+
12073+struct timer_opts timer_tsc = {
12074+ .name = "tsc",
12075+ .delay = delay_tsc,
12076+};
12077+#endif
12078+
12079+/*
12080+ * Scale a 64-bit delta by shifting and multiplying by a 32-bit fraction,
12081+ * yielding a 64-bit result.
12082+ */
12083+static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
12084+{
12085+ u64 product;
12086+#ifdef __i386__
12087+ u32 tmp1, tmp2;
12088+#endif
12089+
12090+ if (shift < 0)
12091+ delta >>= -shift;
12092+ else
12093+ delta <<= shift;
12094+
12095+#ifdef __i386__
12096+ __asm__ (
12097+ "mul %5 ; "
12098+ "mov %4,%%eax ; "
12099+ "mov %%edx,%4 ; "
12100+ "mul %5 ; "
12101+ "xor %5,%5 ; "
12102+ "add %4,%%eax ; "
12103+ "adc %5,%%edx ; "
12104+ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
12105+ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
12106+#else
12107+ __asm__ (
12108+ "mul %%rdx ; shrd $32,%%rdx,%%rax"
12109+ : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
12110+#endif
12111+
12112+ return product;
12113+}
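
The inline assembly above computes ((delta << shift) * mul_frac) >> 32 without
needing a full 64x64-bit multiply. A portable sketch of the same computation,
assuming a compiler and target that provide a 128-bit integer type (this helper
is illustrative only, not part of the patch):

#include <stdint.h>

static inline uint64_t scale_delta_portable(uint64_t delta, uint32_t mul_frac,
					    int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	/* Widen to 128 bits so the 64x32-bit product cannot overflow. */
	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}
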
12114+
12115+#if 0 /* defined (__i386__) */
12116+int read_current_timer(unsigned long *timer_val)
12117+{
12118+ rdtscl(*timer_val);
12119+ return 0;
12120+}
12121+#endif
12122+
12123+void init_cpu_khz(void)
12124+{
12125+ u64 __cpu_khz = 1000000ULL << 32;
12126+ struct vcpu_time_info *info = &vcpu_info(0)->time;
12127+ do_div(__cpu_khz, info->tsc_to_system_mul);
12128+ if (info->tsc_shift < 0)
12129+ cpu_khz = __cpu_khz << -info->tsc_shift;
12130+ else
12131+ cpu_khz = __cpu_khz >> info->tsc_shift;
12132+}
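
As a worked example of the conversion above (assumed values, not taken from a
real hypervisor): with tsc_to_system_mul = 0x80000000, i.e. half a nanosecond of
system time per TSC cycle before the shift, and tsc_shift = 0, the computation is
(1000000 << 32) / 0x80000000 = 2000000, so cpu_khz reports a 2 GHz TSC.
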
12133+
12134+static u64 get_nsec_offset(struct shadow_time_info *shadow)
12135+{
12136+ u64 now, delta;
12137+ rdtscll(now);
12138+ delta = now - shadow->tsc_timestamp;
12139+ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
12140+}
12141+
12142+static unsigned long get_usec_offset(struct shadow_time_info *shadow)
12143+{
12144+ u64 now, delta;
12145+ rdtscll(now);
12146+ delta = now - shadow->tsc_timestamp;
12147+ return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
12148+}
12149+
12150+static void __update_wallclock(time_t sec, long nsec)
12151+{
12152+ long wtm_nsec, xtime_nsec;
12153+ time_t wtm_sec, xtime_sec;
12154+ u64 tmp, wc_nsec;
12155+
12156+ /* Adjust wall-clock time base based on wall_jiffies ticks. */
12157+ wc_nsec = processed_system_time;
12158+ wc_nsec += sec * (u64)NSEC_PER_SEC;
12159+ wc_nsec += nsec;
12160+ wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
12161+
12162+ /* Split wallclock base into seconds and nanoseconds. */
12163+ tmp = wc_nsec;
12164+ xtime_nsec = do_div(tmp, 1000000000);
12165+ xtime_sec = (time_t)tmp;
12166+
12167+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
12168+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
12169+
12170+ set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
12171+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
12172+
12173+ ntp_clear();
12174+}
12175+
12176+static void update_wallclock(void)
12177+{
12178+ shared_info_t *s = HYPERVISOR_shared_info;
12179+
12180+ do {
12181+ shadow_tv_version = s->wc_version;
12182+ rmb();
12183+ shadow_tv.tv_sec = s->wc_sec;
12184+ shadow_tv.tv_nsec = s->wc_nsec;
12185+ rmb();
12186+ } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
12187+
12188+ if (!independent_wallclock)
12189+ __update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
12190+}
12191+
12192+/*
12193+ * Reads a consistent set of time-base values from Xen, into a shadow data
12194+ * area.
12195+ */
12196+static void get_time_values_from_xen(unsigned int cpu)
12197+{
12198+ struct vcpu_time_info *src;
12199+ struct shadow_time_info *dst;
12200+ unsigned long flags;
12201+ u32 pre_version, post_version;
12202+
12203+ src = &vcpu_info(cpu)->time;
12204+ dst = &per_cpu(shadow_time, cpu);
12205+
12206+ local_irq_save(flags);
12207+
12208+ do {
12209+ pre_version = dst->version = src->version;
12210+ rmb();
12211+ dst->tsc_timestamp = src->tsc_timestamp;
12212+ dst->system_timestamp = src->system_time;
12213+ dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
12214+ dst->tsc_shift = src->tsc_shift;
12215+ rmb();
12216+ post_version = src->version;
12217+ } while ((pre_version & 1) | (pre_version ^ post_version));
12218+
12219+ dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
12220+
12221+ local_irq_restore(flags);
12222+}
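
The version dance above is a lock-free reader: Xen makes the version odd while it
updates the record and bumps it to an even value afterwards, so a reader retries
whenever it observes an odd version or a version change. A stripped-down sketch of
the reader side, with a generic placeholder record rather than the real
vcpu_time_info layout:

#include <linux/types.h>
#include <asm/system.h>		/* rmb() */

struct versioned_record {
	volatile u32 version;	/* odd while the writer is mid-update */
	u64 payload;
};

static u64 read_versioned(const struct versioned_record *rec)
{
	u32 v;
	u64 val;

	do {
		v = rec->version;
		rmb();		/* read version before payload */
		val = rec->payload;
		rmb();		/* read payload before re-checking */
	} while ((v & 1) || v != rec->version);

	return val;
}
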
12223+
12224+static inline int time_values_up_to_date(unsigned int cpu)
12225+{
12226+ struct vcpu_time_info *src;
12227+ struct shadow_time_info *dst;
12228+
12229+ src = &vcpu_info(cpu)->time;
12230+ dst = &per_cpu(shadow_time, cpu);
12231+
12232+ rmb();
12233+ return (dst->version == src->version);
12234+}
12235+
12236+/*
12237+ * This is a special lock that is owned by the CPU and holds the index
12238+ * register we are working with. It is required for NMI access to the
12239+ * CMOS/RTC registers. See include/asm-i386/mc146818rtc.h for details.
12240+ */
12241+volatile unsigned long cmos_lock = 0;
12242+EXPORT_SYMBOL(cmos_lock);
12243+
12244+/* Routines for accessing the CMOS RAM/RTC. */
12245+unsigned char rtc_cmos_read(unsigned char addr)
12246+{
12247+ unsigned char val;
12248+ lock_cmos_prefix(addr);
12249+ outb_p(addr, RTC_PORT(0));
12250+ val = inb_p(RTC_PORT(1));
12251+ lock_cmos_suffix(addr);
12252+ return val;
12253+}
12254+EXPORT_SYMBOL(rtc_cmos_read);
12255+
12256+void rtc_cmos_write(unsigned char val, unsigned char addr)
12257+{
12258+ lock_cmos_prefix(addr);
12259+ outb_p(addr, RTC_PORT(0));
12260+ outb_p(val, RTC_PORT(1));
12261+ lock_cmos_suffix(addr);
12262+}
12263+EXPORT_SYMBOL(rtc_cmos_write);
12264+
12265+/*
12266+ * This version of gettimeofday has microsecond resolution
12267+ * and better than microsecond precision on fast x86 machines with TSC.
12268+ */
12269+void do_gettimeofday(struct timeval *tv)
12270+{
12271+ unsigned long seq;
12272+ unsigned long usec, sec;
12273+ unsigned long flags;
12274+ s64 nsec;
12275+ unsigned int cpu;
12276+ struct shadow_time_info *shadow;
12277+ u32 local_time_version;
12278+
12279+ cpu = get_cpu();
12280+ shadow = &per_cpu(shadow_time, cpu);
12281+
12282+ do {
12283+ unsigned long lost;
12284+
12285+ local_time_version = shadow->version;
12286+ seq = read_seqbegin(&xtime_lock);
12287+
12288+ usec = get_usec_offset(shadow);
12289+ lost = jiffies - wall_jiffies;
12290+
12291+ if (unlikely(lost))
12292+ usec += lost * (USEC_PER_SEC / HZ);
12293+
12294+ sec = xtime.tv_sec;
12295+ usec += (xtime.tv_nsec / NSEC_PER_USEC);
12296+
12297+ nsec = shadow->system_timestamp - processed_system_time;
12298+ __normalize_time(&sec, &nsec);
12299+ usec += (long)nsec / NSEC_PER_USEC;
12300+
12301+ if (unlikely(!time_values_up_to_date(cpu))) {
12302+ /*
12303+ * We may have blocked for a long time,
12304+ * rendering our calculations invalid
12305+ * (e.g. the time delta may have
12306+ * overflowed). Detect that and recalculate
12307+ * with fresh values.
12308+ */
12309+ get_time_values_from_xen(cpu);
12310+ continue;
12311+ }
12312+ } while (read_seqretry(&xtime_lock, seq) ||
12313+ (local_time_version != shadow->version));
12314+
12315+ put_cpu();
12316+
12317+ while (usec >= USEC_PER_SEC) {
12318+ usec -= USEC_PER_SEC;
12319+ sec++;
12320+ }
12321+
12322+ spin_lock_irqsave(&monotonic_lock, flags);
12323+ if ((sec > monotonic_tv.tv_sec) ||
12324+ ((sec == monotonic_tv.tv_sec) && (usec > monotonic_tv.tv_usec)))
12325+ {
12326+ monotonic_tv.tv_sec = sec;
12327+ monotonic_tv.tv_usec = usec;
12328+ } else {
12329+ sec = monotonic_tv.tv_sec;
12330+ usec = monotonic_tv.tv_usec;
12331+ }
12332+ spin_unlock_irqrestore(&monotonic_lock, flags);
12333+
12334+ tv->tv_sec = sec;
12335+ tv->tv_usec = usec;
12336+}
12337+
12338+EXPORT_SYMBOL(do_gettimeofday);
12339+
12340+int do_settimeofday(struct timespec *tv)
12341+{
12342+ time_t sec;
12343+ s64 nsec;
12344+ unsigned int cpu;
12345+ struct shadow_time_info *shadow;
12346+ struct xen_platform_op op;
12347+
12348+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
12349+ return -EINVAL;
12350+
12351+ cpu = get_cpu();
12352+ shadow = &per_cpu(shadow_time, cpu);
12353+
12354+ write_seqlock_irq(&xtime_lock);
12355+
12356+ /*
12357+	 * Guard against having been blocked for so long that our time
12358+	 * delta overflows. If that happens our shadow time values are
12359+	 * stale, so refresh them from Xen and retry.
12360+ */
12361+ for (;;) {
12362+ nsec = tv->tv_nsec - get_nsec_offset(shadow);
12363+ if (time_values_up_to_date(cpu))
12364+ break;
12365+ get_time_values_from_xen(cpu);
12366+ }
12367+ sec = tv->tv_sec;
12368+ __normalize_time(&sec, &nsec);
12369+
12370+ if (is_initial_xendomain() && !independent_wallclock) {
12371+ op.cmd = XENPF_settime;
12372+ op.u.settime.secs = sec;
12373+ op.u.settime.nsecs = nsec;
12374+ op.u.settime.system_time = shadow->system_timestamp;
12375+ WARN_ON(HYPERVISOR_platform_op(&op));
12376+ update_wallclock();
12377+ } else if (independent_wallclock) {
12378+ nsec -= shadow->system_timestamp;
12379+ __normalize_time(&sec, &nsec);
12380+ __update_wallclock(sec, nsec);
12381+ }
12382+
12383+ /* Reset monotonic gettimeofday() timeval. */
12384+ spin_lock(&monotonic_lock);
12385+ monotonic_tv.tv_sec = 0;
12386+ monotonic_tv.tv_usec = 0;
12387+ spin_unlock(&monotonic_lock);
12388+
12389+ write_sequnlock_irq(&xtime_lock);
12390+
12391+ put_cpu();
12392+
12393+ clock_was_set();
12394+ return 0;
12395+}
12396+
12397+EXPORT_SYMBOL(do_settimeofday);
12398+
12399+static void sync_xen_wallclock(unsigned long dummy);
12400+static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
12401+static void sync_xen_wallclock(unsigned long dummy)
12402+{
12403+ time_t sec;
12404+ s64 nsec;
12405+ struct xen_platform_op op;
12406+
12407+ if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
12408+ return;
12409+
12410+ write_seqlock_irq(&xtime_lock);
12411+
12412+ sec = xtime.tv_sec;
12413+ nsec = xtime.tv_nsec + ((jiffies - wall_jiffies) * (u64)NS_PER_TICK);
12414+ __normalize_time(&sec, &nsec);
12415+
12416+ op.cmd = XENPF_settime;
12417+ op.u.settime.secs = sec;
12418+ op.u.settime.nsecs = nsec;
12419+ op.u.settime.system_time = processed_system_time;
12420+ WARN_ON(HYPERVISOR_platform_op(&op));
12421+
12422+ update_wallclock();
12423+
12424+ write_sequnlock_irq(&xtime_lock);
12425+
12426+ /* Once per minute. */
12427+ mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
12428+}
12429+
12430+static int set_rtc_mmss(unsigned long nowtime)
12431+{
12432+ int retval;
12433+ unsigned long flags;
12434+
12435+ if (independent_wallclock || !is_initial_xendomain())
12436+ return 0;
12437+
12438+ /* gets recalled with irq locally disabled */
12439+ /* XXX - does irqsave resolve this? -johnstul */
12440+ spin_lock_irqsave(&rtc_lock, flags);
12441+ if (efi_enabled)
12442+ retval = efi_set_rtc_mmss(nowtime);
12443+ else
12444+ retval = mach_set_rtc_mmss(nowtime);
12445+ spin_unlock_irqrestore(&rtc_lock, flags);
12446+
12447+ return retval;
12448+}
12449+
12450+/* monotonic_clock(): returns # of nanoseconds passed since time_init()
12451+ * Note: This function is required to return accurate
12452+ * time even in the absence of multiple timer ticks.
12453+ */
12454+unsigned long long monotonic_clock(void)
12455+{
12456+ unsigned int cpu = get_cpu();
12457+ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
12458+ u64 time;
12459+ u32 local_time_version;
12460+
12461+ do {
12462+ local_time_version = shadow->version;
12463+ barrier();
12464+ time = shadow->system_timestamp + get_nsec_offset(shadow);
12465+ if (!time_values_up_to_date(cpu))
12466+ get_time_values_from_xen(cpu);
12467+ barrier();
12468+ } while (local_time_version != shadow->version);
12469+
12470+ put_cpu();
12471+
12472+ return time;
12473+}
12474+EXPORT_SYMBOL(monotonic_clock);
12475+
12476+#ifdef __x86_64__
12477+unsigned long long sched_clock(void)
12478+{
12479+ return monotonic_clock();
12480+}
12481+#endif
12482+
12483+#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
12484+unsigned long profile_pc(struct pt_regs *regs)
12485+{
12486+ unsigned long pc = instruction_pointer(regs);
12487+
12488+#ifdef __x86_64__
12489+ /* Assume the lock function has either no stack frame or only a single word.
12490+ This checks if the address on the stack looks like a kernel text address.
12491+ There is a small window for false hits, but in that case the tick
12492+ is just accounted to the spinlock function.
12493+ Better would be to write these functions in assembler again
12494+ and check exactly. */
12495+ if (!user_mode_vm(regs) && in_lock_functions(pc)) {
12496+ char *v = *(char **)regs->rsp;
12497+ if ((v >= _stext && v <= _etext) ||
12498+ (v >= _sinittext && v <= _einittext) ||
12499+ (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
12500+ return (unsigned long)v;
12501+ return ((unsigned long *)regs->rsp)[1];
12502+ }
12503+#else
12504+ if (!user_mode_vm(regs) && in_lock_functions(pc))
12505+ return *(unsigned long *)(regs->ebp + 4);
12506+#endif
12507+
12508+ return pc;
12509+}
12510+EXPORT_SYMBOL(profile_pc);
12511+#endif
12512+
12513+/*
12514+ * This is the same as the above, except we _also_ save the current
12515+ * Time Stamp Counter value at the time of the timer interrupt, so that
12516+ * we later on can estimate the time of day more exactly.
12517+ */
12518+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
12519+{
12520+ s64 delta, delta_cpu, stolen, blocked;
12521+ u64 sched_time;
12522+ unsigned int i, cpu = smp_processor_id();
12523+ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
12524+ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
12525+
12526+ /*
12527+ * Here we are in the timer irq handler. We just have irqs locally
12528+ * disabled but we don't know if the timer_bh is running on the other
12529+	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
12530+	 * the irq version of write_lock because, as just said, we have irqs
12531+	 * locally disabled. -arca
12532+ */
12533+ write_seqlock(&xtime_lock);
12534+
12535+ do {
12536+ get_time_values_from_xen(cpu);
12537+
12538+ /* Obtain a consistent snapshot of elapsed wallclock cycles. */
12539+ delta = delta_cpu =
12540+ shadow->system_timestamp + get_nsec_offset(shadow);
12541+ delta -= processed_system_time;
12542+ delta_cpu -= per_cpu(processed_system_time, cpu);
12543+
12544+ /*
12545+ * Obtain a consistent snapshot of stolen/blocked cycles. We
12546+ * can use state_entry_time to detect if we get preempted here.
12547+ */
12548+ do {
12549+ sched_time = runstate->state_entry_time;
12550+ barrier();
12551+ stolen = runstate->time[RUNSTATE_runnable] +
12552+ runstate->time[RUNSTATE_offline] -
12553+ per_cpu(processed_stolen_time, cpu);
12554+ blocked = runstate->time[RUNSTATE_blocked] -
12555+ per_cpu(processed_blocked_time, cpu);
12556+ barrier();
12557+ } while (sched_time != runstate->state_entry_time);
12558+ } while (!time_values_up_to_date(cpu));
12559+
12560+ if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
12561+ unlikely(delta_cpu < -(s64)permitted_clock_jitter))
12562+ && printk_ratelimit()) {
12563+ printk("Timer ISR/%u: Time went backwards: "
12564+ "delta=%lld delta_cpu=%lld shadow=%lld "
12565+ "off=%lld processed=%lld cpu_processed=%lld\n",
12566+ cpu, delta, delta_cpu, shadow->system_timestamp,
12567+ (s64)get_nsec_offset(shadow),
12568+ processed_system_time,
12569+ per_cpu(processed_system_time, cpu));
12570+ for (i = 0; i < num_online_cpus(); i++)
12571+ printk(" %d: %lld\n", i,
12572+ per_cpu(processed_system_time, i));
12573+ }
12574+
12575+ /* System-wide jiffy work. */
12576+ while (delta >= NS_PER_TICK) {
12577+ delta -= NS_PER_TICK;
12578+ processed_system_time += NS_PER_TICK;
12579+ do_timer(regs);
12580+ }
12581+
12582+ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
12583+ update_wallclock();
12584+ if (keventd_up())
12585+ schedule_work(&clock_was_set_work);
12586+ }
12587+
12588+ write_sequnlock(&xtime_lock);
12589+
12590+ /*
12591+ * Account stolen ticks.
12592+ * HACK: Passing NULL to account_steal_time()
12593+ * ensures that the ticks are accounted as stolen.
12594+ */
12595+ if ((stolen > 0) && (delta_cpu > 0)) {
12596+ delta_cpu -= stolen;
12597+ if (unlikely(delta_cpu < 0))
12598+ stolen += delta_cpu; /* clamp local-time progress */
12599+ do_div(stolen, NS_PER_TICK);
12600+ per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
12601+ per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
12602+ account_steal_time(NULL, (cputime_t)stolen);
12603+ }
12604+
12605+ /*
12606+ * Account blocked ticks.
12607+ * HACK: Passing idle_task to account_steal_time()
12608+ * ensures that the ticks are accounted as idle/wait.
12609+ */
12610+ if ((blocked > 0) && (delta_cpu > 0)) {
12611+ delta_cpu -= blocked;
12612+ if (unlikely(delta_cpu < 0))
12613+ blocked += delta_cpu; /* clamp local-time progress */
12614+ do_div(blocked, NS_PER_TICK);
12615+ per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
12616+ per_cpu(processed_system_time, cpu) += blocked * NS_PER_TICK;
12617+ account_steal_time(idle_task(cpu), (cputime_t)blocked);
12618+ }
12619+
12620+ /* Account user/system ticks. */
12621+ if (delta_cpu > 0) {
12622+ do_div(delta_cpu, NS_PER_TICK);
12623+ per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
12624+ if (user_mode_vm(regs))
12625+ account_user_time(current, (cputime_t)delta_cpu);
12626+ else
12627+ account_system_time(current, HARDIRQ_OFFSET,
12628+ (cputime_t)delta_cpu);
12629+ }
12630+
12631+ /* Offlined for more than a few seconds? Avoid lockup warnings. */
12632+ if (stolen > 5*HZ)
12633+ touch_softlockup_watchdog();
12634+
12635+ /* Local timer processing (see update_process_times()). */
12636+ run_local_timers();
12637+ if (rcu_pending(cpu))
12638+ rcu_check_callbacks(cpu, user_mode_vm(regs));
12639+ scheduler_tick();
12640+ run_posix_cpu_timers(current);
12641+ profile_tick(CPU_PROFILING, regs);
12642+
12643+ return IRQ_HANDLED;
12644+}
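
To make the stolen/blocked bookkeeping above concrete, a worked example with
assumed numbers (HZ=100, so NS_PER_TICK is 10 ms): if 35 ms of wall-clock time
elapsed on this vCPU since the last pass, of which 12 ms were stolen and 18 ms
spent blocked, then one tick is accounted as steal time, one tick as idle/wait,
and the remaining 5 ms (zero whole ticks) as user/system time. Sub-tick
remainders are not added to the per-CPU processed_* counters, so they are picked
up again on the next timer interrupt.
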
12645+
12646+static void init_missing_ticks_accounting(unsigned int cpu)
12647+{
12648+ struct vcpu_register_runstate_memory_area area;
12649+ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
12650+ int rc;
12651+
12652+ memset(runstate, 0, sizeof(*runstate));
12653+
12654+ area.addr.v = runstate;
12655+ rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
12656+ WARN_ON(rc && rc != -ENOSYS);
12657+
12658+ per_cpu(processed_blocked_time, cpu) =
12659+ runstate->time[RUNSTATE_blocked];
12660+ per_cpu(processed_stolen_time, cpu) =
12661+ runstate->time[RUNSTATE_runnable] +
12662+ runstate->time[RUNSTATE_offline];
12663+}
12664+
12665+/* not static: needed by APM */
12666+unsigned long get_cmos_time(void)
12667+{
12668+ unsigned long retval;
12669+ unsigned long flags;
12670+
12671+ spin_lock_irqsave(&rtc_lock, flags);
12672+
12673+ if (efi_enabled)
12674+ retval = efi_get_time();
12675+ else
12676+ retval = mach_get_cmos_time();
12677+
12678+ spin_unlock_irqrestore(&rtc_lock, flags);
12679+
12680+ return retval;
12681+}
12682+EXPORT_SYMBOL(get_cmos_time);
12683+
12684+static void sync_cmos_clock(unsigned long dummy);
12685+
12686+static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
12687+
12688+static void sync_cmos_clock(unsigned long dummy)
12689+{
12690+ struct timeval now, next;
12691+ int fail = 1;
12692+
12693+ /*
12694+ * If we have an externally synchronized Linux clock, then update
12695+ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
12696+ * called as close as possible to 500 ms before the new second starts.
12697+ * This code is run on a timer. If the clock is set, that timer
12698+ * may not expire at the correct time. Thus, we adjust...
12699+ */
12700+ if (!ntp_synced())
12701+ /*
12702+ * Not synced, exit, do not restart a timer (if one is
12703+ * running, let it run out).
12704+ */
12705+ return;
12706+
12707+ do_gettimeofday(&now);
12708+ if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
12709+ now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
12710+ fail = set_rtc_mmss(now.tv_sec);
12711+
12712+ next.tv_usec = USEC_AFTER - now.tv_usec;
12713+ if (next.tv_usec <= 0)
12714+ next.tv_usec += USEC_PER_SEC;
12715+
12716+ if (!fail)
12717+ next.tv_sec = 659;
12718+ else
12719+ next.tv_sec = 0;
12720+
12721+ if (next.tv_usec >= USEC_PER_SEC) {
12722+ next.tv_sec++;
12723+ next.tv_usec -= USEC_PER_SEC;
12724+ }
12725+ mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
12726+}
12727+
12728+void notify_arch_cmos_timer(void)
12729+{
12730+ mod_timer(&sync_cmos_timer, jiffies + 1);
12731+ mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
12732+}
12733+
12734+static int timer_resume(struct sys_device *dev)
12735+{
12736+ extern void time_resume(void);
12737+ time_resume();
12738+ return 0;
12739+}
12740+
12741+static struct sysdev_class timer_sysclass = {
12742+ .resume = timer_resume,
12743+ set_kset_name("timer"),
12744+};
12745+
12746+
12747+/* XXX this driverfs stuff should probably go elsewhere later -john */
12748+static struct sys_device device_timer = {
12749+ .id = 0,
12750+ .cls = &timer_sysclass,
12751+};
12752+
12753+static int time_init_device(void)
12754+{
12755+ int error = sysdev_class_register(&timer_sysclass);
12756+ if (!error)
12757+ error = sysdev_register(&device_timer);
12758+ return error;
12759+}
12760+
12761+device_initcall(time_init_device);
12762+
12763+#ifdef CONFIG_HPET_TIMER
12764+extern void (*late_time_init)(void);
12765+/* Duplicate of time_init() below, with hpet_enable part added */
12766+static void __init hpet_time_init(void)
12767+{
12768+ xtime.tv_sec = get_cmos_time();
12769+ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
12770+ set_normalized_timespec(&wall_to_monotonic,
12771+ -xtime.tv_sec, -xtime.tv_nsec);
12772+
12773+ if ((hpet_enable() >= 0) && hpet_use_timer) {
12774+ printk("Using HPET for base-timer\n");
12775+ }
12776+
12777+ time_init_hook();
12778+}
12779+#endif
12780+
12781+/* Dynamically-mapped IRQ. */
12782+DEFINE_PER_CPU(int, timer_irq);
12783+
12784+extern void (*late_time_init)(void);
12785+static void setup_cpu0_timer_irq(void)
12786+{
12787+ per_cpu(timer_irq, 0) =
12788+ bind_virq_to_irqhandler(
12789+ VIRQ_TIMER,
12790+ 0,
12791+ timer_interrupt,
12792+ SA_INTERRUPT,
12793+ "timer0",
12794+ NULL);
12795+ BUG_ON(per_cpu(timer_irq, 0) < 0);
12796+}
12797+
12798+static struct vcpu_set_periodic_timer xen_set_periodic_tick = {
12799+ .period_ns = NS_PER_TICK
12800+};
12801+
12802+void __init time_init(void)
12803+{
12804+#ifdef CONFIG_HPET_TIMER
12805+ if (is_hpet_capable()) {
12806+ /*
12807+ * HPET initialization needs to do memory-mapped io. So, let
12808+ * us do a late initialization after mem_init().
12809+ */
12810+ late_time_init = hpet_time_init;
12811+ return;
12812+ }
12813+#endif
12814+
12815+ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0,
12816+ &xen_set_periodic_tick)) {
12817+ case 0:
12818+#if CONFIG_XEN_COMPAT <= 0x030004
12819+ case -ENOSYS:
12820+#endif
12821+ break;
12822+ default:
12823+ BUG();
12824+ }
12825+
12826+ get_time_values_from_xen(0);
12827+
12828+ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
12829+ per_cpu(processed_system_time, 0) = processed_system_time;
12830+ init_missing_ticks_accounting(0);
12831+
12832+ update_wallclock();
12833+
12834+ init_cpu_khz();
12835+ printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
12836+ cpu_khz / 1000, cpu_khz % 1000);
12837+
12838+#if defined(__x86_64__)
12839+ vxtime.mode = VXTIME_TSC;
12840+ vxtime.quot = (1000000L << 32) / vxtime_hz;
12841+ vxtime.tsc_quot = (1000L << 32) / cpu_khz;
12842+ sync_core();
12843+ rdtscll(vxtime.last_tsc);
12844+#endif
12845+
12846+ /* Cannot request_irq() until kmem is initialised. */
12847+ late_time_init = setup_cpu0_timer_irq;
12848+}
12849+
12850+/* Convert jiffies to system time. */
12851+u64 jiffies_to_st(unsigned long j)
12852+{
12853+ unsigned long seq;
12854+ long delta;
12855+ u64 st;
12856+
12857+ do {
12858+ seq = read_seqbegin(&xtime_lock);
12859+ delta = j - jiffies;
12860+ if (delta < 1) {
12861+ /* Triggers in some wrap-around cases, but that's okay:
12862+ * we just end up with a shorter timeout. */
12863+ st = processed_system_time + NS_PER_TICK;
12864+ } else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
12865+ /* Very long timeout means there is no pending timer.
12866+ * We indicate this to Xen by passing zero timeout. */
12867+ st = 0;
12868+ } else {
12869+ st = processed_system_time + delta * (u64)NS_PER_TICK;
12870+ }
12871+ } while (read_seqretry(&xtime_lock, seq));
12872+
12873+ return st;
12874+}
12875+EXPORT_SYMBOL(jiffies_to_st);
12876+
12877+/*
12878+ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
12879+ * These functions are based on implementations from arch/s390/kernel/time.c
12880+ */
12881+static void stop_hz_timer(void)
12882+{
12883+ struct vcpu_set_singleshot_timer singleshot;
12884+ unsigned int cpu = smp_processor_id();
12885+ unsigned long j;
12886+ int rc;
12887+
12888+ cpu_set(cpu, nohz_cpu_mask);
12889+
12890+ /* See matching smp_mb in rcu_start_batch in rcupdate.c. These mbs */
12891+ /* ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a */
12892+ /* value of rcp->cur that matches rdp->quiescbatch and allows us to */
12893+ /* stop the hz timer then the cpumasks created for subsequent values */
12894+ /* of cur in rcu_start_batch are guaranteed to pick up the updated */
12895+ /* nohz_cpu_mask and so will not depend on this cpu. */
12896+
12897+ smp_mb();
12898+
12899+ /* Leave ourselves in tick mode if rcu or softirq or timer pending. */
12900+ if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
12901+ (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
12902+ cpu_clear(cpu, nohz_cpu_mask);
12903+ j = jiffies + 1;
12904+ }
12905+
12906+ singleshot.timeout_abs_ns = jiffies_to_st(j) + NS_PER_TICK/2;
12907+ singleshot.flags = 0;
12908+ rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &singleshot);
12909+#if CONFIG_XEN_COMPAT <= 0x030004
12910+ if (rc) {
12911+ BUG_ON(rc != -ENOSYS);
12912+ rc = HYPERVISOR_set_timer_op(singleshot.timeout_abs_ns);
12913+ }
12914+#endif
12915+ BUG_ON(rc);
12916+}
12917+
12918+static void start_hz_timer(void)
12919+{
12920+ cpu_clear(smp_processor_id(), nohz_cpu_mask);
12921+}
12922+
12923+void raw_safe_halt(void)
12924+{
12925+ stop_hz_timer();
12926+ /* Blocking includes an implicit local_irq_enable(). */
12927+ HYPERVISOR_block();
12928+ start_hz_timer();
12929+}
12930+EXPORT_SYMBOL(raw_safe_halt);
12931+
12932+void halt(void)
12933+{
12934+ if (irqs_disabled())
12935+ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
12936+}
12937+EXPORT_SYMBOL(halt);
12938+
12939+/* No locking required. Interrupts are disabled on all CPUs. */
12940+void time_resume(void)
12941+{
12942+ unsigned int cpu;
12943+
12944+ init_cpu_khz();
12945+
12946+ for_each_online_cpu(cpu) {
12947+ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
12948+ &xen_set_periodic_tick)) {
12949+ case 0:
12950+#if CONFIG_XEN_COMPAT <= 0x030004
12951+ case -ENOSYS:
12952+#endif
12953+ break;
12954+ default:
12955+ BUG();
12956+ }
12957+ get_time_values_from_xen(cpu);
12958+ per_cpu(processed_system_time, cpu) =
12959+ per_cpu(shadow_time, 0).system_timestamp;
12960+ init_missing_ticks_accounting(cpu);
12961+ }
12962+
12963+ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
12964+
12965+ update_wallclock();
12966+}
12967+
12968+#ifdef CONFIG_SMP
12969+static char timer_name[NR_CPUS][15];
12970+
12971+int __cpuinit local_setup_timer(unsigned int cpu)
12972+{
12973+ int seq, irq;
12974+
12975+ BUG_ON(cpu == 0);
12976+
12977+ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
12978+ &xen_set_periodic_tick)) {
12979+ case 0:
12980+#if CONFIG_XEN_COMPAT <= 0x030004
12981+ case -ENOSYS:
12982+#endif
12983+ break;
12984+ default:
12985+ BUG();
12986+ }
12987+
12988+ do {
12989+ seq = read_seqbegin(&xtime_lock);
12990+ /* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
12991+ per_cpu(processed_system_time, cpu) =
12992+ per_cpu(shadow_time, 0).system_timestamp;
12993+ init_missing_ticks_accounting(cpu);
12994+ } while (read_seqretry(&xtime_lock, seq));
12995+
12996+ sprintf(timer_name[cpu], "timer%u", cpu);
12997+ irq = bind_virq_to_irqhandler(VIRQ_TIMER,
12998+ cpu,
12999+ timer_interrupt,
13000+ SA_INTERRUPT,
13001+ timer_name[cpu],
13002+ NULL);
13003+ if (irq < 0)
13004+ return irq;
13005+ per_cpu(timer_irq, cpu) = irq;
13006+
13007+ return 0;
13008+}
13009+
13010+void __cpuexit local_teardown_timer(unsigned int cpu)
13011+{
13012+ BUG_ON(cpu == 0);
13013+ unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
13014+}
13015+#endif
13016+
13017+#ifdef CONFIG_CPU_FREQ
13018+static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
13019+ void *data)
13020+{
13021+ struct cpufreq_freqs *freq = data;
13022+ struct xen_platform_op op;
13023+
13024+ if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
13025+ return 0;
13026+
13027+ if (val == CPUFREQ_PRECHANGE)
13028+ return 0;
13029+
13030+ op.cmd = XENPF_change_freq;
13031+ op.u.change_freq.flags = 0;
13032+ op.u.change_freq.cpu = freq->cpu;
13033+ op.u.change_freq.freq = (u64)freq->new * 1000;
13034+ WARN_ON(HYPERVISOR_platform_op(&op));
13035+
13036+ return 0;
13037+}
13038+
13039+static struct notifier_block time_cpufreq_notifier_block = {
13040+ .notifier_call = time_cpufreq_notifier
13041+};
13042+
13043+static int __init cpufreq_time_setup(void)
13044+{
13045+	if (cpufreq_register_notifier(&time_cpufreq_notifier_block,
13046+ CPUFREQ_TRANSITION_NOTIFIER)) {
13047+ printk(KERN_ERR "failed to set up cpufreq notifier\n");
13048+ return -ENODEV;
13049+ }
13050+ return 0;
13051+}
13052+
13053+core_initcall(cpufreq_time_setup);
13054+#endif
13055+
13056+/*
13057+ * /proc/sys/xen: This really belongs in another file. It can stay here for
13058+ * now however.
13059+ */
13060+static ctl_table xen_subtable[] = {
13061+ {
13062+ .ctl_name = 1,
13063+ .procname = "independent_wallclock",
13064+ .data = &independent_wallclock,
13065+ .maxlen = sizeof(independent_wallclock),
13066+ .mode = 0644,
13067+ .proc_handler = proc_dointvec
13068+ },
13069+ {
13070+ .ctl_name = 2,
13071+ .procname = "permitted_clock_jitter",
13072+ .data = &permitted_clock_jitter,
13073+ .maxlen = sizeof(permitted_clock_jitter),
13074+ .mode = 0644,
13075+ .proc_handler = proc_doulongvec_minmax
13076+ },
13077+ { 0 }
13078+};
13079+static ctl_table xen_table[] = {
13080+ {
13081+ .ctl_name = 123,
13082+ .procname = "xen",
13083+ .mode = 0555,
13084+ .child = xen_subtable},
13085+ { 0 }
13086+};
13087+static int __init xen_sysctl_init(void)
13088+{
13089+ (void)register_sysctl_table(xen_table, 0);
13090+ return 0;
13091+}
13092+__initcall(xen_sysctl_init);
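
The table above surfaces the two knobs as /proc/sys/xen/independent_wallclock and
/proc/sys/xen/permitted_clock_jitter. A minimal userspace sketch for reading the
first one (a hypothetical standalone program, not part of the patch):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/xen/independent_wallclock", "r");
	int val;

	if (!f) {
		perror("/proc/sys/xen/independent_wallclock");
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("independent_wallclock = %d\n", val);
	fclose(f);
	return 0;
}
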
13093Index: head-2008-11-25/arch/x86/kernel/traps_32-xen.c
13094===================================================================
13095--- /dev/null 1970-01-01 00:00:00.000000000 +0000
13096+++ head-2008-11-25/arch/x86/kernel/traps_32-xen.c 2008-04-02 12:34:02.000000000 +0200
13097@@ -0,0 +1,1190 @@
13098+/*
13099+ * linux/arch/i386/traps.c
13100+ *
13101+ * Copyright (C) 1991, 1992 Linus Torvalds
13102+ *
13103+ * Pentium III FXSR, SSE support
13104+ * Gareth Hughes <gareth@valinux.com>, May 2000
13105+ */
13106+
13107+/*
13108+ * 'Traps.c' handles hardware traps and faults after we have saved some
13109+ * state in 'asm.s'.
13110+ */
13111+#include <linux/sched.h>
13112+#include <linux/kernel.h>
13113+#include <linux/string.h>
13114+#include <linux/errno.h>
13115+#include <linux/timer.h>
13116+#include <linux/mm.h>
13117+#include <linux/init.h>
13118+#include <linux/delay.h>
13119+#include <linux/spinlock.h>
13120+#include <linux/interrupt.h>
13121+#include <linux/highmem.h>
13122+#include <linux/kallsyms.h>
13123+#include <linux/ptrace.h>
13124+#include <linux/utsname.h>
13125+#include <linux/kprobes.h>
13126+#include <linux/kexec.h>
13127+#include <linux/unwind.h>
13128+
13129+#ifdef CONFIG_EISA
13130+#include <linux/ioport.h>
13131+#include <linux/eisa.h>
13132+#endif
13133+
13134+#ifdef CONFIG_MCA
13135+#include <linux/mca.h>
13136+#endif
13137+
13138+#include <asm/processor.h>
13139+#include <asm/system.h>
13140+#include <asm/uaccess.h>
13141+#include <asm/io.h>
13142+#include <asm/atomic.h>
13143+#include <asm/debugreg.h>
13144+#include <asm/desc.h>
13145+#include <asm/i387.h>
13146+#include <asm/nmi.h>
13147+#include <asm/unwind.h>
13148+#include <asm/smp.h>
13149+#include <asm/arch_hooks.h>
13150+#include <asm/kdebug.h>
13151+
13152+#include <linux/module.h>
13153+
13154+#include "mach_traps.h"
13155+
13156+asmlinkage int system_call(void);
13157+
13158+struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
13159+ { 0, 0 }, { 0, 0 } };
13160+
13161+/* Do we ignore FPU interrupts ? */
13162+char ignore_fpu_irq = 0;
13163+
13164+#ifndef CONFIG_X86_NO_IDT
13165+/*
13166+ * The IDT has to be page-aligned to simplify the Pentium
13167+ * F0 0F bug workaround.. We have a special link segment
13168+ * for this.
13169+ */
13170+struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
13171+#endif
13172+
13173+asmlinkage void divide_error(void);
13174+asmlinkage void debug(void);
13175+asmlinkage void nmi(void);
13176+asmlinkage void int3(void);
13177+asmlinkage void overflow(void);
13178+asmlinkage void bounds(void);
13179+asmlinkage void invalid_op(void);
13180+asmlinkage void device_not_available(void);
13181+asmlinkage void coprocessor_segment_overrun(void);
13182+asmlinkage void invalid_TSS(void);
13183+asmlinkage void segment_not_present(void);
13184+asmlinkage void stack_segment(void);
13185+asmlinkage void general_protection(void);
13186+asmlinkage void page_fault(void);
13187+asmlinkage void coprocessor_error(void);
13188+asmlinkage void simd_coprocessor_error(void);
13189+asmlinkage void alignment_check(void);
13190+#ifndef CONFIG_XEN
13191+asmlinkage void spurious_interrupt_bug(void);
13192+#else
13193+asmlinkage void fixup_4gb_segment(void);
13194+#endif
13195+asmlinkage void machine_check(void);
13196+
13197+static int kstack_depth_to_print = 24;
13198+#ifdef CONFIG_STACK_UNWIND
13199+static int call_trace = 1;
13200+#else
13201+#define call_trace (-1)
13202+#endif
13203+ATOMIC_NOTIFIER_HEAD(i386die_chain);
13204+
13205+int register_die_notifier(struct notifier_block *nb)
13206+{
13207+ vmalloc_sync_all();
13208+ return atomic_notifier_chain_register(&i386die_chain, nb);
13209+}
13210+EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
13211+
13212+int unregister_die_notifier(struct notifier_block *nb)
13213+{
13214+ return atomic_notifier_chain_unregister(&i386die_chain, nb);
13215+}
13216+EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
13217+
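+/*
+ * A stack slot is considered valid if a full 4-byte word starting at p
+ * still lies within this task's thread_info/stack area (hence the "- 3").
+ */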
13218+static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
13219+{
13220+ return p > (void *)tinfo &&
13221+ p < (void *)tinfo + THREAD_SIZE - 3;
13222+}
13223+
13224+/*
13225+ * Print one address/symbol entry per line.
13226+ */
13227+static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
13228+{
13229+ printk(" [<%08lx>] ", addr);
13230+
13231+ print_symbol("%s\n", addr);
13232+}
13233+
13234+static inline unsigned long print_context_stack(struct thread_info *tinfo,
13235+ unsigned long *stack, unsigned long ebp,
13236+ char *log_lvl)
13237+{
13238+ unsigned long addr;
13239+
13240+#ifdef CONFIG_FRAME_POINTER
13241+ while (valid_stack_ptr(tinfo, (void *)ebp)) {
13242+ addr = *(unsigned long *)(ebp + 4);
13243+ print_addr_and_symbol(addr, log_lvl);
13244+ /*
13245+ * break out of recursive entries (such as
13246+ * end_of_stack_stop_unwind_function):
13247+ */
13248+ if (ebp == *(unsigned long *)ebp)
13249+ break;
13250+ ebp = *(unsigned long *)ebp;
13251+ }
13252+#else
13253+ while (valid_stack_ptr(tinfo, stack)) {
13254+ addr = *stack++;
13255+ if (__kernel_text_address(addr))
13256+ print_addr_and_symbol(addr, log_lvl);
13257+ }
13258+#endif
13259+ return ebp;
13260+}
13261+
13262+static asmlinkage int
13263+show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
13264+{
13265+ int n = 0;
13266+
13267+ while (unwind(info) == 0 && UNW_PC(info)) {
13268+ n++;
13269+ print_addr_and_symbol(UNW_PC(info), log_lvl);
13270+ if (arch_unw_user_mode(info))
13271+ break;
13272+ }
13273+ return n;
13274+}
13275+
13276+static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
13277+ unsigned long *stack, char *log_lvl)
13278+{
13279+ unsigned long ebp;
13280+
13281+ if (!task)
13282+ task = current;
13283+
13284+ if (call_trace >= 0) {
13285+ int unw_ret = 0;
13286+ struct unwind_frame_info info;
13287+
13288+ if (regs) {
13289+ if (unwind_init_frame_info(&info, task, regs) == 0)
13290+ unw_ret = show_trace_unwind(&info, log_lvl);
13291+ } else if (task == current)
13292+ unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
13293+ else {
13294+ if (unwind_init_blocked(&info, task) == 0)
13295+ unw_ret = show_trace_unwind(&info, log_lvl);
13296+ }
13297+ if (unw_ret > 0) {
13298+ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
13299+ print_symbol("DWARF2 unwinder stuck at %s\n",
13300+ UNW_PC(&info));
13301+ if (UNW_SP(&info) >= PAGE_OFFSET) {
13302+ printk("Leftover inexact backtrace:\n");
13303+ stack = (void *)UNW_SP(&info);
13304+ } else
13305+ printk("Full inexact backtrace again:\n");
13306+ } else if (call_trace >= 1)
13307+ return;
13308+ else
13309+ printk("Full inexact backtrace again:\n");
13310+ } else
13311+ printk("Inexact backtrace:\n");
13312+ }
13313+
13314+ if (task == current) {
13315+ /* Grab ebp right from our regs */
13316+ asm ("movl %%ebp, %0" : "=r" (ebp) : );
13317+ } else {
13318+ /* ebp is the last reg pushed by switch_to */
13319+ ebp = *(unsigned long *) task->thread.esp;
13320+ }
13321+
13322+ while (1) {
13323+ struct thread_info *context;
13324+ context = (struct thread_info *)
13325+ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
13326+ ebp = print_context_stack(context, stack, ebp, log_lvl);
13327+ stack = (unsigned long*)context->previous_esp;
13328+ if (!stack)
13329+ break;
13330+ printk("%s =======================\n", log_lvl);
13331+ }
13332+}
13333+
13334+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
13335+{
13336+ show_trace_log_lvl(task, regs, stack, "");
13337+}
13338+
13339+static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
13340+ unsigned long *esp, char *log_lvl)
13341+{
13342+ unsigned long *stack;
13343+ int i;
13344+
13345+ if (esp == NULL) {
13346+ if (task)
13347+ esp = (unsigned long*)task->thread.esp;
13348+ else
13349+ esp = (unsigned long *)&esp;
13350+ }
13351+
13352+ stack = esp;
13353+ for(i = 0; i < kstack_depth_to_print; i++) {
13354+ if (kstack_end(stack))
13355+ break;
13356+ if (i && ((i % 8) == 0))
13357+ printk("\n%s ", log_lvl);
13358+ printk("%08lx ", *stack++);
13359+ }
13360+ printk("\n%sCall Trace:\n", log_lvl);
13361+ show_trace_log_lvl(task, regs, esp, log_lvl);
13362+}
13363+
13364+void show_stack(struct task_struct *task, unsigned long *esp)
13365+{
13366+ printk(" ");
13367+ show_stack_log_lvl(task, NULL, esp, "");
13368+}
13369+
13370+/*
13371+ * The architecture-independent dump_stack generator
13372+ */
13373+void dump_stack(void)
13374+{
13375+ unsigned long stack;
13376+
13377+ show_trace(current, NULL, &stack);
13378+}
13379+
13380+EXPORT_SYMBOL(dump_stack);
13381+
13382+void show_registers(struct pt_regs *regs)
13383+{
13384+ int i;
13385+ int in_kernel = 1;
13386+ unsigned long esp;
13387+ unsigned short ss;
13388+
13389+ esp = (unsigned long) (&regs->esp);
13390+ savesegment(ss, ss);
13391+ if (user_mode_vm(regs)) {
13392+ in_kernel = 0;
13393+ esp = regs->esp;
13394+ ss = regs->xss & 0xffff;
13395+ }
13396+ print_modules();
13397+ printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
13398+ "EFLAGS: %08lx (%s %.*s) \n",
13399+ smp_processor_id(), 0xffff & regs->xcs, regs->eip,
13400+ print_tainted(), regs->eflags, system_utsname.release,
13401+ (int)strcspn(system_utsname.version, " "),
13402+ system_utsname.version);
13403+ print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
13404+ printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
13405+ regs->eax, regs->ebx, regs->ecx, regs->edx);
13406+ printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
13407+ regs->esi, regs->edi, regs->ebp, esp);
13408+ printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
13409+ regs->xds & 0xffff, regs->xes & 0xffff, ss);
13410+ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
13411+ TASK_COMM_LEN, current->comm, current->pid,
13412+ current_thread_info(), current, current->thread_info);
13413+ /*
13414+ * When in-kernel, we also print out the stack and code at the
13415+ * time of the fault..
13416+ */
13417+ if (in_kernel) {
13418+ u8 __user *eip;
13419+
13420+ printk("\n" KERN_EMERG "Stack: ");
13421+ show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
13422+
13423+ printk(KERN_EMERG "Code: ");
13424+
13425+ eip = (u8 __user *)regs->eip - 43;
13426+ for (i = 0; i < 64; i++, eip++) {
13427+ unsigned char c;
13428+
13429+ if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
13430+ printk(" Bad EIP value.");
13431+ break;
13432+ }
13433+ if (eip == (u8 __user *)regs->eip)
13434+ printk("<%02x> ", c);
13435+ else
13436+ printk("%02x ", c);
13437+ }
13438+ }
13439+ printk("\n");
13440+}
13441+
13442+static void handle_BUG(struct pt_regs *regs)
13443+{
13444+ unsigned long eip = regs->eip;
13445+ unsigned short ud2;
13446+
13447+ if (eip < PAGE_OFFSET)
13448+ return;
13449+ if (__get_user(ud2, (unsigned short __user *)eip))
13450+ return;
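+	/* 0x0b0f is the two-byte "ud2" opcode (0F 0B) read as a little-endian u16. */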
13451+ if (ud2 != 0x0b0f)
13452+ return;
13453+
13454+ printk(KERN_EMERG "------------[ cut here ]------------\n");
13455+
13456+#ifdef CONFIG_DEBUG_BUGVERBOSE
13457+ do {
13458+ unsigned short line;
13459+ char *file;
13460+ char c;
13461+
13462+ if (__get_user(line, (unsigned short __user *)(eip + 2)))
13463+ break;
13464+ if (__get_user(file, (char * __user *)(eip + 4)) ||
13465+ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
13466+ file = "<bad filename>";
13467+
13468+ printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
13469+ return;
13470+ } while (0);
13471+#endif
13472+ printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
13473+}
13474+
13475+/* This is gone through when something in the kernel
13476+ * has done something bad and is about to be terminated.
13477+ */
13478+void die(const char * str, struct pt_regs * regs, long err)
13479+{
13480+ static struct {
13481+ spinlock_t lock;
13482+ u32 lock_owner;
13483+ int lock_owner_depth;
13484+ } die = {
13485+ .lock = SPIN_LOCK_UNLOCKED,
13486+ .lock_owner = -1,
13487+ .lock_owner_depth = 0
13488+ };
13489+ static int die_counter;
13490+ unsigned long flags;
13491+
13492+ oops_enter();
13493+
13494+ if (die.lock_owner != raw_smp_processor_id()) {
13495+ console_verbose();
13496+ spin_lock_irqsave(&die.lock, flags);
13497+ die.lock_owner = smp_processor_id();
13498+ die.lock_owner_depth = 0;
13499+ bust_spinlocks(1);
13500+ }
13501+ else
13502+ local_save_flags(flags);
13503+
13504+ if (++die.lock_owner_depth < 3) {
13505+ int nl = 0;
13506+ unsigned long esp;
13507+ unsigned short ss;
13508+
13509+ handle_BUG(regs);
13510+ printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
13511+#ifdef CONFIG_PREEMPT
13512+ printk(KERN_EMERG "PREEMPT ");
13513+ nl = 1;
13514+#endif
13515+#ifdef CONFIG_SMP
13516+ if (!nl)
13517+ printk(KERN_EMERG);
13518+ printk("SMP ");
13519+ nl = 1;
13520+#endif
13521+#ifdef CONFIG_DEBUG_PAGEALLOC
13522+ if (!nl)
13523+ printk(KERN_EMERG);
13524+ printk("DEBUG_PAGEALLOC");
13525+ nl = 1;
13526+#endif
13527+ if (nl)
13528+ printk("\n");
13529+ if (notify_die(DIE_OOPS, str, regs, err,
13530+ current->thread.trap_no, SIGSEGV) !=
13531+ NOTIFY_STOP) {
13532+ show_registers(regs);
13533+ /* Executive summary in case the oops scrolled away */
13534+ esp = (unsigned long) (&regs->esp);
13535+ savesegment(ss, ss);
13536+ if (user_mode(regs)) {
13537+ esp = regs->esp;
13538+ ss = regs->xss & 0xffff;
13539+ }
13540+ printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
13541+ print_symbol("%s", regs->eip);
13542+ printk(" SS:ESP %04x:%08lx\n", ss, esp);
13543+ }
13544+ else
13545+ regs = NULL;
13546+ } else
13547+ printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
13548+
13549+ bust_spinlocks(0);
13550+ die.lock_owner = -1;
13551+ spin_unlock_irqrestore(&die.lock, flags);
13552+
13553+ if (!regs)
13554+ return;
13555+
13556+ if (kexec_should_crash(current))
13557+ crash_kexec(regs);
13558+
13559+ if (in_interrupt())
13560+ panic("Fatal exception in interrupt");
13561+
13562+ if (panic_on_oops)
13563+ panic("Fatal exception");
13564+
13565+ oops_exit();
13566+ do_exit(SIGSEGV);
13567+}
13568+
13569+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
13570+{
13571+ if (!user_mode_vm(regs))
13572+ die(str, regs, err);
13573+}
13574+
13575+static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
13576+ struct pt_regs * regs, long error_code,
13577+ siginfo_t *info)
13578+{
13579+ struct task_struct *tsk = current;
13580+ tsk->thread.error_code = error_code;
13581+ tsk->thread.trap_no = trapnr;
13582+
13583+ if (regs->eflags & VM_MASK) {
13584+ if (vm86)
13585+ goto vm86_trap;
13586+ goto trap_signal;
13587+ }
13588+
13589+ if (!user_mode(regs))
13590+ goto kernel_trap;
13591+
13592+ trap_signal: {
13593+ if (info)
13594+ force_sig_info(signr, info, tsk);
13595+ else
13596+ force_sig(signr, tsk);
13597+ return;
13598+ }
13599+
13600+ kernel_trap: {
13601+ if (!fixup_exception(regs))
13602+ die(str, regs, error_code);
13603+ return;
13604+ }
13605+
13606+ vm86_trap: {
13607+ int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
13608+ if (ret) goto trap_signal;
13609+ return;
13610+ }
13611+}
13612+
13613+#define DO_ERROR(trapnr, signr, str, name) \
13614+fastcall void do_##name(struct pt_regs * regs, long error_code) \
13615+{ \
13616+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
13617+ == NOTIFY_STOP) \
13618+ return; \
13619+ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
13620+}
13621+
13622+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
13623+fastcall void do_##name(struct pt_regs * regs, long error_code) \
13624+{ \
13625+ siginfo_t info; \
13626+ info.si_signo = signr; \
13627+ info.si_errno = 0; \
13628+ info.si_code = sicode; \
13629+ info.si_addr = (void __user *)siaddr; \
13630+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
13631+ == NOTIFY_STOP) \
13632+ return; \
13633+ do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
13634+}
13635+
13636+#define DO_VM86_ERROR(trapnr, signr, str, name) \
13637+fastcall void do_##name(struct pt_regs * regs, long error_code) \
13638+{ \
13639+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
13640+ == NOTIFY_STOP) \
13641+ return; \
13642+ do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
13643+}
13644+
13645+#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
13646+fastcall void do_##name(struct pt_regs * regs, long error_code) \
13647+{ \
13648+ siginfo_t info; \
13649+ info.si_signo = signr; \
13650+ info.si_errno = 0; \
13651+ info.si_code = sicode; \
13652+ info.si_addr = (void __user *)siaddr; \
13653+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
13654+ == NOTIFY_STOP) \
13655+ return; \
13656+ do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
13657+}
13658+
13659+DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
13660+#ifndef CONFIG_KPROBES
13661+DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
13662+#endif
13663+DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
13664+DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
13665+DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
13666+DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
13667+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
13668+DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
13669+DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
13670+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
13671+DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
13672+
13673+fastcall void __kprobes do_general_protection(struct pt_regs * regs,
13674+ long error_code)
13675+{
13676+ current->thread.error_code = error_code;
13677+ current->thread.trap_no = 13;
13678+
13679+ if (regs->eflags & VM_MASK)
13680+ goto gp_in_vm86;
13681+
13682+ if (!user_mode(regs))
13683+ goto gp_in_kernel;
13684+
13685+ current->thread.error_code = error_code;
13686+ current->thread.trap_no = 13;
13687+ force_sig(SIGSEGV, current);
13688+ return;
13689+
13690+gp_in_vm86:
13691+ local_irq_enable();
13692+ handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
13693+ return;
13694+
13695+gp_in_kernel:
13696+ if (!fixup_exception(regs)) {
13697+ if (notify_die(DIE_GPF, "general protection fault", regs,
13698+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
13699+ return;
13700+ die("general protection fault", regs, error_code);
13701+ }
13702+}
13703+
13704+static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
13705+{
13706+ printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
13707+ "to continue\n");
13708+ printk(KERN_EMERG "You probably have a hardware problem with your RAM "
13709+ "chips\n");
13710+
13711+ /* Clear and disable the memory parity error line. */
13712+ clear_mem_error(reason);
13713+}
13714+
13715+static void io_check_error(unsigned char reason, struct pt_regs * regs)
13716+{
13717+ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
13718+ show_registers(regs);
13719+
13720+ /* Re-enable the IOCK line, wait for a few seconds */
13721+ clear_io_check_error(reason);
13722+}
13723+
13724+static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
13725+{
13726+#ifdef CONFIG_MCA
13727+ /* Might actually be able to figure out what the guilty party
13728+ * is. */
13729+ if( MCA_bus ) {
13730+ mca_handle_nmi();
13731+ return;
13732+ }
13733+#endif
13734+ printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
13735+ reason, smp_processor_id());
13736+ printk("Dazed and confused, but trying to continue\n");
13737+ printk("Do you have a strange power saving mode enabled?\n");
13738+}
13739+
13740+static DEFINE_SPINLOCK(nmi_print_lock);
13741+
13742+void die_nmi (struct pt_regs *regs, const char *msg)
13743+{
13744+ if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
13745+ NOTIFY_STOP)
13746+ return;
13747+
13748+ spin_lock(&nmi_print_lock);
13749+ /*
13750+	 * We are in trouble anyway, let's at least try
13751+ * to get a message out.
13752+ */
13753+ bust_spinlocks(1);
13754+ printk(KERN_EMERG "%s", msg);
13755+ printk(" on CPU%d, eip %08lx, registers:\n",
13756+ smp_processor_id(), regs->eip);
13757+ show_registers(regs);
13758+ printk(KERN_EMERG "console shuts up ...\n");
13759+ console_silent();
13760+ spin_unlock(&nmi_print_lock);
13761+ bust_spinlocks(0);
13762+
13763+	/* If we are in the kernel we are probably nested up pretty badly
13764+	 * and might as well get out now while we still can.
13765+ */
13766+ if (!user_mode_vm(regs)) {
13767+ current->thread.trap_no = 2;
13768+ crash_kexec(regs);
13769+ }
13770+
13771+ do_exit(SIGSEGV);
13772+}
13773+
13774+static void default_do_nmi(struct pt_regs * regs)
13775+{
13776+ unsigned char reason = 0;
13777+
13778+ /* Only the BSP gets external NMIs from the system. */
13779+ if (!smp_processor_id())
13780+ reason = get_nmi_reason();
13781+
13782+ if (!(reason & 0xc0)) {
13783+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
13784+ == NOTIFY_STOP)
13785+ return;
13786+#ifdef CONFIG_X86_LOCAL_APIC
13787+ /*
13788+ * Ok, so this is none of the documented NMI sources,
13789+ * so it must be the NMI watchdog.
13790+ */
13791+ if (nmi_watchdog) {
13792+ nmi_watchdog_tick(regs);
13793+ return;
13794+ }
13795+#endif
13796+ unknown_nmi_error(reason, regs);
13797+ return;
13798+ }
13799+ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
13800+ return;
13801+ if (reason & 0x80)
13802+ mem_parity_error(reason, regs);
13803+ if (reason & 0x40)
13804+ io_check_error(reason, regs);
13805+ /*
13806+ * Reassert NMI in case it became active meanwhile
13807+ * as it's edge-triggered.
13808+ */
13809+ reassert_nmi();
13810+}
13811+
13812+static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
13813+{
13814+ return 0;
13815+}
13816+
13817+static nmi_callback_t nmi_callback = dummy_nmi_callback;
13818+
13819+fastcall void do_nmi(struct pt_regs * regs, long error_code)
13820+{
13821+ int cpu;
13822+
13823+ nmi_enter();
13824+
13825+ cpu = smp_processor_id();
13826+
13827+ ++nmi_count(cpu);
13828+
13829+ if (!rcu_dereference(nmi_callback)(regs, cpu))
13830+ default_do_nmi(regs);
13831+
13832+ nmi_exit();
13833+}
13834+
13835+void set_nmi_callback(nmi_callback_t callback)
13836+{
13837+ vmalloc_sync_all();
13838+ rcu_assign_pointer(nmi_callback, callback);
13839+}
13840+EXPORT_SYMBOL_GPL(set_nmi_callback);
13841+
13842+void unset_nmi_callback(void)
13843+{
13844+ nmi_callback = dummy_nmi_callback;
13845+}
13846+EXPORT_SYMBOL_GPL(unset_nmi_callback);
13847+
13848+#ifdef CONFIG_KPROBES
13849+fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
13850+{
13851+ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
13852+ == NOTIFY_STOP)
13853+ return;
13854+ /* This is an interrupt gate, because kprobes wants interrupts
13855+ disabled. Normal trap handlers don't. */
13856+ restore_interrupts(regs);
13857+ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
13858+}
13859+#endif
13860+
13861+/*
13862+ * Our handling of the processor debug registers is non-trivial.
13863+ * We do not clear them on entry and exit from the kernel. Therefore
13864+ * it is possible to get a watchpoint trap here from inside the kernel.
13865+ * However, the code in ./ptrace.c has ensured that the user can
13866+ * only set watchpoints on userspace addresses. Therefore the in-kernel
13867+ * watchpoint trap can only occur in code which is reading/writing
13868+ * from user space. Such code must not hold kernel locks (since it
13869+ * can equally take a page fault), therefore it is safe to call
13870+ * force_sig_info even though that claims and releases locks.
13871+ *
13872+ * Code in ./signal.c ensures that the debug control register
13873+ * is restored before we deliver any signal, and therefore that
13874+ * user code runs with the correct debug control register even though
13875+ * we clear it here.
13876+ *
13877+ * Being careful here means that we don't have to be as careful in a
13878+ * lot of more complicated places (task switching can be a bit lazy
13879+ * about restoring all the debug state, and ptrace doesn't have to
13880+ * find every occurrence of the TF bit that could be saved away even
13881+ * by user code)
13882+ */
13883+fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
13884+{
13885+ unsigned int condition;
13886+ struct task_struct *tsk = current;
13887+
13888+ get_debugreg(condition, 6);
13889+
13890+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
13891+ SIGTRAP) == NOTIFY_STOP)
13892+ return;
13893+ /* It's safe to allow irq's after DR6 has been saved */
13894+ if (regs->eflags & X86_EFLAGS_IF)
13895+ local_irq_enable();
13896+
13897+ /* Mask out spurious debug traps due to lazy DR7 setting */
13898+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
13899+ if (!tsk->thread.debugreg[7])
13900+ goto clear_dr7;
13901+ }
13902+
13903+ if (regs->eflags & VM_MASK)
13904+ goto debug_vm86;
13905+
13906+ /* Save debug status register where ptrace can see it */
13907+ tsk->thread.debugreg[6] = condition;
13908+
13909+ /*
13910+ * Single-stepping through TF: make sure we ignore any events in
13911+ * kernel space (but re-enable TF when returning to user mode).
13912+ */
13913+ if (condition & DR_STEP) {
13914+ /*
13915+ * We already checked v86 mode above, so we can
13916+ * check for kernel mode by just checking the CPL
13917+ * of CS.
13918+ */
13919+ if (!user_mode(regs))
13920+ goto clear_TF_reenable;
13921+ }
13922+
13923+ /* Ok, finally something we can handle */
13924+ send_sigtrap(tsk, regs, error_code);
13925+
13926+ /* Disable additional traps. They'll be re-enabled when
13927+ * the signal is delivered.
13928+ */
13929+clear_dr7:
13930+ set_debugreg(0, 7);
13931+ return;
13932+
13933+debug_vm86:
13934+ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
13935+ return;
13936+
13937+clear_TF_reenable:
13938+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
13939+ regs->eflags &= ~TF_MASK;
13940+ return;
13941+}
13942+
13943+/*
13944+ * Note that we play around with the 'TS' bit in an attempt to get
13945+ * the correct behaviour even in the presence of the asynchronous
13946+ * IRQ13 behaviour
13947+ */
13948+void math_error(void __user *eip)
13949+{
13950+ struct task_struct * task;
13951+ siginfo_t info;
13952+ unsigned short cwd, swd;
13953+
13954+ /*
13955+ * Save the info for the exception handler and clear the error.
13956+ */
13957+ task = current;
13958+ save_init_fpu(task);
13959+ task->thread.trap_no = 16;
13960+ task->thread.error_code = 0;
13961+ info.si_signo = SIGFPE;
13962+ info.si_errno = 0;
13963+ info.si_code = __SI_FAULT;
13964+ info.si_addr = eip;
13965+ /*
13966+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
13967+ * status. 0x3f is the exception bits in these regs, 0x200 is the
13968+ * C1 reg you need in case of a stack fault, 0x040 is the stack
13969+ * fault bit. We should only be taking one exception at a time,
13970+ * so if this combination doesn't produce any single exception,
13971+	 * then we have a bad program that isn't synchronizing its FPU usage
13972+ * and it will suffer the consequences since we won't be able to
13973+ * fully reproduce the context of the exception
13974+ */
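+	/*
+	 * Worked example: with the default control word 0x037f everything is
+	 * masked, so swd == 0x0004 (ZE set) gives (swd & ~cwd & 0x3f) == 0 and
+	 * we return without raising a signal; with ZE unmasked (cwd == 0x037b)
+	 * the same swd yields 0x004 and FPE_FLTDIV is delivered below.
+	 */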
13975+ cwd = get_fpu_cwd(task);
13976+ swd = get_fpu_swd(task);
13977+ switch (swd & ~cwd & 0x3f) {
13978+ case 0x000: /* No unmasked exception */
13979+ return;
13980+ default: /* Multiple exceptions */
13981+ break;
13982+ case 0x001: /* Invalid Op */
13983+ /*
13984+ * swd & 0x240 == 0x040: Stack Underflow
13985+ * swd & 0x240 == 0x240: Stack Overflow
13986+ * User must clear the SF bit (0x40) if set
13987+ */
13988+ info.si_code = FPE_FLTINV;
13989+ break;
13990+ case 0x002: /* Denormalize */
13991+ case 0x010: /* Underflow */
13992+ info.si_code = FPE_FLTUND;
13993+ break;
13994+ case 0x004: /* Zero Divide */
13995+ info.si_code = FPE_FLTDIV;
13996+ break;
13997+ case 0x008: /* Overflow */
13998+ info.si_code = FPE_FLTOVF;
13999+ break;
14000+ case 0x020: /* Precision */
14001+ info.si_code = FPE_FLTRES;
14002+ break;
14003+ }
14004+ force_sig_info(SIGFPE, &info, task);
14005+}
14006+
14007+fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
14008+{
14009+ ignore_fpu_irq = 1;
14010+ math_error((void __user *)regs->eip);
14011+}
14012+
14013+static void simd_math_error(void __user *eip)
14014+{
14015+ struct task_struct * task;
14016+ siginfo_t info;
14017+ unsigned short mxcsr;
14018+
14019+ /*
14020+ * Save the info for the exception handler and clear the error.
14021+ */
14022+ task = current;
14023+ save_init_fpu(task);
14024+ task->thread.trap_no = 19;
14025+ task->thread.error_code = 0;
14026+ info.si_signo = SIGFPE;
14027+ info.si_errno = 0;
14028+ info.si_code = __SI_FAULT;
14029+ info.si_addr = eip;
14030+ /*
14031+ * The SIMD FPU exceptions are handled a little differently, as there
14032+ * is only a single status/control register. Thus, to determine which
14033+ * unmasked exception was caught we must mask the exception mask bits
14034+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
14035+ */
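+	/*
+	 * For example, the power-on MXCSR value 0x1f80 (all exceptions masked,
+	 * no flags set) reduces to 0 here, while an unmasked divide-by-zero
+	 * (ZM clear, ZE set) reduces to 0x004 and is reported as FPE_FLTDIV.
+	 */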
14036+ mxcsr = get_fpu_mxcsr(task);
14037+ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
14038+ case 0x000:
14039+ default:
14040+ break;
14041+ case 0x001: /* Invalid Op */
14042+ info.si_code = FPE_FLTINV;
14043+ break;
14044+ case 0x002: /* Denormalize */
14045+ case 0x010: /* Underflow */
14046+ info.si_code = FPE_FLTUND;
14047+ break;
14048+ case 0x004: /* Zero Divide */
14049+ info.si_code = FPE_FLTDIV;
14050+ break;
14051+ case 0x008: /* Overflow */
14052+ info.si_code = FPE_FLTOVF;
14053+ break;
14054+ case 0x020: /* Precision */
14055+ info.si_code = FPE_FLTRES;
14056+ break;
14057+ }
14058+ force_sig_info(SIGFPE, &info, task);
14059+}
14060+
14061+fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
14062+ long error_code)
14063+{
14064+ if (cpu_has_xmm) {
14065+ /* Handle SIMD FPU exceptions on PIII+ processors. */
14066+ ignore_fpu_irq = 1;
14067+ simd_math_error((void __user *)regs->eip);
14068+ } else {
14069+ /*
14070+ * Handle strange cache flush from user space exception
14071+ * in all other cases. This is undocumented behaviour.
14072+ */
14073+ if (regs->eflags & VM_MASK) {
14074+ handle_vm86_fault((struct kernel_vm86_regs *)regs,
14075+ error_code);
14076+ return;
14077+ }
14078+ current->thread.trap_no = 19;
14079+ current->thread.error_code = error_code;
14080+ die_if_kernel("cache flush denied", regs, error_code);
14081+ force_sig(SIGSEGV, current);
14082+ }
14083+}
14084+
14085+#ifndef CONFIG_XEN
14086+fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
14087+ long error_code)
14088+{
14089+#if 0
14090+ /* No need to warn about this any longer. */
14091+ printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
14092+#endif
14093+}
14094+
14095+fastcall void setup_x86_bogus_stack(unsigned char * stk)
14096+{
14097+ unsigned long *switch16_ptr, *switch32_ptr;
14098+ struct pt_regs *regs;
14099+ unsigned long stack_top, stack_bot;
14100+ unsigned short iret_frame16_off;
14101+ int cpu = smp_processor_id();
14102+ /* reserve the space on 32bit stack for the magic switch16 pointer */
14103+ memmove(stk, stk + 8, sizeof(struct pt_regs));
14104+ switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
14105+ regs = (struct pt_regs *)stk;
14106+ /* now the switch32 on 16bit stack */
14107+ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
14108+ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
14109+ switch32_ptr = (unsigned long *)(stack_top - 8);
14110+ iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
14111+ /* copy iret frame on 16bit stack */
14112+ memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
14113+ /* fill in the switch pointers */
14114+ switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
14115+ switch16_ptr[1] = __ESPFIX_SS;
14116+ switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
14117+ 8 - CPU_16BIT_STACK_SIZE;
14118+ switch32_ptr[1] = __KERNEL_DS;
14119+}
14120+
14121+fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
14122+{
14123+ unsigned long *switch32_ptr;
14124+ unsigned char *stack16, *stack32;
14125+ unsigned long stack_top, stack_bot;
14126+ int len;
14127+ int cpu = smp_processor_id();
14128+ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
14129+ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
14130+ switch32_ptr = (unsigned long *)(stack_top - 8);
14131+ /* copy the data from 16bit stack to 32bit stack */
14132+ len = CPU_16BIT_STACK_SIZE - 8 - sp;
14133+ stack16 = (unsigned char *)(stack_bot + sp);
14134+ stack32 = (unsigned char *)
14135+ (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
14136+ memcpy(stack32, stack16, len);
14137+ return stack32;
14138+}
14139+#endif
14140+
14141+/*
14142+ * 'math_state_restore()' saves the current math information in the
14143+ * old math state array, and gets the new ones from the current task
14144+ *
14145+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
14146+ * Don't touch unless you *really* know how it works.
14147+ *
14148+ * Must be called with kernel preemption disabled (in this case,
14149+ * local interrupts are disabled at the call-site in entry.S).
14150+ */
14151+asmlinkage void math_state_restore(struct pt_regs regs)
14152+{
14153+ struct thread_info *thread = current_thread_info();
14154+ struct task_struct *tsk = thread->task;
14155+
14156+ /* NB. 'clts' is done for us by Xen during virtual trap. */
14157+ if (!tsk_used_math(tsk))
14158+ init_fpu(tsk);
14159+ restore_fpu(tsk);
14160+ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
14161+}
14162+
14163+#ifndef CONFIG_MATH_EMULATION
14164+
14165+asmlinkage void math_emulate(long arg)
14166+{
14167+ printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
14168+ printk(KERN_EMERG "killing %s.\n",current->comm);
14169+ force_sig(SIGFPE,current);
14170+ schedule();
14171+}
14172+
14173+#endif /* CONFIG_MATH_EMULATION */
14174+
14175+#ifdef CONFIG_X86_F00F_BUG
14176+void __init trap_init_f00f_bug(void)
14177+{
14178+ __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
14179+
14180+ /*
14181+ * Update the IDT descriptor and reload the IDT so that
14182+ * it uses the read-only mapped virtual address.
14183+ */
14184+ idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14185+ load_idt(&idt_descr);
14186+}
14187+#endif
14188+
14189+
14190+/*
14191+ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
14192+ * for those that specify <dpl>|4 in the second field.
14193+ */
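+/*
+ * In the second field the low two bits are the gate's privilege level
+ * (3 allows user-space "int" instructions, e.g. int3 and the syscall
+ * vector) and bit 2 ("|4") is the events-mask flag mentioned above.
+ */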
14194+static trap_info_t __cpuinitdata trap_table[] = {
14195+ { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
14196+ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
14197+ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
14198+ { 4, 3, __KERNEL_CS, (unsigned long)overflow },
14199+ { 5, 0, __KERNEL_CS, (unsigned long)bounds },
14200+ { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
14201+ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
14202+ { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
14203+ { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
14204+ { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
14205+ { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
14206+ { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
14207+ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
14208+ { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
14209+ { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
14210+ { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
14211+#ifdef CONFIG_X86_MCE
14212+ { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
14213+#endif
14214+ { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
14215+ { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
14216+ { 0, 0, 0, 0 }
14217+};
14218+
14219+void __init trap_init(void)
14220+{
14221+ int ret;
14222+
14223+ ret = HYPERVISOR_set_trap_table(trap_table);
14224+ if (ret)
14225+ printk("HYPERVISOR_set_trap_table failed: error %d\n", ret);
14226+
14227+ if (cpu_has_fxsr) {
14228+ /*
14229+ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
14230+ * Generates a compile-time "error: zero width for bit-field" if
14231+ * the alignment is wrong.
14232+ */
14233+ struct fxsrAlignAssert {
14234+ int _:!(offsetof(struct task_struct,
14235+ thread.i387.fxsave) & 15);
14236+ };
14237+
14238+ printk(KERN_INFO "Enabling fast FPU save and restore... ");
14239+ set_in_cr4(X86_CR4_OSFXSR);
14240+ printk("done.\n");
14241+ }
14242+ if (cpu_has_xmm) {
14243+ printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
14244+ "support... ");
14245+ set_in_cr4(X86_CR4_OSXMMEXCPT);
14246+ printk("done.\n");
14247+ }
14248+
14249+ /*
14250+ * Should be a barrier for any external CPU state.
14251+ */
14252+ cpu_init();
14253+}
14254+
14255+void __cpuinit smp_trap_init(trap_info_t *trap_ctxt)
14256+{
14257+	const trap_info_t *t;
14258+
14259+ for (t = trap_table; t->address; t++) {
14260+ trap_ctxt[t->vector].flags = t->flags;
14261+ trap_ctxt[t->vector].cs = t->cs;
14262+ trap_ctxt[t->vector].address = t->address;
14263+ }
14264+}
14265+
14266+static int __init kstack_setup(char *s)
14267+{
14268+ kstack_depth_to_print = simple_strtoul(s, NULL, 0);
14269+ return 1;
14270+}
14271+__setup("kstack=", kstack_setup);
14272+
14273+#ifdef CONFIG_STACK_UNWIND
14274+static int __init call_trace_setup(char *s)
14275+{
14276+ if (strcmp(s, "old") == 0)
14277+ call_trace = -1;
14278+ else if (strcmp(s, "both") == 0)
14279+ call_trace = 0;
14280+ else if (strcmp(s, "newfallback") == 0)
14281+ call_trace = 1;
14282+	else if (strcmp(s, "new") == 0)
14283+ call_trace = 2;
14284+ return 1;
14285+}
14286+__setup("call_trace=", call_trace_setup);
14287+#endif
14288Index: head-2008-11-25/arch/x86/mach-xen/Makefile
14289===================================================================
14290--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14291+++ head-2008-11-25/arch/x86/mach-xen/Makefile 2007-06-12 13:12:48.000000000 +0200
14292@@ -0,0 +1,5 @@
14293+#
14294+# Makefile for the linux kernel.
14295+#
14296+
14297+obj-y := setup.o
14298Index: head-2008-11-25/arch/x86/mach-xen/setup.c
14299===================================================================
14300--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14301+++ head-2008-11-25/arch/x86/mach-xen/setup.c 2008-04-02 12:34:02.000000000 +0200
14302@@ -0,0 +1,158 @@
14303+/*
14304+ * Machine specific setup for generic
14305+ */
14306+
14307+#include <linux/mm.h>
14308+#include <linux/smp.h>
14309+#include <linux/init.h>
14310+#include <linux/interrupt.h>
14311+#include <linux/module.h>
14312+#include <asm/acpi.h>
14313+#include <asm/arch_hooks.h>
14314+#include <asm/e820.h>
14315+#include <asm/setup.h>
14316+#include <asm/fixmap.h>
14317+
14318+#include <xen/interface/callback.h>
14319+#include <xen/interface/memory.h>
14320+
14321+#ifdef CONFIG_HOTPLUG_CPU
14322+#define DEFAULT_SEND_IPI (1)
14323+#else
14324+#define DEFAULT_SEND_IPI (0)
14325+#endif
14326+
14327+int no_broadcast=DEFAULT_SEND_IPI;
14328+
14329+static __init int no_ipi_broadcast(char *str)
14330+{
14331+ get_option(&str, &no_broadcast);
14332+ printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
14333+ "IPI Broadcast");
14334+ return 1;
14335+}
14336+
14337+__setup("no_ipi_broadcast", no_ipi_broadcast);
14338+
14339+static int __init print_ipi_mode(void)
14340+{
14341+ printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
14342+ "Shortcut");
14343+ return 0;
14344+}
14345+
14346+late_initcall(print_ipi_mode);
14347+
14348+/**
14349+ * machine_specific_memory_setup - Hook for machine specific memory setup.
14350+ *
14351+ * Description:
14352+ * This is included late in kernel/setup.c so that it can make
14353+ * use of all of the static functions.
14354+ **/
14355+
14356+char * __init machine_specific_memory_setup(void)
14357+{
14358+ int rc;
14359+ struct xen_memory_map memmap;
14360+ /*
14361+ * This is rather large for a stack variable but this early in
14362+	 * the boot process we know we have plenty of slack space.
14363+ */
14364+ struct e820entry map[E820MAX];
14365+
14366+ memmap.nr_entries = E820MAX;
14367+ set_xen_guest_handle(memmap.buffer, map);
14368+
14369+ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
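+	/*
+	 * Older hypervisors do not implement XENMEM_memory_map; in that case
+	 * synthesize a single RAM region covering the initial allocation.
+	 */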
14370+ if ( rc == -ENOSYS ) {
14371+ memmap.nr_entries = 1;
14372+ map[0].addr = 0ULL;
14373+ map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages);
14374+ /* 8MB slack (to balance backend allocations). */
14375+ map[0].size += 8ULL << 20;
14376+ map[0].type = E820_RAM;
14377+ rc = 0;
14378+ }
14379+ BUG_ON(rc);
14380+
14381+ sanitize_e820_map(map, (char *)&memmap.nr_entries);
14382+
14383+ BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
14384+
14385+ return "Xen";
14386+}
14387+
14388+
14389+extern void hypervisor_callback(void);
14390+extern void failsafe_callback(void);
14391+extern void nmi(void);
14392+
14393+unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
14394+EXPORT_SYMBOL(machine_to_phys_mapping);
14395+unsigned int machine_to_phys_order;
14396+EXPORT_SYMBOL(machine_to_phys_order);
14397+
14398+void __init pre_setup_arch_hook(void)
14399+{
14400+ struct xen_machphys_mapping mapping;
14401+ unsigned long machine_to_phys_nr_ents;
14402+ struct xen_platform_parameters pp;
14403+
14404+ init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;
14405+
14406+ setup_xen_features();
14407+
14408+ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
14409+ set_fixaddr_top(pp.virt_start);
14410+
14411+ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
14412+ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
14413+ machine_to_phys_nr_ents = mapping.max_mfn + 1;
14414+ } else
14415+ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
14416+ machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
14417+
14418+ if (!xen_feature(XENFEAT_auto_translated_physmap))
14419+ phys_to_machine_mapping =
14420+ (unsigned long *)xen_start_info->mfn_list;
14421+}
14422+
14423+void __init machine_specific_arch_setup(void)
14424+{
14425+ int ret;
14426+ static struct callback_register __initdata event = {
14427+ .type = CALLBACKTYPE_event,
14428+ .address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
14429+ };
14430+ static struct callback_register __initdata failsafe = {
14431+ .type = CALLBACKTYPE_failsafe,
14432+ .address = { __KERNEL_CS, (unsigned long)failsafe_callback },
14433+ };
14434+ static struct callback_register __initdata nmi_cb = {
14435+ .type = CALLBACKTYPE_nmi,
14436+ .address = { __KERNEL_CS, (unsigned long)nmi },
14437+ };
14438+
14439+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
14440+ if (ret == 0)
14441+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
14442+#if CONFIG_XEN_COMPAT <= 0x030002
14443+ if (ret == -ENOSYS)
14444+ ret = HYPERVISOR_set_callbacks(
14445+ event.address.cs, event.address.eip,
14446+ failsafe.address.cs, failsafe.address.eip);
14447+#endif
14448+ BUG_ON(ret);
14449+
14450+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
14451+#if CONFIG_XEN_COMPAT <= 0x030002
14452+ if (ret == -ENOSYS) {
14453+ static struct xennmi_callback __initdata cb = {
14454+ .handler_address = (unsigned long)nmi
14455+ };
14456+
14457+ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
14458+ }
14459+#endif
14460+}
14461Index: head-2008-11-25/arch/x86/lib/scrub.c
14462===================================================================
14463--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14464+++ head-2008-11-25/arch/x86/lib/scrub.c 2008-02-08 12:30:51.000000000 +0100
14465@@ -0,0 +1,21 @@
14466+#include <asm/cpufeature.h>
14467+#include <asm/page.h>
14468+#include <asm/processor.h>
14469+
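+/*
+ * Zero "count" pages. With SSE2, use non-temporal movnti stores (four
+ * longs per loop iteration) followed by an sfence so the scrubbed data
+ * bypasses the cache; otherwise fall back to clear_page().
+ */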
14470+void scrub_pages(void *v, unsigned int count)
14471+{
14472+ if (likely(cpu_has_xmm2)) {
14473+ unsigned long n = count * (PAGE_SIZE / sizeof(long) / 4);
14474+
14475+ for (; n--; v += sizeof(long) * 4)
14476+ asm("movnti %1,(%0)\n\t"
14477+ "movnti %1,%c2(%0)\n\t"
14478+ "movnti %1,2*%c2(%0)\n\t"
14479+ "movnti %1,3*%c2(%0)\n\t"
14480+ : : "r" (v), "r" (0L), "i" (sizeof(long))
14481+ : "memory");
14482+ asm volatile("sfence" : : : "memory");
14483+ } else
14484+ for (; count--; v += PAGE_SIZE)
14485+ clear_page(v);
14486+}
14487Index: head-2008-11-25/arch/x86/mm/fault_32-xen.c
14488===================================================================
14489--- /dev/null 1970-01-01 00:00:00.000000000 +0000
14490+++ head-2008-11-25/arch/x86/mm/fault_32-xen.c 2007-12-10 08:47:31.000000000 +0100
14491@@ -0,0 +1,779 @@
14492+/*
14493+ * linux/arch/i386/mm/fault.c
14494+ *
14495+ * Copyright (C) 1995 Linus Torvalds
14496+ */
14497+
14498+#include <linux/signal.h>
14499+#include <linux/sched.h>
14500+#include <linux/kernel.h>
14501+#include <linux/errno.h>
14502+#include <linux/string.h>
14503+#include <linux/types.h>
14504+#include <linux/ptrace.h>
14505+#include <linux/mman.h>
14506+#include <linux/mm.h>
14507+#include <linux/smp.h>
14508+#include <linux/smp_lock.h>
14509+#include <linux/interrupt.h>
14510+#include <linux/init.h>
14511+#include <linux/tty.h>
14512+#include <linux/vt_kern.h> /* For unblank_screen() */
14513+#include <linux/highmem.h>
14514+#include <linux/module.h>
14515+#include <linux/kprobes.h>
14516+
14517+#include <asm/system.h>
14518+#include <asm/uaccess.h>
14519+#include <asm/desc.h>
14520+#include <asm/kdebug.h>
14521+
14522+extern void die(const char *,struct pt_regs *,long);
14523+
14524+#ifdef CONFIG_KPROBES
14525+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
14526+int register_page_fault_notifier(struct notifier_block *nb)
14527+{
14528+ vmalloc_sync_all();
14529+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
14530+}
14531+
14532+int unregister_page_fault_notifier(struct notifier_block *nb)
14533+{
14534+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
14535+}
14536+
14537+static inline int notify_page_fault(enum die_val val, const char *str,
14538+ struct pt_regs *regs, long err, int trap, int sig)
14539+{
14540+ struct die_args args = {
14541+ .regs = regs,
14542+ .str = str,
14543+ .err = err,
14544+ .trapnr = trap,
14545+ .signr = sig
14546+ };
14547+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
14548+}
14549+#else
14550+static inline int notify_page_fault(enum die_val val, const char *str,
14551+ struct pt_regs *regs, long err, int trap, int sig)
14552+{
14553+ return NOTIFY_DONE;
14554+}
14555+#endif
14556+
14557+
14558+/*
14559+ * Unlock any spinlocks which will prevent us from getting the
14560+ * message out
14561+ */
14562+void bust_spinlocks(int yes)
14563+{
14564+ int loglevel_save = console_loglevel;
14565+
14566+ if (yes) {
14567+ oops_in_progress = 1;
14568+ return;
14569+ }
14570+#ifdef CONFIG_VT
14571+ unblank_screen();
14572+#endif
14573+ oops_in_progress = 0;
14574+ /*
14575+ * OK, the message is on the console. Now we call printk()
14576+ * without oops_in_progress set so that printk will give klogd
14577+ * a poke. Hold onto your hats...
14578+ */
14579+ console_loglevel = 15; /* NMI oopser may have shut the console up */
14580+ printk(" ");
14581+ console_loglevel = loglevel_save;
14582+}
14583+
14584+/*
14585+ * Return EIP plus the CS segment base. The segment limit is also
14586+ * adjusted, clamped to the kernel/user address space (whichever is
14587+ * appropriate), and returned in *eip_limit.
14588+ *
14589+ * The segment is checked, because it might have been changed by another
14590+ * task between the original faulting instruction and here.
14591+ *
14592+ * If CS is no longer a valid code segment, or if EIP is beyond the
14593+ * limit, or if it is a kernel address when CS is not a kernel segment,
14594+ * then the returned value will be greater than *eip_limit.
14595+ *
14596+ * This is slow, but is very rarely executed.
14597+ */
14598+static inline unsigned long get_segment_eip(struct pt_regs *regs,
14599+ unsigned long *eip_limit)
14600+{
14601+ unsigned long eip = regs->eip;
14602+ unsigned seg = regs->xcs & 0xffff;
14603+ u32 seg_ar, seg_limit, base, *desc;
14604+
14605+ /* Unlikely, but must come before segment checks. */
14606+ if (unlikely(regs->eflags & VM_MASK)) {
14607+ base = seg << 4;
14608+ *eip_limit = base + 0xffff;
14609+ return base + (eip & 0xffff);
14610+ }
14611+
14612+ /* The standard kernel/user address space limit. */
14613+ *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
14614+
14615+ /* By far the most common cases. */
14616+ if (likely(seg == __USER_CS || seg == GET_KERNEL_CS()))
14617+ return eip;
14618+
14619+ /* Check the segment exists, is within the current LDT/GDT size,
14620+ that kernel/user (ring 0..3) has the appropriate privilege,
14621+ that it's a code segment, and get the limit. */
14622+ __asm__ ("larl %3,%0; lsll %3,%1"
14623+ : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
14624+ if ((~seg_ar & 0x9800) || eip > seg_limit) {
14625+ *eip_limit = 0;
14626+ return 1; /* So that returned eip > *eip_limit. */
14627+ }
14628+
14629+ /* Get the GDT/LDT descriptor base.
14630+ When you look for races in this code remember that
14631+ LDT and other horrors are only used in user space. */
14632+ if (seg & (1<<2)) {
14633+ /* Must lock the LDT while reading it. */
14634+ down(&current->mm->context.sem);
14635+ desc = current->mm->context.ldt;
14636+ desc = (void *)desc + (seg & ~7);
14637+ } else {
14638+ /* Must disable preemption while reading the GDT. */
14639+ desc = (u32 *)get_cpu_gdt_table(get_cpu());
14640+ desc = (void *)desc + (seg & ~7);
14641+ }
14642+
14643+ /* Decode the code segment base from the descriptor */
14644+ base = get_desc_base((unsigned long *)desc);
14645+
14646+ if (seg & (1<<2)) {
14647+ up(&current->mm->context.sem);
14648+ } else
14649+ put_cpu();
14650+
14651+ /* Adjust EIP and segment limit, and clamp at the kernel limit.
14652+ It's legitimate for segments to wrap at 0xffffffff. */
14653+ seg_limit += base;
14654+ if (seg_limit < *eip_limit && seg_limit >= base)
14655+ *eip_limit = seg_limit;
14656+ return eip + base;
14657+}
14658+
14659+/*
14660+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
14661+ * Check that here and ignore it.
14662+ */
14663+static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
14664+{
14665+ unsigned long limit;
14666+ unsigned long instr = get_segment_eip (regs, &limit);
14667+ int scan_more = 1;
14668+ int prefetch = 0;
14669+ int i;
14670+
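+	/* Scan at most 15 bytes - the maximum length of one x86 instruction. */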
14671+ for (i = 0; scan_more && i < 15; i++) {
14672+ unsigned char opcode;
14673+ unsigned char instr_hi;
14674+ unsigned char instr_lo;
14675+
14676+ if (instr > limit)
14677+ break;
14678+ if (__get_user(opcode, (unsigned char __user *) instr))
14679+ break;
14680+
14681+ instr_hi = opcode & 0xf0;
14682+ instr_lo = opcode & 0x0f;
14683+ instr++;
14684+
14685+ switch (instr_hi) {
14686+ case 0x20:
14687+ case 0x30:
14688+ /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
14689+ scan_more = ((instr_lo & 7) == 0x6);
14690+ break;
14691+
14692+ case 0x60:
14693+ /* 0x64 thru 0x67 are valid prefixes in all modes. */
14694+ scan_more = (instr_lo & 0xC) == 0x4;
14695+ break;
14696+ case 0xF0:
14697+ /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
14698+ scan_more = !instr_lo || (instr_lo>>1) == 1;
14699+ break;
14700+ case 0x00:
14701+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
14702+ scan_more = 0;
14703+ if (instr > limit)
14704+ break;
14705+ if (__get_user(opcode, (unsigned char __user *) instr))
14706+ break;
14707+ prefetch = (instr_lo == 0xF) &&
14708+ (opcode == 0x0D || opcode == 0x18);
14709+ break;
14710+ default:
14711+ scan_more = 0;
14712+ break;
14713+ }
14714+ }
14715+ return prefetch;
14716+}
14717+
14718+static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
14719+ unsigned long error_code)
14720+{
14721+ if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
14722+ boot_cpu_data.x86 >= 6)) {
14723+ /* Catch an obscure case of prefetch inside an NX page. */
14724+ if (nx_enabled && (error_code & 16))
14725+ return 0;
14726+ return __is_prefetch(regs, addr);
14727+ }
14728+ return 0;
14729+}
14730+
14731+static noinline void force_sig_info_fault(int si_signo, int si_code,
14732+ unsigned long address, struct task_struct *tsk)
14733+{
14734+ siginfo_t info;
14735+
14736+ info.si_signo = si_signo;
14737+ info.si_errno = 0;
14738+ info.si_code = si_code;
14739+ info.si_addr = (void __user *)address;
14740+ force_sig_info(si_signo, &info, tsk);
14741+}
14742+
14743+fastcall void do_invalid_op(struct pt_regs *, unsigned long);
14744+
14745+#ifdef CONFIG_X86_PAE
14746+static void dump_fault_path(unsigned long address)
14747+{
14748+ unsigned long *p, page;
14749+ unsigned long mfn;
14750+
14751+ page = read_cr3();
14752+ p = (unsigned long *)__va(page);
14753+ p += (address >> 30) * 2;
14754+ printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
14755+ if (p[0] & _PAGE_PRESENT) {
14756+ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
14757+ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
14758+ p = (unsigned long *)__va(page);
14759+ address &= 0x3fffffff;
14760+ p += (address >> 21) * 2;
14761+ printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n",
14762+ page, p[1], p[0]);
14763+ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
14764+#ifdef CONFIG_HIGHPTE
14765+ if (mfn_to_pfn(mfn) >= highstart_pfn)
14766+ return;
14767+#endif
14768+ if (p[0] & _PAGE_PRESENT) {
14769+ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
14770+ p = (unsigned long *) __va(page);
14771+ address &= 0x001fffff;
14772+ p += (address >> 12) * 2;
14773+ printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
14774+ page, p[1], p[0]);
14775+ }
14776+ }
14777+}
14778+#else
14779+static void dump_fault_path(unsigned long address)
14780+{
14781+ unsigned long page;
14782+
14783+ page = read_cr3();
14784+ page = ((unsigned long *) __va(page))[address >> 22];
14785+ if (oops_may_print())
14786+ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
14787+ machine_to_phys(page));
14788+ /*
14789+ * We must not directly access the pte in the highpte
14790+ * case if the page table is located in highmem.
14791+	 * And let's rather not kmap-atomic the pte, just in case
14792+ * it's allocated already.
14793+ */
14794+#ifdef CONFIG_HIGHPTE
14795+ if ((page >> PAGE_SHIFT) >= highstart_pfn)
14796+ return;
14797+#endif
14798+ if ((page & 1) && oops_may_print()) {
14799+ page &= PAGE_MASK;
14800+ address &= 0x003ff000;
14801+ page = machine_to_phys(page);
14802+ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
14803+ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
14804+ machine_to_phys(page));
14805+ }
14806+}
14807+#endif
14808+
14809+static int spurious_fault(struct pt_regs *regs,
14810+ unsigned long address,
14811+ unsigned long error_code)
14812+{
14813+ pgd_t *pgd;
14814+ pud_t *pud;
14815+ pmd_t *pmd;
14816+ pte_t *pte;
14817+
14818+ /* Reserved-bit violation or user access to kernel space? */
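+	/* (bit 3 = reserved-bit violation, bit 2 = user-mode access) */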
14819+ if (error_code & 0x0c)
14820+ return 0;
14821+
14822+ pgd = init_mm.pgd + pgd_index(address);
14823+ if (!pgd_present(*pgd))
14824+ return 0;
14825+
14826+ pud = pud_offset(pgd, address);
14827+ if (!pud_present(*pud))
14828+ return 0;
14829+
14830+ pmd = pmd_offset(pud, address);
14831+ if (!pmd_present(*pmd))
14832+ return 0;
14833+
14834+ pte = pte_offset_kernel(pmd, address);
14835+ if (!pte_present(*pte))
14836+ return 0;
14837+ if ((error_code & 0x02) && !pte_write(*pte))
14838+ return 0;
14839+#ifdef CONFIG_X86_PAE
14840+ if ((error_code & 0x10) && (__pte_val(*pte) & _PAGE_NX))
14841+ return 0;
14842+#endif
14843+
14844+ return 1;
14845+}
14846+
14847+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
14848+{
14849+ unsigned index = pgd_index(address);
14850+ pgd_t *pgd_k;
14851+ pud_t *pud, *pud_k;
14852+ pmd_t *pmd, *pmd_k;
14853+
14854+ pgd += index;
14855+ pgd_k = init_mm.pgd + index;
14856+
14857+ if (!pgd_present(*pgd_k))
14858+ return NULL;
14859+
14860+ /*
14861+ * set_pgd(pgd, *pgd_k); here would be useless on PAE
14862+ * and redundant with the set_pmd() on non-PAE. As would
14863+ * set_pud.
14864+ */
14865+
14866+ pud = pud_offset(pgd, address);
14867+ pud_k = pud_offset(pgd_k, address);
14868+ if (!pud_present(*pud_k))
14869+ return NULL;
14870+
14871+ pmd = pmd_offset(pud, address);
14872+ pmd_k = pmd_offset(pud_k, address);
14873+ if (!pmd_present(*pmd_k))
14874+ return NULL;
14875+ if (!pmd_present(*pmd))
14876+#if CONFIG_XEN_COMPAT > 0x030002
14877+ set_pmd(pmd, *pmd_k);
14878+#else
14879+ /*
14880+ * When running on older Xen we must launder *pmd_k through
14881+ * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
14882+ */
14883+ set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
14884+#endif
14885+ else
14886+ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
14887+ return pmd_k;
14888+}
14889+
14890+/*
14891+ * Handle a fault on the vmalloc or module mapping area
14892+ *
14893+ * This assumes no large pages in there.
14894+ */
14895+static inline int vmalloc_fault(unsigned long address)
14896+{
14897+ unsigned long pgd_paddr;
14898+ pmd_t *pmd_k;
14899+ pte_t *pte_k;
14900+ /*
14901+ * Synchronize this task's top level page-table
14902+ * with the 'reference' page table.
14903+ *
14904+ * Do _not_ use "current" here. We might be inside
14905+ * an interrupt in the middle of a task switch..
14906+ */
14907+ pgd_paddr = read_cr3();
14908+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
14909+ if (!pmd_k)
14910+ return -1;
14911+ pte_k = pte_offset_kernel(pmd_k, address);
14912+ if (!pte_present(*pte_k))
14913+ return -1;
14914+ return 0;
14915+}
14916+
14917+/*
14918+ * This routine handles page faults. It determines the address,
14919+ * and the problem, and then passes it off to one of the appropriate
14920+ * routines.
14921+ *
14922+ * error_code:
14923+ * bit 0 == 0 means no page found, 1 means protection fault
14924+ * bit 1 == 0 means read, 1 means write
14925+ * bit 2 == 0 means kernel, 1 means user-mode
14926+ * bit 3 == 1 means use of reserved bit detected
14927+ * bit 4 == 1 means fault was an instruction fetch
14928+ */
14929+fastcall void __kprobes do_page_fault(struct pt_regs *regs,
14930+ unsigned long error_code)
14931+{
14932+ struct task_struct *tsk;
14933+ struct mm_struct *mm;
14934+ struct vm_area_struct * vma;
14935+ unsigned long address;
14936+ int write, si_code;
14937+
14938+ /* get the address */
14939+ address = read_cr2();
14940+
14941+ /* Set the "privileged fault" bit to something sane. */
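+ /*
+ * The bit is recomputed from the saved CS RPL and the VM86 flag
+ * rather than taken from the hardware-supplied error code.
+ */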
14942+ error_code &= ~4;
14943+ error_code |= (regs->xcs & 2) << 1;
14944+ if (regs->eflags & X86_EFLAGS_VM)
14945+ error_code |= 4;
14946+
14947+ tsk = current;
14948+
14949+ si_code = SEGV_MAPERR;
14950+
14951+ /*
14952+ * We fault-in kernel-space virtual memory on-demand. The
14953+ * 'reference' page table is init_mm.pgd.
14954+ *
14955+ * NOTE! We MUST NOT take any locks for this case. We may
14956+ * be in an interrupt or a critical region, and should
14957+ * only copy the information from the master page table,
14958+ * nothing more.
14959+ *
14960+ * This verifies that the fault happens in kernel space
14961+ * (error_code & 4) == 0, and that the fault was not a
14962+ * protection error (error_code & 9) == 0.
14963+ */
14964+ if (unlikely(address >= TASK_SIZE)) {
14965+#ifdef CONFIG_XEN
14966+ /* Faults in hypervisor area can never be patched up. */
14967+ if (address >= hypervisor_virt_start)
14968+ goto bad_area_nosemaphore;
14969+#endif
14970+ if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
14971+ return;
14972+ /* Can take a spurious fault if mapping changes R/O -> R/W. */
14973+ if (spurious_fault(regs, address, error_code))
14974+ return;
14975+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
14976+ SIGSEGV) == NOTIFY_STOP)
14977+ return;
14978+ /*
14979+ * Don't take the mm semaphore here. If we fixup a prefetch
14980+ * fault we could otherwise deadlock.
14981+ */
14982+ goto bad_area_nosemaphore;
14983+ }
14984+
14985+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
14986+ SIGSEGV) == NOTIFY_STOP)
14987+ return;
14988+
14989+ /* It's safe to allow irq's after cr2 has been saved and the vmalloc
14990+ fault has been handled. */
14991+ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
14992+ local_irq_enable();
14993+
14994+ mm = tsk->mm;
14995+
14996+ /*
14997+ * If we're in an interrupt, have no user context or are running in an
14998+ * atomic region then we must not take the fault..
14999+ */
15000+ if (in_atomic() || !mm)
15001+ goto bad_area_nosemaphore;
15002+
15003+ /* When running in the kernel we expect faults to occur only to
15004+ * addresses in user space. All other faults represent errors in the
15005+ * kernel and should generate an OOPS. Unfortunately, in the case of an
15006+ * erroneous fault occurring in a code path which already holds mmap_sem
15007+ * we will deadlock attempting to validate the fault against the
15008+ * address space. Luckily the kernel only validly references user
15009+ * space from well defined areas of code, which are listed in the
15010+ * exceptions table.
15011+ *
15012+ * As the vast majority of faults will be valid we will only perform
15013+ * the source reference check when there is a possibility of a deadlock.
15014+ * Attempt to lock the address space, if we cannot we then validate the
15015+ * source. If this is invalid we can skip the address space check,
15016+ * thus avoiding the deadlock.
15017+ */
15018+ if (!down_read_trylock(&mm->mmap_sem)) {
15019+ if ((error_code & 4) == 0 &&
15020+ !search_exception_tables(regs->eip))
15021+ goto bad_area_nosemaphore;
15022+ down_read(&mm->mmap_sem);
15023+ }
15024+
15025+ vma = find_vma(mm, address);
15026+ if (!vma)
15027+ goto bad_area;
15028+ if (vma->vm_start <= address)
15029+ goto good_area;
15030+ if (!(vma->vm_flags & VM_GROWSDOWN))
15031+ goto bad_area;
15032+ if (error_code & 4) {
15033+ /*
15034+ * Accessing the stack below %esp is always a bug.
15035+ * The large cushion allows instructions like enter
15036+ * and pusha to work. ("enter $65535,$31" pushes
15037+ * 32 pointers and then decrements %esp by 65535.)
15038+ */
15039+ if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
15040+ goto bad_area;
15041+ }
15042+ if (expand_stack(vma, address))
15043+ goto bad_area;
15044+/*
15045+ * Ok, we have a good vm_area for this memory access, so
15046+ * we can handle it..
15047+ */
15048+good_area:
15049+ si_code = SEGV_ACCERR;
15050+ write = 0;
15051+ switch (error_code & 3) {
15052+ default: /* 3: write, present */
15053+#ifdef TEST_VERIFY_AREA
15054+ if (regs->cs == GET_KERNEL_CS())
15055+ printk("WP fault at %08lx\n", regs->eip);
15056+#endif
15057+ /* fall through */
15058+ case 2: /* write, not present */
15059+ if (!(vma->vm_flags & VM_WRITE))
15060+ goto bad_area;
15061+ write++;
15062+ break;
15063+ case 1: /* read, present */
15064+ goto bad_area;
15065+ case 0: /* read, not present */
15066+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
15067+ goto bad_area;
15068+ }
15069+
15070+ survive:
15071+ /*
15072+ * If for any reason at all we couldn't handle the fault,
15073+ * make sure we exit gracefully rather than endlessly redo
15074+ * the fault.
15075+ */
15076+ switch (handle_mm_fault(mm, vma, address, write)) {
15077+ case VM_FAULT_MINOR:
15078+ tsk->min_flt++;
15079+ break;
15080+ case VM_FAULT_MAJOR:
15081+ tsk->maj_flt++;
15082+ break;
15083+ case VM_FAULT_SIGBUS:
15084+ goto do_sigbus;
15085+ case VM_FAULT_OOM:
15086+ goto out_of_memory;
15087+ default:
15088+ BUG();
15089+ }
15090+
15091+ /*
15092+ * Did it hit the DOS screen memory VA from vm86 mode?
15093+ */
15094+ if (regs->eflags & VM_MASK) {
15095+ unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
15096+ if (bit < 32)
15097+ tsk->thread.screen_bitmap |= 1 << bit;
15098+ }
15099+ up_read(&mm->mmap_sem);
15100+ return;
15101+
15102+/*
15103+ * Something tried to access memory that isn't in our memory map..
15104+ * Fix it, but check if it's kernel or user first..
15105+ */
15106+bad_area:
15107+ up_read(&mm->mmap_sem);
15108+
15109+bad_area_nosemaphore:
15110+ /* User mode accesses just cause a SIGSEGV */
15111+ if (error_code & 4) {
15112+ /*
15113+ * Valid to do another page fault here because this one came
15114+ * from user space.
15115+ */
15116+ if (is_prefetch(regs, address, error_code))
15117+ return;
15118+
15119+ tsk->thread.cr2 = address;
15120+ /* Kernel addresses are always protection faults */
15121+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
15122+ tsk->thread.trap_no = 14;
15123+ force_sig_info_fault(SIGSEGV, si_code, address, tsk);
15124+ return;
15125+ }
15126+
15127+#ifdef CONFIG_X86_F00F_BUG
15128+ /*
15129+ * Pentium F0 0F C7 C8 bug workaround.
15130+ */
15131+ if (boot_cpu_data.f00f_bug) {
15132+ unsigned long nr;
15133+
15134+ nr = (address - idt_descr.address) >> 3;
15135+
15136+ if (nr == 6) {
15137+ do_invalid_op(regs, 0);
15138+ return;
15139+ }
15140+ }
15141+#endif
15142+
15143+no_context:
15144+ /* Are we prepared to handle this kernel fault? */
15145+ if (fixup_exception(regs))
15146+ return;
15147+
15148+ /*
15149+ * Valid to do another page fault here, because if this fault
15150+ * had been triggered by is_prefetch fixup_exception would have
15151+ * handled it.
15152+ */
15153+ if (is_prefetch(regs, address, error_code))
15154+ return;
15155+
15156+/*
15157+ * Oops. The kernel tried to access some bad page. We'll have to
15158+ * terminate things with extreme prejudice.
15159+ */
15160+
15161+ bust_spinlocks(1);
15162+
15163+ if (oops_may_print()) {
15164+ #ifdef CONFIG_X86_PAE
15165+ if (error_code & 16) {
15166+ pte_t *pte = lookup_address(address);
15167+
15168+ if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
15169+ printk(KERN_CRIT "kernel tried to execute "
15170+ "NX-protected page - exploit attempt? "
15171+ "(uid: %d)\n", current->uid);
15172+ }
15173+ #endif
15174+ if (address < PAGE_SIZE)
15175+ printk(KERN_ALERT "BUG: unable to handle kernel NULL "
15176+ "pointer dereference");
15177+ else
15178+ printk(KERN_ALERT "BUG: unable to handle kernel paging"
15179+ " request");
15180+ printk(" at virtual address %08lx\n",address);
15181+ printk(KERN_ALERT " printing eip:\n");
15182+ printk("%08lx\n", regs->eip);
15183+ }
15184+ dump_fault_path(address);
15185+ tsk->thread.cr2 = address;
15186+ tsk->thread.trap_no = 14;
15187+ tsk->thread.error_code = error_code;
15188+ die("Oops", regs, error_code);
15189+ bust_spinlocks(0);
15190+ do_exit(SIGKILL);
15191+
15192+/*
15193+ * We ran out of memory, or some other thing happened to us that made
15194+ * us unable to handle the page fault gracefully.
15195+ */
15196+out_of_memory:
15197+ up_read(&mm->mmap_sem);
15198+ if (tsk->pid == 1) {
15199+ yield();
15200+ down_read(&mm->mmap_sem);
15201+ goto survive;
15202+ }
15203+ printk("VM: killing process %s\n", tsk->comm);
15204+ if (error_code & 4)
15205+ do_exit(SIGKILL);
15206+ goto no_context;
15207+
15208+do_sigbus:
15209+ up_read(&mm->mmap_sem);
15210+
15211+ /* Kernel mode? Handle exceptions or die */
15212+ if (!(error_code & 4))
15213+ goto no_context;
15214+
15215+ /* User space => ok to do another page fault */
15216+ if (is_prefetch(regs, address, error_code))
15217+ return;
15218+
15219+ tsk->thread.cr2 = address;
15220+ tsk->thread.error_code = error_code;
15221+ tsk->thread.trap_no = 14;
15222+ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
15223+}
15224+
15225+#if !HAVE_SHARED_KERNEL_PMD
15226+void vmalloc_sync_all(void)
15227+{
15228+ /*
15229+ * Note that races in the updates of insync and start aren't
15230+ * problematic: insync can only get set bits added, and updates to
15231+ * start are only improving performance (without affecting correctness
15232+ * if undone).
15233+ * XEN: To work on PAE, we need to iterate over PMDs rather than PGDs.
15234+ * This change works just fine with 2-level paging too.
15235+ */
15236+#define sync_index(a) ((a) >> PMD_SHIFT)
15237+ static DECLARE_BITMAP(insync, PTRS_PER_PGD*PTRS_PER_PMD);
15238+ static unsigned long start = TASK_SIZE;
15239+ unsigned long address;
15240+
15241+ BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
15242+ for (address = start;
15243+ address >= TASK_SIZE && address < hypervisor_virt_start;
15244+ address += 1UL << PMD_SHIFT) {
15245+ if (!test_bit(sync_index(address), insync)) {
15246+ unsigned long flags;
15247+ struct page *page;
15248+
15249+ spin_lock_irqsave(&pgd_lock, flags);
15250+ /* XEN: failure path assumes non-empty pgd_list. */
15251+ if (unlikely(!pgd_list)) {
15252+ spin_unlock_irqrestore(&pgd_lock, flags);
15253+ return;
15254+ }
15255+ for (page = pgd_list; page; page =
15256+ (struct page *)page->index)
15257+ if (!vmalloc_sync_one(page_address(page),
15258+ address)) {
15259+ BUG_ON(page != pgd_list);
15260+ break;
15261+ }
15262+ spin_unlock_irqrestore(&pgd_lock, flags);
15263+ if (!page)
15264+ set_bit(sync_index(address), insync);
15265+ }
15266+ if (address == start && test_bit(sync_index(address), insync))
15267+ start = address + (1UL << PMD_SHIFT);
15268+ }
15269+}
15270+#endif
15271Index: head-2008-11-25/arch/x86/mm/highmem_32-xen.c
15272===================================================================
15273--- /dev/null 1970-01-01 00:00:00.000000000 +0000
15274+++ head-2008-11-25/arch/x86/mm/highmem_32-xen.c 2008-10-29 09:55:56.000000000 +0100
15275@@ -0,0 +1,183 @@
15276+#include <linux/highmem.h>
15277+#include <linux/module.h>
15278+
15279+void *kmap(struct page *page)
15280+{
15281+ might_sleep();
15282+ if (!PageHighMem(page))
15283+ return page_address(page);
15284+ return kmap_high(page);
15285+}
15286+
15287+void kunmap(struct page *page)
15288+{
15289+ if (in_interrupt())
15290+ BUG();
15291+ if (!PageHighMem(page))
15292+ return;
15293+ kunmap_high(page);
15294+}
15295+
15296+/*
15297+ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
15298+ * no global lock is needed and because the kmap code must perform a global TLB
15299+ * invalidation when the kmap pool wraps.
15300+ *
15301+ * However when holding an atomic kmap it is not legal to sleep, so atomic
15302+ * kmaps are appropriate for short, tight code paths only.
15303+ */
15304+static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
15305+{
15306+ enum fixed_addresses idx;
15307+ unsigned long vaddr;
15308+
15309+ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
15310+ inc_preempt_count();
15311+ if (!PageHighMem(page))
15312+ return page_address(page);
15313+
15314+ idx = type + KM_TYPE_NR*smp_processor_id();
15315+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
15316+#ifdef CONFIG_DEBUG_HIGHMEM
15317+ if (!pte_none(*(kmap_pte-idx)))
15318+ BUG();
15319+#endif
15320+ set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
15321+
15322+ return (void*) vaddr;
15323+}
15324+
15325+void *kmap_atomic(struct page *page, enum km_type type)
15326+{
15327+ return __kmap_atomic(page, type, kmap_prot);
15328+}
15329+
15330+/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
15331+void *kmap_atomic_pte(struct page *page, enum km_type type)
15332+{
15333+ return __kmap_atomic(page, type,
15334+ test_bit(PG_pinned, &page->flags)
15335+ ? PAGE_KERNEL_RO : kmap_prot);
15336+}
15337+
15338+void kunmap_atomic(void *kvaddr, enum km_type type)
15339+{
15340+#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
15341+ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
15342+ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
15343+
15344+ if (vaddr < FIXADDR_START) { // FIXME
15345+ dec_preempt_count();
15346+ preempt_check_resched();
15347+ return;
15348+ }
15349+#endif
15350+
15351+#if defined(CONFIG_DEBUG_HIGHMEM)
15352+ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
15353+ BUG();
15354+
15355+ /*
15356+ * force other mappings to Oops if they'll try to access
15357+ * this pte without first remapping it
15358+ */
15359+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
15360+ __flush_tlb_one(vaddr);
15361+#elif defined(CONFIG_XEN)
15362+ /*
15363+ * We must ensure there are no dangling pagetable references when
15364+ * returning memory to Xen (decrease_reservation).
15365+ * XXX TODO: We could make this faster by only zapping when
15366+ * kmap_flush_unused is called but that is trickier and more invasive.
15367+ */
15368+ pte_clear(&init_mm, vaddr, kmap_pte-idx);
15369+#endif
15370+
15371+ dec_preempt_count();
15372+ preempt_check_resched();
15373+}
15374+
15375+/* This is the same as kmap_atomic() but can map memory that doesn't
15376+ * have a struct page associated with it.
15377+ */
15378+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
15379+{
15380+ enum fixed_addresses idx;
15381+ unsigned long vaddr;
15382+
15383+ inc_preempt_count();
15384+
15385+ idx = type + KM_TYPE_NR*smp_processor_id();
15386+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
15387+ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
15388+ __flush_tlb_one(vaddr);
15389+
15390+ return (void*) vaddr;
15391+}
15392+
15393+struct page *kmap_atomic_to_page(void *ptr)
15394+{
15395+ unsigned long idx, vaddr = (unsigned long)ptr;
15396+ pte_t *pte;
15397+
15398+ if (vaddr < FIXADDR_START)
15399+ return virt_to_page(ptr);
15400+
15401+ idx = virt_to_fix(vaddr);
15402+ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
15403+ return pte_page(*pte);
15404+}
15405+
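+/*
+ * Clear a page.  For highmem pages, let the hypervisor clear it in
+ * place via MMUEXT_CLEAR_PAGE when the highmem-assist feature is
+ * available; otherwise fall back to a temporary kmap_atomic() mapping.
+ */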
15406+void clear_highpage(struct page *page)
15407+{
15408+ void *kaddr;
15409+
15410+ if (likely(xen_feature(XENFEAT_highmem_assist))
15411+ && PageHighMem(page)) {
15412+ struct mmuext_op meo;
15413+
15414+ meo.cmd = MMUEXT_CLEAR_PAGE;
15415+ meo.arg1.mfn = pfn_to_mfn(page_to_pfn(page));
15416+ if (HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
15417+ return;
15418+ }
15419+
15420+ kaddr = kmap_atomic(page, KM_USER0);
15421+ clear_page(kaddr);
15422+ kunmap_atomic(kaddr, KM_USER0);
15423+}
15424+
15425+void copy_highpage(struct page *to, struct page *from)
15426+{
15427+ void *vfrom, *vto;
15428+
15429+ if (likely(xen_feature(XENFEAT_highmem_assist))
15430+ && (PageHighMem(from) || PageHighMem(to))) {
15431+ unsigned long from_pfn = page_to_pfn(from);
15432+ unsigned long to_pfn = page_to_pfn(to);
15433+ struct mmuext_op meo;
15434+
15435+ meo.cmd = MMUEXT_COPY_PAGE;
15436+ meo.arg1.mfn = pfn_to_mfn(to_pfn);
15437+ meo.arg2.src_mfn = pfn_to_mfn(from_pfn);
15438+ if (mfn_to_pfn(meo.arg2.src_mfn) == from_pfn
15439+ && mfn_to_pfn(meo.arg1.mfn) == to_pfn
15440+ && HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
15441+ return;
15442+ }
15443+
15444+ vfrom = kmap_atomic(from, KM_USER0);
15445+ vto = kmap_atomic(to, KM_USER1);
15446+ copy_page(vto, vfrom);
15447+ kunmap_atomic(vfrom, KM_USER0);
15448+ kunmap_atomic(vto, KM_USER1);
15449+}
15450+
15451+EXPORT_SYMBOL(kmap);
15452+EXPORT_SYMBOL(kunmap);
15453+EXPORT_SYMBOL(kmap_atomic);
15454+EXPORT_SYMBOL(kmap_atomic_pte);
15455+EXPORT_SYMBOL(kunmap_atomic);
15456+EXPORT_SYMBOL(kmap_atomic_to_page);
15457+EXPORT_SYMBOL(clear_highpage);
15458+EXPORT_SYMBOL(copy_highpage);
15459Index: head-2008-11-25/arch/x86/mm/hypervisor.c
15460===================================================================
15461--- /dev/null 1970-01-01 00:00:00.000000000 +0000
15462+++ head-2008-11-25/arch/x86/mm/hypervisor.c 2008-10-29 09:55:56.000000000 +0100
15463@@ -0,0 +1,547 @@
15464+/******************************************************************************
15465+ * mm/hypervisor.c
15466+ *
15467+ * Update page tables via the hypervisor.
15468+ *
15469+ * Copyright (c) 2002-2004, K A Fraser
15470+ *
15471+ * This program is free software; you can redistribute it and/or
15472+ * modify it under the terms of the GNU General Public License version 2
15473+ * as published by the Free Software Foundation; or, when distributed
15474+ * separately from the Linux kernel or incorporated into other
15475+ * software packages, subject to the following license:
15476+ *
15477+ * Permission is hereby granted, free of charge, to any person obtaining a copy
15478+ * of this source file (the "Software"), to deal in the Software without
15479+ * restriction, including without limitation the rights to use, copy, modify,
15480+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
15481+ * and to permit persons to whom the Software is furnished to do so, subject to
15482+ * the following conditions:
15483+ *
15484+ * The above copyright notice and this permission notice shall be included in
15485+ * all copies or substantial portions of the Software.
15486+ *
15487+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15488+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15489+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
15490+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
15491+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
15492+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
15493+ * IN THE SOFTWARE.
15494+ */
15495+
15496+#include <linux/sched.h>
15497+#include <linux/mm.h>
15498+#include <linux/vmalloc.h>
15499+#include <asm/page.h>
15500+#include <asm/pgtable.h>
15501+#include <asm/hypervisor.h>
15502+#include <xen/balloon.h>
15503+#include <xen/features.h>
15504+#include <xen/interface/memory.h>
15505+#include <linux/module.h>
15506+#include <linux/percpu.h>
15507+#include <asm/tlbflush.h>
15508+#include <linux/highmem.h>
15509+
15510+void xen_l1_entry_update(pte_t *ptr, pte_t val)
15511+{
15512+ mmu_update_t u;
15513+#ifdef CONFIG_HIGHPTE
15514+ u.ptr = ((unsigned long)ptr >= (unsigned long)high_memory) ?
15515+ arbitrary_virt_to_machine(ptr) : virt_to_machine(ptr);
15516+#else
15517+ u.ptr = virt_to_machine(ptr);
15518+#endif
15519+ u.val = __pte_val(val);
15520+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
15521+}
15522+EXPORT_SYMBOL_GPL(xen_l1_entry_update);
15523+
15524+void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
15525+{
15526+ mmu_update_t u;
15527+ u.ptr = virt_to_machine(ptr);
15528+ u.val = __pmd_val(val);
15529+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
15530+}
15531+
15532+#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
15533+void xen_l3_entry_update(pud_t *ptr, pud_t val)
15534+{
15535+ mmu_update_t u;
15536+ u.ptr = virt_to_machine(ptr);
15537+ u.val = __pud_val(val);
15538+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
15539+}
15540+#endif
15541+
15542+#ifdef CONFIG_X86_64
15543+void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
15544+{
15545+ mmu_update_t u;
15546+ u.ptr = virt_to_machine(ptr);
15547+ u.val = __pgd_val(val);
15548+ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
15549+}
15550+#endif /* CONFIG_X86_64 */
15551+
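+/* Ask the hypervisor to switch to a new base page table (new CR3). */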
15552+void xen_pt_switch(unsigned long ptr)
15553+{
15554+ struct mmuext_op op;
15555+ op.cmd = MMUEXT_NEW_BASEPTR;
15556+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
15557+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
15558+}
15559+
15560+void xen_new_user_pt(unsigned long ptr)
15561+{
15562+ struct mmuext_op op;
15563+ op.cmd = MMUEXT_NEW_USER_BASEPTR;
15564+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
15565+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
15566+}
15567+
15568+void xen_tlb_flush(void)
15569+{
15570+ struct mmuext_op op;
15571+ op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
15572+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
15573+}
15574+EXPORT_SYMBOL(xen_tlb_flush);
15575+
15576+void xen_invlpg(unsigned long ptr)
15577+{
15578+ struct mmuext_op op;
15579+ op.cmd = MMUEXT_INVLPG_LOCAL;
15580+ op.arg1.linear_addr = ptr & PAGE_MASK;
15581+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
15582+}
15583+EXPORT_SYMBOL(xen_invlpg);
15584+
15585+#ifdef CONFIG_SMP
15586+
15587+void xen_tlb_flush_all(void)
15588+{
15589+ struct mmuext_op op;
15590+ op.cmd = MMUEXT_TLB_FLUSH_ALL;
15591+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
15592+}
15593+
15594+void xen_tlb_flush_mask(cpumask_t *mask)
15595+{
15596+ struct mmuext_op op;
15597+ if ( cpus_empty(*mask) )
15598+ return;
15599+ op.cmd = MMUEXT_TLB_FLUSH_MULTI;
15600+ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
15601+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
15602+}
15603+
15604+void xen_invlpg_all(unsigned long ptr)
15605+{
15606+ struct mmuext_op op;
15607+ op.cmd = MMUEXT_INVLPG_ALL;
15608+ op.arg1.linear_addr = ptr & PAGE_MASK;
15609+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
15610+}
15611+
15612+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
15613+{
15614+ struct mmuext_op op;
15615+ if ( cpus_empty(*mask) )
15616+ return;
15617+ op.cmd = MMUEXT_INVLPG_MULTI;
15618+ op.arg1.linear_addr = ptr & PAGE_MASK;
15619+ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
15620+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
15621+}
15622+
15623+#endif /* CONFIG_SMP */
15624+
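+/*
+ * Pin/unpin a top-level page table with the hypervisor.  The pin level
+ * (L2/L3/L4) is chosen to match the kernel's paging mode.
+ */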
15625+void xen_pgd_pin(unsigned long ptr)
15626+{
15627+ struct mmuext_op op;
15628+#ifdef CONFIG_X86_64
15629+ op.cmd = MMUEXT_PIN_L4_TABLE;
15630+#elif defined(CONFIG_X86_PAE)
15631+ op.cmd = MMUEXT_PIN_L3_TABLE;
15632+#else
15633+ op.cmd = MMUEXT_PIN_L2_TABLE;
15634+#endif
15635+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
15636+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
15637+}
15638+
15639+void xen_pgd_unpin(unsigned long ptr)
15640+{
15641+ struct mmuext_op op;
15642+ op.cmd = MMUEXT_UNPIN_TABLE;
15643+ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
15644+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
15645+}
15646+
15647+void xen_set_ldt(const void *ptr, unsigned int ents)
15648+{
15649+ struct mmuext_op op;
15650+ op.cmd = MMUEXT_SET_LDT;
15651+ op.arg1.linear_addr = (unsigned long)ptr;
15652+ op.arg2.nr_ents = ents;
15653+ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
15654+}
15655+
15656+/* Protected by balloon_lock. */
15657+#define MAX_CONTIG_ORDER 9 /* 2MB */
15658+static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
15659+static unsigned long limited_frames[1<<MAX_CONTIG_ORDER];
15660+static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
15661+
15662+/* Ensure multi-page extents are contiguous in machine memory. */
15663+int xen_create_contiguous_region(
15664+ unsigned long vstart, unsigned int order, unsigned int address_bits)
15665+{
15666+ unsigned long *in_frames = discontig_frames, out_frame;
15667+ unsigned long frame, flags;
15668+ unsigned int i;
15669+ int rc, success;
15670+ struct xen_memory_exchange exchange = {
15671+ .in = {
15672+ .nr_extents = 1UL << order,
15673+ .extent_order = 0,
15674+ .domid = DOMID_SELF
15675+ },
15676+ .out = {
15677+ .nr_extents = 1,
15678+ .extent_order = order,
15679+ .address_bits = address_bits,
15680+ .domid = DOMID_SELF
15681+ }
15682+ };
15683+
15684+ /*
15685+ * Currently an auto-translated guest will not perform I/O, nor will
15686+ * it require PAE page directories below 4GB. Therefore any calls to
15687+ * this function are redundant and can be ignored.
15688+ */
15689+ if (xen_feature(XENFEAT_auto_translated_physmap))
15690+ return 0;
15691+
15692+ if (unlikely(order > MAX_CONTIG_ORDER))
15693+ return -ENOMEM;
15694+
15695+ set_xen_guest_handle(exchange.in.extent_start, in_frames);
15696+ set_xen_guest_handle(exchange.out.extent_start, &out_frame);
15697+
15698+ scrub_pages((void *)vstart, 1 << order);
15699+
15700+ balloon_lock(flags);
15701+
15702+ /* 1. Zap current PTEs, remembering MFNs. */
15703+ for (i = 0; i < (1U<<order); i++) {
15704+ in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
15705+ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
15706+ __pte_ma(0), 0);
15707+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
15708+ INVALID_P2M_ENTRY);
15709+ }
15710+ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
15711+ BUG();
15712+
15713+ /* 2. Get a new contiguous memory extent. */
15714+ out_frame = __pa(vstart) >> PAGE_SHIFT;
15715+ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
15716+ success = (exchange.nr_exchanged == (1UL << order));
15717+ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
15718+ BUG_ON(success && (rc != 0));
15719+#if CONFIG_XEN_COMPAT <= 0x030002
15720+ if (unlikely(rc == -ENOSYS)) {
15721+ /* Compatibility when XENMEM_exchange is unsupported. */
15722+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
15723+ &exchange.in) != (1UL << order))
15724+ BUG();
15725+ success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
15726+ &exchange.out) == 1);
15727+ if (!success) {
15728+ /* Couldn't get special memory: fall back to normal. */
15729+ for (i = 0; i < (1U<<order); i++)
15730+ in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
15731+ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
15732+ &exchange.in) != (1UL<<order))
15733+ BUG();
15734+ }
15735+ }
15736+#endif
15737+
15738+ /* 3. Map the new extent in place of old pages. */
15739+ for (i = 0; i < (1U<<order); i++) {
15740+ frame = success ? (out_frame + i) : in_frames[i];
15741+ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
15742+ pfn_pte_ma(frame, PAGE_KERNEL), 0);
15743+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
15744+ }
15745+
15746+ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
15747+ ? UVMF_TLB_FLUSH|UVMF_ALL
15748+ : UVMF_INVLPG|UVMF_ALL;
15749+ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
15750+ BUG();
15751+
15752+ balloon_unlock(flags);
15753+
15754+ return success ? 0 : -ENOMEM;
15755+}
15756+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
15757+
15758+void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
15759+{
15760+ unsigned long *out_frames = discontig_frames, in_frame;
15761+ unsigned long frame, flags;
15762+ unsigned int i;
15763+ int rc, success;
15764+ struct xen_memory_exchange exchange = {
15765+ .in = {
15766+ .nr_extents = 1,
15767+ .extent_order = order,
15768+ .domid = DOMID_SELF
15769+ },
15770+ .out = {
15771+ .nr_extents = 1UL << order,
15772+ .extent_order = 0,
15773+ .domid = DOMID_SELF
15774+ }
15775+ };
15776+
15777+ if (xen_feature(XENFEAT_auto_translated_physmap))
15778+ return;
15779+
15780+ if (unlikely(order > MAX_CONTIG_ORDER))
15781+ return;
15782+
15783+ set_xen_guest_handle(exchange.in.extent_start, &in_frame);
15784+ set_xen_guest_handle(exchange.out.extent_start, out_frames);
15785+
15786+ scrub_pages((void *)vstart, 1 << order);
15787+
15788+ balloon_lock(flags);
15789+
15790+ /* 1. Find start MFN of contiguous extent. */
15791+ in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
15792+
15793+ /* 2. Zap current PTEs. */
15794+ for (i = 0; i < (1U<<order); i++) {
15795+ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
15796+ __pte_ma(0), 0);
15797+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
15798+ INVALID_P2M_ENTRY);
15799+ out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
15800+ }
15801+ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
15802+ BUG();
15803+
15804+ /* 3. Do the exchange for non-contiguous MFNs. */
15805+ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
15806+ success = (exchange.nr_exchanged == 1);
15807+ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
15808+ BUG_ON(success && (rc != 0));
15809+#if CONFIG_XEN_COMPAT <= 0x030002
15810+ if (unlikely(rc == -ENOSYS)) {
15811+ /* Compatibility when XENMEM_exchange is unsupported. */
15812+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
15813+ &exchange.in) != 1)
15814+ BUG();
15815+ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
15816+ &exchange.out) != (1UL << order))
15817+ BUG();
15818+ success = 1;
15819+ }
15820+#endif
15821+
15822+ /* 4. Map new pages in place of old pages. */
15823+ for (i = 0; i < (1U<<order); i++) {
15824+ frame = success ? out_frames[i] : (in_frame + i);
15825+ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
15826+ pfn_pte_ma(frame, PAGE_KERNEL), 0);
15827+ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
15828+ }
15829+
15830+ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
15831+ ? UVMF_TLB_FLUSH|UVMF_ALL
15832+ : UVMF_INVLPG|UVMF_ALL;
15833+ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
15834+ BUG();
15835+
15836+ balloon_unlock(flags);
15837+}
15838+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
15839+
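+/*
+ * Exchange any of @pages whose backing machine frames lie at or above
+ * 2^@address_bits, so that afterwards every page in the set is
+ * machine-addressable within @address_bits bits.
+ */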
15840+int xen_limit_pages_to_max_mfn(
15841+ struct page *pages, unsigned int order, unsigned int address_bits)
15842+{
15843+ unsigned long flags, frame;
15844+ unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
15845+ struct page *page;
15846+ unsigned int i, n, nr_mcl;
15847+ int rc, success;
15848+ DECLARE_BITMAP(limit_map, 1 << MAX_CONTIG_ORDER);
15849+
15850+ struct xen_memory_exchange exchange = {
15851+ .in = {
15852+ .extent_order = 0,
15853+ .domid = DOMID_SELF
15854+ },
15855+ .out = {
15856+ .extent_order = 0,
15857+ .address_bits = address_bits,
15858+ .domid = DOMID_SELF
15859+ }
15860+ };
15861+
15862+ if (xen_feature(XENFEAT_auto_translated_physmap))
15863+ return 0;
15864+
15865+ if (unlikely(order > MAX_CONTIG_ORDER))
15866+ return -ENOMEM;
15867+
15868+ bitmap_zero(limit_map, 1U << order);
15869+ set_xen_guest_handle(exchange.in.extent_start, in_frames);
15870+ set_xen_guest_handle(exchange.out.extent_start, out_frames);
15871+
15872+ /* 0. Scrub the pages. */
15873+ for (i = 0, n = 0; i < 1U<<order ; i++) {
15874+ page = &pages[i];
15875+ if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
15876+ continue;
15877+ __set_bit(i, limit_map);
15878+
15879+ if (!PageHighMem(page))
15880+ scrub_pages(page_address(page), 1);
15881+#ifdef CONFIG_XEN_SCRUB_PAGES
15882+ else {
15883+ scrub_pages(kmap(page), 1);
15884+ kunmap(page);
15885+ ++n;
15886+ }
15887+#endif
15888+ }
15889+ if (bitmap_empty(limit_map, 1U << order))
15890+ return 0;
15891+
15892+ if (n)
15893+ kmap_flush_unused();
15894+
15895+ balloon_lock(flags);
15896+
15897+ /* 1. Zap current PTEs (if any), remembering MFNs. */
15898+ for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
15899+ if(!test_bit(i, limit_map))
15900+ continue;
15901+ page = &pages[i];
15902+
15903+ out_frames[n] = page_to_pfn(page);
15904+ in_frames[n] = pfn_to_mfn(out_frames[n]);
15905+
15906+ if (!PageHighMem(page))
15907+ MULTI_update_va_mapping(cr_mcl + nr_mcl++,
15908+ (unsigned long)page_address(page),
15909+ __pte_ma(0), 0);
15910+
15911+ set_phys_to_machine(out_frames[n], INVALID_P2M_ENTRY);
15912+ ++n;
15913+ }
15914+ if (nr_mcl && HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
15915+ BUG();
15916+
15917+ /* 2. Get new memory below the required limit. */
15918+ exchange.in.nr_extents = n;
15919+ exchange.out.nr_extents = n;
15920+ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
15921+ success = (exchange.nr_exchanged == n);
15922+ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
15923+ BUG_ON(success && (rc != 0));
15924+#if CONFIG_XEN_COMPAT <= 0x030002
15925+ if (unlikely(rc == -ENOSYS)) {
15926+ /* Compatibility when XENMEM_exchange is unsupported. */
15927+ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
15928+ &exchange.in) != n)
15929+ BUG();
15930+ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
15931+ &exchange.out) != n)
15932+ BUG();
15933+ success = 1;
15934+ }
15935+#endif
15936+
15937+ /* 3. Map the new pages in place of old pages. */
15938+ for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
15939+ if(!test_bit(i, limit_map))
15940+ continue;
15941+ page = &pages[i];
15942+
15943+ frame = success ? out_frames[n] : in_frames[n];
15944+
15945+ if (!PageHighMem(page))
15946+ MULTI_update_va_mapping(cr_mcl + nr_mcl++,
15947+ (unsigned long)page_address(page),
15948+ pfn_pte_ma(frame, PAGE_KERNEL), 0);
15949+
15950+ set_phys_to_machine(page_to_pfn(page), frame);
15951+ ++n;
15952+ }
15953+ if (nr_mcl) {
15954+ cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
15955+ ? UVMF_TLB_FLUSH|UVMF_ALL
15956+ : UVMF_INVLPG|UVMF_ALL;
15957+ if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
15958+ BUG();
15959+ }
15960+
15961+ balloon_unlock(flags);
15962+
15963+ return success ? 0 : -ENOMEM;
15964+}
15965+EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
15966+
15967+#ifdef __i386__
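+/* Update one 8-byte LDT descriptor via HYPERVISOR_update_descriptor. */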
15968+int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
15969+{
15970+ __u32 *lp = (__u32 *)((char *)ldt + entry * 8);
15971+ maddr_t mach_lp = arbitrary_virt_to_machine(lp);
15972+ return HYPERVISOR_update_descriptor(
15973+ mach_lp, (u64)entry_a | ((u64)entry_b<<32));
15974+}
15975+#endif
15976+
15977+#define MAX_BATCHED_FULL_PTES 32
15978+
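+/*
+ * Change the protection of all present PTEs in [addr, end) using
+ * batched mmu_update hypercalls that preserve the accessed/dirty bits.
+ * Returns non-zero if the range was handled via this hypercall path.
+ */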
15979+int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
15980+ unsigned long addr, unsigned long end, pgprot_t newprot)
15981+{
15982+ int rc = 0, i = 0;
15983+ mmu_update_t u[MAX_BATCHED_FULL_PTES];
15984+ pte_t *pte;
15985+ spinlock_t *ptl;
15986+
15987+ if (!xen_feature(XENFEAT_mmu_pt_update_preserve_ad))
15988+ return 0;
15989+
15990+ pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
15991+ do {
15992+ if (pte_present(*pte)) {
15993+ u[i].ptr = (__pmd_val(*pmd) & PHYSICAL_PAGE_MASK)
15994+ | ((unsigned long)pte & ~PAGE_MASK)
15995+ | MMU_PT_UPDATE_PRESERVE_AD;
15996+ u[i].val = __pte_val(pte_modify(*pte, newprot));
15997+ if (++i == MAX_BATCHED_FULL_PTES) {
15998+ if ((rc = HYPERVISOR_mmu_update(
15999+ &u[0], i, NULL, DOMID_SELF)) != 0)
16000+ break;
16001+ i = 0;
16002+ }
16003+ }
16004+ } while (pte++, addr += PAGE_SIZE, addr != end);
16005+ if (i)
16006+ rc = HYPERVISOR_mmu_update( &u[0], i, NULL, DOMID_SELF);
16007+ pte_unmap_unlock(pte - 1, ptl);
16008+ BUG_ON(rc && rc != -ENOSYS);
16009+ return !rc;
16010+}
16011Index: head-2008-11-25/arch/x86/mm/init_32-xen.c
16012===================================================================
16013--- /dev/null 1970-01-01 00:00:00.000000000 +0000
16014+++ head-2008-11-25/arch/x86/mm/init_32-xen.c 2008-10-29 09:55:56.000000000 +0100
16015@@ -0,0 +1,840 @@
16016+/*
16017+ * linux/arch/i386/mm/init.c
16018+ *
16019+ * Copyright (C) 1995 Linus Torvalds
16020+ *
16021+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
16022+ */
16023+
16024+#include <linux/module.h>
16025+#include <linux/signal.h>
16026+#include <linux/sched.h>
16027+#include <linux/kernel.h>
16028+#include <linux/errno.h>
16029+#include <linux/string.h>
16030+#include <linux/types.h>
16031+#include <linux/ptrace.h>
16032+#include <linux/mman.h>
16033+#include <linux/mm.h>
16034+#include <linux/hugetlb.h>
16035+#include <linux/swap.h>
16036+#include <linux/smp.h>
16037+#include <linux/init.h>
16038+#include <linux/highmem.h>
16039+#include <linux/pagemap.h>
16040+#include <linux/poison.h>
16041+#include <linux/bootmem.h>
16042+#include <linux/slab.h>
16043+#include <linux/proc_fs.h>
16044+#include <linux/efi.h>
16045+#include <linux/memory_hotplug.h>
16046+#include <linux/initrd.h>
16047+#include <linux/cpumask.h>
16048+#include <linux/dma-mapping.h>
16049+#include <linux/scatterlist.h>
16050+
16051+#include <asm/processor.h>
16052+#include <asm/system.h>
16053+#include <asm/uaccess.h>
16054+#include <asm/pgtable.h>
16055+#include <asm/dma.h>
16056+#include <asm/fixmap.h>
16057+#include <asm/e820.h>
16058+#include <asm/apic.h>
16059+#include <asm/tlb.h>
16060+#include <asm/tlbflush.h>
16061+#include <asm/sections.h>
16062+#include <asm/hypervisor.h>
16063+#include <asm/swiotlb.h>
16064+
16065+unsigned int __VMALLOC_RESERVE = 128 << 20;
16066+
16067+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
16068+unsigned long highstart_pfn, highend_pfn;
16069+
16070+static int noinline do_test_wp_bit(void);
16071+
16072+/*
16073+ * Creates a middle page table and puts a pointer to it in the
16074+ * given global directory entry. This only returns the gd entry
16075+ * in non-PAE compilation mode, since the middle layer is folded.
16076+ */
16077+static pmd_t * __init one_md_table_init(pgd_t *pgd)
16078+{
16079+ pud_t *pud;
16080+ pmd_t *pmd_table;
16081+
16082+#ifdef CONFIG_X86_PAE
16083+ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
16084+ make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
16085+ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
16086+ pud = pud_offset(pgd, 0);
16087+ if (pmd_table != pmd_offset(pud, 0))
16088+ BUG();
16089+#else
16090+ pud = pud_offset(pgd, 0);
16091+ pmd_table = pmd_offset(pud, 0);
16092+#endif
16093+
16094+ return pmd_table;
16095+}
16096+
16097+/*
16098+ * Create a page table and place a pointer to it in a middle page
16099+ * directory entry.
16100+ */
16101+static pte_t * __init one_page_table_init(pmd_t *pmd)
16102+{
16103+ if (pmd_none(*pmd)) {
16104+ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
16105+ make_lowmem_page_readonly(page_table,
16106+ XENFEAT_writable_page_tables);
16107+ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
16108+ if (page_table != pte_offset_kernel(pmd, 0))
16109+ BUG();
16110+
16111+ return page_table;
16112+ }
16113+
16114+ return pte_offset_kernel(pmd, 0);
16115+}
16116+
16117+/*
16118+ * This function initializes a certain range of kernel virtual memory
16119+ * with new bootmem page tables wherever page tables are missing in
16120+ * the given range.
16121+ */
16122+
16123+/*
16124+ * NOTE: The page tables are allocated contiguously in physical memory,
16125+ * so we can cache the location of the first one and move around without
16126+ * checking the pgd every time.
16127+ */
16128+static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
16129+{
16130+ pgd_t *pgd;
16131+ pud_t *pud;
16132+ pmd_t *pmd;
16133+ int pgd_idx, pmd_idx;
16134+ unsigned long vaddr;
16135+
16136+ vaddr = start;
16137+ pgd_idx = pgd_index(vaddr);
16138+ pmd_idx = pmd_index(vaddr);
16139+ pgd = pgd_base + pgd_idx;
16140+
16141+ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
16142+ if (pgd_none(*pgd))
16143+ one_md_table_init(pgd);
16144+ pud = pud_offset(pgd, vaddr);
16145+ pmd = pmd_offset(pud, vaddr);
16146+ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
16147+ if (vaddr < hypervisor_virt_start && pmd_none(*pmd))
16148+ one_page_table_init(pmd);
16149+
16150+ vaddr += PMD_SIZE;
16151+ }
16152+ pmd_idx = 0;
16153+ }
16154+}
16155+
16156+static inline int is_kernel_text(unsigned long addr)
16157+{
16158+ if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
16159+ return 1;
16160+ return 0;
16161+}
16162+
16163+/*
16164+ * This maps the physical memory to kernel virtual address space, a total
16165+ * of max_low_pfn pages, by creating page tables starting from address
16166+ * PAGE_OFFSET.
16167+ */
16168+static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
16169+{
16170+ unsigned long pfn;
16171+ pgd_t *pgd;
16172+ pmd_t *pmd;
16173+ pte_t *pte;
16174+ int pgd_idx, pmd_idx, pte_ofs;
16175+
16176+ unsigned long max_ram_pfn = xen_start_info->nr_pages;
16177+ if (max_ram_pfn > max_low_pfn)
16178+ max_ram_pfn = max_low_pfn;
16179+
16180+ pgd_idx = pgd_index(PAGE_OFFSET);
16181+ pgd = pgd_base + pgd_idx;
16182+ pfn = 0;
16183+ pmd_idx = pmd_index(PAGE_OFFSET);
16184+ pte_ofs = pte_index(PAGE_OFFSET);
16185+
16186+ for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
16187+#ifdef CONFIG_XEN
16188+ /*
16189+ * Native Linux doesn't have PAE paging enabled yet at this
16190+ * point. When running as a Xen domain we are already in
16191+ * PAE mode, so we can't simply hook in an empty
16192+ * pmd. That would kill the mappings we are currently
16193+ * using ...
16194+ */
16195+ pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
16196+#else
16197+ pmd = one_md_table_init(pgd);
16198+#endif
16199+ if (pfn >= max_low_pfn)
16200+ continue;
16201+ pmd += pmd_idx;
16202+ for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
16203+ unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
16204+ if (address >= hypervisor_virt_start)
16205+ continue;
16206+
16207+ /* Map with big pages if possible, otherwise create normal page tables. */
16208+ if (cpu_has_pse) {
16209+ unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
16210+
16211+ if (is_kernel_text(address) || is_kernel_text(address2))
16212+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
16213+ else
16214+ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
16215+ pfn += PTRS_PER_PTE;
16216+ } else {
16217+ pte = one_page_table_init(pmd);
16218+
16219+ pte += pte_ofs;
16220+ for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
16221+ /* XEN: Only map initial RAM allocation. */
16222+ if ((pfn >= max_ram_pfn) || pte_present(*pte))
16223+ continue;
16224+ if (is_kernel_text(address))
16225+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
16226+ else
16227+ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
16228+ }
16229+ pte_ofs = 0;
16230+ }
16231+ }
16232+ pmd_idx = 0;
16233+ }
16234+}
16235+
16236+#ifndef CONFIG_XEN
16237+
16238+static inline int page_kills_ppro(unsigned long pagenr)
16239+{
16240+ if (pagenr >= 0x70000 && pagenr <= 0x7003F)
16241+ return 1;
16242+ return 0;
16243+}
16244+
16245+#else
16246+
16247+#define page_kills_ppro(p) 0
16248+
16249+#endif
16250+
16251+extern int is_available_memory(efi_memory_desc_t *);
16252+
16253+int page_is_ram(unsigned long pagenr)
16254+{
16255+ int i;
16256+ unsigned long addr, end;
16257+
16258+ if (efi_enabled) {
16259+ efi_memory_desc_t *md;
16260+ void *p;
16261+
16262+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
16263+ md = p;
16264+ if (!is_available_memory(md))
16265+ continue;
16266+ addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
16267+ end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
16268+
16269+ if ((pagenr >= addr) && (pagenr < end))
16270+ return 1;
16271+ }
16272+ return 0;
16273+ }
16274+
16275+ for (i = 0; i < e820.nr_map; i++) {
16276+
16277+ if (e820.map[i].type != E820_RAM) /* not usable memory */
16278+ continue;
16279+ /*
16280+ * !!!FIXME!!! Some BIOSen report areas as RAM that
16281+ * are not. Notably the 640->1Mb area. We need a sanity
16282+ * check here.
16283+ */
16284+ addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
16285+ end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
16286+ if ((pagenr >= addr) && (pagenr < end))
16287+ return 1;
16288+ }
16289+ return 0;
16290+}
16291+
16292+#ifdef CONFIG_HIGHMEM
16293+pte_t *kmap_pte;
16294+pgprot_t kmap_prot;
16295+
16296+#define kmap_get_fixmap_pte(vaddr) \
16297+ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
16298+
16299+static void __init kmap_init(void)
16300+{
16301+ unsigned long kmap_vstart;
16302+
16303+ /* cache the first kmap pte */
16304+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
16305+ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
16306+
16307+ kmap_prot = PAGE_KERNEL;
16308+}
16309+
16310+static void __init permanent_kmaps_init(pgd_t *pgd_base)
16311+{
16312+ pgd_t *pgd;
16313+ pud_t *pud;
16314+ pmd_t *pmd;
16315+ pte_t *pte;
16316+ unsigned long vaddr;
16317+
16318+ vaddr = PKMAP_BASE;
16319+ page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
16320+
16321+ pgd = swapper_pg_dir + pgd_index(vaddr);
16322+ pud = pud_offset(pgd, vaddr);
16323+ pmd = pmd_offset(pud, vaddr);
16324+ pte = pte_offset_kernel(pmd, vaddr);
16325+ pkmap_page_table = pte;
16326+}
16327+
16328+static void __meminit free_new_highpage(struct page *page, int pfn)
16329+{
16330+ init_page_count(page);
16331+ if (pfn < xen_start_info->nr_pages)
16332+ __free_page(page);
16333+ totalhigh_pages++;
16334+}
16335+
16336+void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
16337+{
16338+ if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
16339+ ClearPageReserved(page);
16340+ free_new_highpage(page, pfn);
16341+ } else
16342+ SetPageReserved(page);
16343+}
16344+
16345+static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
16346+{
16347+ free_new_highpage(page, pfn);
16348+ totalram_pages++;
16349+#ifdef CONFIG_FLATMEM
16350+ max_mapnr = max(pfn, max_mapnr);
16351+#endif
16352+ num_physpages++;
16353+ return 0;
16354+}
16355+
16356+/*
16357+ * Not currently handling the NUMA case.
16358+ * Assume a single node and that all memory
16359+ * added dynamically and onlined here
16360+ * is in HIGHMEM.
16361+ */
16362+void online_page(struct page *page)
16363+{
16364+ ClearPageReserved(page);
16365+ add_one_highpage_hotplug(page, page_to_pfn(page));
16366+}
16367+
16368+
16369+#ifdef CONFIG_NUMA
16370+extern void set_highmem_pages_init(int);
16371+#else
16372+static void __init set_highmem_pages_init(int bad_ppro)
16373+{
16374+ int pfn;
16375+ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
16376+ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
16377+ totalram_pages += totalhigh_pages;
16378+}
16379+#endif /* CONFIG_NUMA */
16380+
16381+#else
16382+#define kmap_init() do { } while (0)
16383+#define permanent_kmaps_init(pgd_base) do { } while (0)
16384+#define set_highmem_pages_init(bad_ppro) do { } while (0)
16385+#endif /* CONFIG_HIGHMEM */
16386+
16387+unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
16388+EXPORT_SYMBOL(__PAGE_KERNEL);
16389+unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
16390+
16391+#ifdef CONFIG_NUMA
16392+extern void __init remap_numa_kva(void);
16393+#else
16394+#define remap_numa_kva() do {} while (0)
16395+#endif
16396+
16397+pgd_t *swapper_pg_dir;
16398+
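+/*
+ * Build the kernel page tables on top of the page tables Xen handed
+ * over at boot (xen_start_info->pt_base), then set up the fixmap range
+ * and the permanent kmaps.
+ */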
16399+static void __init pagetable_init (void)
16400+{
16401+ unsigned long vaddr;
16402+ pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
16403+
16404+ /* Enable PSE if available */
16405+ if (cpu_has_pse) {
16406+ set_in_cr4(X86_CR4_PSE);
16407+ }
16408+
16409+ /* Enable PGE if available */
16410+ if (cpu_has_pge) {
16411+ set_in_cr4(X86_CR4_PGE);
16412+ __PAGE_KERNEL |= _PAGE_GLOBAL;
16413+ __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
16414+ }
16415+
16416+ kernel_physical_mapping_init(pgd_base);
16417+ remap_numa_kva();
16418+
16419+ /*
16420+ * Fixed mappings, only the page table structure has to be
16421+ * created - mappings will be set by set_fixmap():
16422+ */
16423+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
16424+ page_table_range_init(vaddr, hypervisor_virt_start, pgd_base);
16425+
16426+ permanent_kmaps_init(pgd_base);
16427+}
16428+
16429+#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
16430+/*
16431+ * Swap suspend & friends need this for resume because things like the intel-agp
16432+ * driver might have split up a kernel 4MB mapping.
16433+ */
16434+char __nosavedata swsusp_pg_dir[PAGE_SIZE]
16435+ __attribute__ ((aligned (PAGE_SIZE)));
16436+
16437+static inline void save_pg_dir(void)
16438+{
16439+ memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
16440+}
16441+#else
16442+static inline void save_pg_dir(void)
16443+{
16444+}
16445+#endif
16446+
16447+void zap_low_mappings (void)
16448+{
16449+ int i;
16450+
16451+ save_pg_dir();
16452+
16453+ /*
16454+ * Zap initial low-memory mappings.
16455+ *
16456+ * Note that "pgd_clear()" doesn't do it for
16457+ * us, because pgd_clear() is a no-op on i386.
16458+ */
16459+ for (i = 0; i < USER_PTRS_PER_PGD; i++)
16460+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
16461+ set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
16462+#else
16463+ set_pgd(swapper_pg_dir+i, __pgd(0));
16464+#endif
16465+ flush_tlb_all();
16466+}
16467+
16468+static int disable_nx __initdata = 0;
16469+u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
16470+EXPORT_SYMBOL(__supported_pte_mask);
16471+
16472+/*
16473+ * noexec = on|off
16474+ *
16475+ * Control non executable mappings.
16476+ *
16477+ * on Enable
16478+ * off Disable
16479+ */
16480+void __init noexec_setup(const char *str)
16481+{
16482+ if (!strncmp(str, "on",2) && cpu_has_nx) {
16483+ __supported_pte_mask |= _PAGE_NX;
16484+ disable_nx = 0;
16485+ } else if (!strncmp(str,"off",3)) {
16486+ disable_nx = 1;
16487+ __supported_pte_mask &= ~_PAGE_NX;
16488+ }
16489+}
16490+
16491+int nx_enabled = 0;
16492+#ifdef CONFIG_X86_PAE
16493+
16494+static void __init set_nx(void)
16495+{
16496+ unsigned int v[4], l, h;
16497+
16498+ if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
16499+ cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
16500+ if ((v[3] & (1 << 20)) && !disable_nx) {
16501+ rdmsr(MSR_EFER, l, h);
16502+ l |= EFER_NX;
16503+ wrmsr(MSR_EFER, l, h);
16504+ nx_enabled = 1;
16505+ __supported_pte_mask |= _PAGE_NX;
16506+ }
16507+ }
16508+}
16509+
16510+/*
16511+ * Enables/disables executability of a given kernel page and
16512+ * returns the previous setting.
16513+ */
16514+int __init set_kernel_exec(unsigned long vaddr, int enable)
16515+{
16516+ pte_t *pte;
16517+ int ret = 1;
16518+
16519+ if (!nx_enabled)
16520+ goto out;
16521+
16522+ pte = lookup_address(vaddr);
16523+ BUG_ON(!pte);
16524+
16525+ if (!pte_exec_kernel(*pte))
16526+ ret = 0;
16527+
16528+ if (enable)
16529+ pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
16530+ else
16531+ pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
16532+ __flush_tlb_all();
16533+out:
16534+ return ret;
16535+}
16536+
16537+#endif
16538+
16539+/*
16540+ * paging_init() sets up the page tables - note that the first 8MB are
16541+ * already mapped by head.S.
16542+ *
16543+ * This routine also unmaps the page at virtual kernel address 0, so
16544+ * that we can trap those pesky NULL-reference errors in the kernel.
16545+ */
16546+void __init paging_init(void)
16547+{
16548+ int i;
16549+
16550+#ifdef CONFIG_X86_PAE
16551+ set_nx();
16552+ if (nx_enabled)
16553+ printk("NX (Execute Disable) protection: active\n");
16554+#endif
16555+
16556+ pagetable_init();
16557+
16558+#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
16559+ /*
16560+ * We will bail out later - printk doesn't work right now so
16561+ * the user would just see a hanging kernel.
16562+ * when running as xen domain we are already in PAE mode at
16563+ * this point.
16564+ */
16565+ if (cpu_has_pae)
16566+ set_in_cr4(X86_CR4_PAE);
16567+#endif
16568+ __flush_tlb_all();
16569+
16570+ kmap_init();
16571+
16572+ /* Switch to the real shared_info page, and clear the
16573+ * dummy page. */
16574+ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
16575+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
16576+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
16577+
16578+ /* Setup mapping of lower 1st MB */
16579+ for (i = 0; i < NR_FIX_ISAMAPS; i++)
16580+ if (is_initial_xendomain())
16581+ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
16582+ else
16583+ __set_fixmap(FIX_ISAMAP_BEGIN - i,
16584+ virt_to_machine(empty_zero_page),
16585+ PAGE_KERNEL_RO);
16586+}
16587+
16588+/*
16589+ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
16590+ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
16591+ * used to involve black magic jumps to work around some nasty CPU bugs,
16592+ * but fortunately the switch to using exceptions got rid of all that.
16593+ */
16594+
16595+static void __init test_wp_bit(void)
16596+{
16597+ printk("Checking if this processor honours the WP bit even in supervisor mode... ");
16598+
16599+ /* Any page-aligned address will do, the test is non-destructive */
16600+ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
16601+ boot_cpu_data.wp_works_ok = do_test_wp_bit();
16602+ clear_fixmap(FIX_WP_TEST);
16603+
16604+ if (!boot_cpu_data.wp_works_ok) {
16605+ printk("No.\n");
16606+#ifdef CONFIG_X86_WP_WORKS_OK
16607+ panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
16608+#endif
16609+ } else {
16610+ printk("Ok.\n");
16611+ }
16612+}
16613+
16614+static void __init set_max_mapnr_init(void)
16615+{
16616+#ifdef CONFIG_HIGHMEM
16617+ num_physpages = highend_pfn;
16618+#else
16619+ num_physpages = max_low_pfn;
16620+#endif
16621+#ifdef CONFIG_FLATMEM
16622+ max_mapnr = num_physpages;
16623+#endif
16624+}
16625+
16626+static struct kcore_list kcore_mem, kcore_vmalloc;
16627+
16628+void __init mem_init(void)
16629+{
16630+ extern int ppro_with_ram_bug(void);
16631+ int codesize, reservedpages, datasize, initsize;
16632+ int tmp;
16633+ int bad_ppro;
16634+ unsigned long pfn;
16635+
16636+#if defined(CONFIG_SWIOTLB)
16637+ swiotlb_init();
16638+#endif
16639+
16640+#ifdef CONFIG_FLATMEM
16641+ if (!mem_map)
16642+ BUG();
16643+#endif
16644+
16645+ bad_ppro = ppro_with_ram_bug();
16646+
16647+#ifdef CONFIG_HIGHMEM
16648+ /* check that fixmap and pkmap do not overlap */
16649+ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
16650+ printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
16651+ printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
16652+ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
16653+ BUG();
16654+ }
16655+#endif
16656+
16657+ set_max_mapnr_init();
16658+
16659+#ifdef CONFIG_HIGHMEM
16660+ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
16661+#else
16662+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
16663+#endif
16664+ printk("vmalloc area: %lx-%lx, maxmem %lx\n",
16665+ VMALLOC_START,VMALLOC_END,MAXMEM);
16666+ BUG_ON(VMALLOC_START > VMALLOC_END);
16667+
16668+ /* this will put all low memory onto the freelists */
16669+ totalram_pages += free_all_bootmem();
16670+ /* XEN: init and count low-mem pages outside initial allocation. */
16671+ for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
16672+ ClearPageReserved(pfn_to_page(pfn));
16673+ init_page_count(pfn_to_page(pfn));
16674+ totalram_pages++;
16675+ }
16676+
16677+ reservedpages = 0;
16678+ for (tmp = 0; tmp < max_low_pfn; tmp++)
16679+ /*
16680+ * Only count reserved RAM pages
16681+ */
16682+ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
16683+ reservedpages++;
16684+
16685+ set_highmem_pages_init(bad_ppro);
16686+
16687+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
16688+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
16689+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
16690+
16691+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
16692+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
16693+ VMALLOC_END-VMALLOC_START);
16694+
16695+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
16696+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
16697+ num_physpages << (PAGE_SHIFT-10),
16698+ codesize >> 10,
16699+ reservedpages << (PAGE_SHIFT-10),
16700+ datasize >> 10,
16701+ initsize >> 10,
16702+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
16703+ );
16704+
16705+#ifdef CONFIG_X86_PAE
16706+ if (!cpu_has_pae)
16707+ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
16708+#endif
16709+ if (boot_cpu_data.wp_works_ok < 0)
16710+ test_wp_bit();
16711+
16712+ /*
16713+ * Subtle. SMP is doing its boot stuff late (because it has to
16714+ * fork idle threads) - but it also needs low mappings for the
16715+ * protected-mode entry to work. We zap these entries only after
16716+ * the WP-bit has been tested.
16717+ */
16718+#ifndef CONFIG_SMP
16719+ zap_low_mappings();
16720+#endif
16721+
16722+ set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
16723+}
16724+
16725+/*
16726+ * this is for the non-NUMA, single node SMP system case.
16727+ * Specifically, in the case of x86, we will always add
16728+ * memory to the highmem for now.
16729+ */
16730+#ifdef CONFIG_MEMORY_HOTPLUG
16731+#ifndef CONFIG_NEED_MULTIPLE_NODES
16732+int arch_add_memory(int nid, u64 start, u64 size)
16733+{
16734+ struct pglist_data *pgdata = &contig_page_data;
16735+ struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
16736+ unsigned long start_pfn = start >> PAGE_SHIFT;
16737+ unsigned long nr_pages = size >> PAGE_SHIFT;
16738+
16739+ return __add_pages(zone, start_pfn, nr_pages);
16740+}
16741+
16742+int remove_memory(u64 start, u64 size)
16743+{
16744+ return -EINVAL;
16745+}
16746+#endif
16747+#endif
16748+
16749+kmem_cache_t *pgd_cache;
16750+kmem_cache_t *pmd_cache;
16751+
16752+void __init pgtable_cache_init(void)
16753+{
16754+ if (PTRS_PER_PMD > 1) {
16755+ pmd_cache = kmem_cache_create("pmd",
16756+ PTRS_PER_PMD*sizeof(pmd_t),
16757+ PTRS_PER_PMD*sizeof(pmd_t),
16758+ 0,
16759+ pmd_ctor,
16760+ NULL);
16761+ if (!pmd_cache)
16762+ panic("pgtable_cache_init(): cannot create pmd cache");
16763+ }
16764+ pgd_cache = kmem_cache_create("pgd",
16765+#ifndef CONFIG_XEN
16766+ PTRS_PER_PGD*sizeof(pgd_t),
16767+ PTRS_PER_PGD*sizeof(pgd_t),
16768+#else
16769+ PAGE_SIZE,
16770+ PAGE_SIZE,
16771+#endif
16772+ 0,
16773+ pgd_ctor,
16774+ PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
16775+ if (!pgd_cache)
16776+ panic("pgtable_cache_init(): Cannot create pgd cache");
16777+}
16778+
16779+/*
16780+ * This function cannot be __init, since exceptions don't work in that
16781+ * section. Put this after the callers, so that it cannot be inlined.
16782+ */
16783+static int noinline do_test_wp_bit(void)
16784+{
16785+ char tmp_reg;
16786+ int flag;
16787+
16788+ __asm__ __volatile__(
16789+ " movb %0,%1 \n"
16790+ "1: movb %1,%0 \n"
16791+ " xorl %2,%2 \n"
16792+ "2: \n"
16793+ ".section __ex_table,\"a\"\n"
16794+ " .align 4 \n"
16795+ " .long 1b,2b \n"
16796+ ".previous \n"
16797+ :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
16798+ "=q" (tmp_reg),
16799+ "=r" (flag)
16800+ :"2" (1)
16801+ :"memory");
16802+
16803+ return flag;
16804+}
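/*
 * How the test above works: flag starts out as 1 (the "2" input constraint).
 * The write-back at label 1 targets the read-only FIX_WP_TEST mapping, and
 * that instruction has an __ex_table entry whose fixup is label 2.  If the
 * CPU honours WP in supervisor mode, the write faults, the fixup skips the
 * xorl, and flag comes back as 1.  If WP is ignored, the write silently
 * succeeds, the xorl clears flag, and 0 is returned.
 */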
16805+
16806+#ifdef CONFIG_DEBUG_RODATA
16807+
16808+void mark_rodata_ro(void)
16809+{
16810+ unsigned long addr = (unsigned long)__start_rodata;
16811+
16812+ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
16813+ change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
16814+
16815+ printk("Write protecting the kernel read-only data: %uk\n",
16816+ (__end_rodata - __start_rodata) >> 10);
16817+
16818+ /*
16819+ * change_page_attr() requires a global_flush_tlb() call after it.
16820+ * We do this after the printk so that if something went wrong in the
16821+ * change, the printk gets out at least to give a better debug hint
16822+ * of who is the culprit.
16823+ */
16824+ global_flush_tlb();
16825+}
16826+#endif
16827+
16828+void free_init_pages(char *what, unsigned long begin, unsigned long end)
16829+{
16830+ unsigned long addr;
16831+
16832+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
16833+ ClearPageReserved(virt_to_page(addr));
16834+ init_page_count(virt_to_page(addr));
16835+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
16836+ free_page(addr);
16837+ totalram_pages++;
16838+ }
16839+ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
16840+}
16841+
16842+void free_initmem(void)
16843+{
16844+ free_init_pages("unused kernel memory",
16845+ (unsigned long)(&__init_begin),
16846+ (unsigned long)(&__init_end));
16847+}
16848+
16849+#ifdef CONFIG_BLK_DEV_INITRD
16850+void free_initrd_mem(unsigned long start, unsigned long end)
16851+{
16852+ free_init_pages("initrd memory", start, end);
16853+}
16854+#endif
16855+
16856Index: head-2008-11-25/arch/x86/mm/ioremap_32-xen.c
16857===================================================================
16858--- /dev/null 1970-01-01 00:00:00.000000000 +0000
16859+++ head-2008-11-25/arch/x86/mm/ioremap_32-xen.c 2008-04-02 12:34:02.000000000 +0200
16860@@ -0,0 +1,443 @@
16861+/*
16862+ * arch/i386/mm/ioremap.c
16863+ *
16864+ * Re-map IO memory to kernel address space so that we can access it.
16865+ * This is needed for high PCI addresses that aren't mapped in the
16866+ * 640k-1MB IO memory area on PC's
16867+ *
16868+ * (C) Copyright 1995 1996 Linus Torvalds
16869+ */
16870+
16871+#include <linux/vmalloc.h>
16872+#include <linux/init.h>
16873+#include <linux/slab.h>
16874+#include <linux/module.h>
16875+#include <asm/io.h>
16876+#include <asm/fixmap.h>
16877+#include <asm/cacheflush.h>
16878+#include <asm/tlbflush.h>
16879+#include <asm/pgtable.h>
16880+#include <asm/pgalloc.h>
16881+
16882+#define ISA_START_ADDRESS 0x0
16883+#define ISA_END_ADDRESS 0x100000
16884+
16885+static int direct_remap_area_pte_fn(pte_t *pte,
16886+ struct page *pmd_page,
16887+ unsigned long address,
16888+ void *data)
16889+{
16890+ mmu_update_t **v = (mmu_update_t **)data;
16891+
16892+ BUG_ON(!pte_none(*pte));
16893+
16894+ (*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) <<
16895+ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
16896+ (*v)++;
16897+
16898+ return 0;
16899+}
16900+
16901+static int __direct_remap_pfn_range(struct mm_struct *mm,
16902+ unsigned long address,
16903+ unsigned long mfn,
16904+ unsigned long size,
16905+ pgprot_t prot,
16906+ domid_t domid)
16907+{
16908+ int rc;
16909+ unsigned long i, start_address;
16910+ mmu_update_t *u, *v, *w;
16911+
16912+ u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
16913+ if (u == NULL)
16914+ return -ENOMEM;
16915+
16916+ start_address = address;
16917+
16918+ flush_cache_all();
16919+
16920+ for (i = 0; i < size; i += PAGE_SIZE) {
16921+ if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
16922+ /* Flush a full batch after filling in the PTE ptrs. */
16923+ rc = apply_to_page_range(mm, start_address,
16924+ address - start_address,
16925+ direct_remap_area_pte_fn, &w);
16926+ if (rc)
16927+ goto out;
16928+ rc = -EFAULT;
16929+ if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
16930+ goto out;
16931+ v = w = u;
16932+ start_address = address;
16933+ }
16934+
16935+ /*
16936+ * Fill in the machine address: PTE ptr is done later by
16937+ * apply_to_page_range().
16938+ */
16939+ v->val = __pte_val(pfn_pte_ma(mfn, prot)) | _PAGE_IO;
16940+
16941+ mfn++;
16942+ address += PAGE_SIZE;
16943+ v++;
16944+ }
16945+
16946+ if (v != u) {
16947+ /* Final batch. */
16948+ rc = apply_to_page_range(mm, start_address,
16949+ address - start_address,
16950+ direct_remap_area_pte_fn, &w);
16951+ if (rc)
16952+ goto out;
16953+ rc = -EFAULT;
16954+ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
16955+ goto out;
16956+ }
16957+
16958+ rc = 0;
16959+
16960+ out:
16961+ flush_tlb_all();
16962+
16963+ free_page((unsigned long)u);
16964+
16965+ return rc;
16966+}
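/*
 * The routine above batches page-table updates: it fills one page worth of
 * mmu_update_t entries and issues a single HYPERVISOR_mmu_update() hypercall
 * per full batch, plus one final call for the remainder.  A minimal,
 * self-contained sketch of the same fill-then-flush pattern (illustrative
 * only; BATCH, flush() and process() are placeholder names, not part of
 * this patch):
 */
#include <stddef.h>

#define BATCH 512

static int flush(const unsigned long *buf, size_t n)
{
        /* stand-in for the expensive per-batch operation (the hypercall) */
        (void)buf;
        (void)n;
        return 0;
}

static int process(const unsigned long *items, size_t count)
{
        unsigned long buf[BATCH];
        size_t i, n = 0;
        int rc;

        for (i = 0; i < count; i++) {
                buf[n++] = items[i];
                if (n == BATCH) {               /* full batch: flush and reuse */
                        rc = flush(buf, n);
                        if (rc)
                                return rc;
                        n = 0;
                }
        }
        return n ? flush(buf, n) : 0;           /* final partial batch */
}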
16967+
16968+int direct_remap_pfn_range(struct vm_area_struct *vma,
16969+ unsigned long address,
16970+ unsigned long mfn,
16971+ unsigned long size,
16972+ pgprot_t prot,
16973+ domid_t domid)
16974+{
16975+ if (xen_feature(XENFEAT_auto_translated_physmap))
16976+ return remap_pfn_range(vma, address, mfn, size, prot);
16977+
16978+ if (domid == DOMID_SELF)
16979+ return -EINVAL;
16980+
16981+ vma->vm_flags |= VM_IO | VM_RESERVED;
16982+
16983+ vma->vm_mm->context.has_foreign_mappings = 1;
16984+
16985+ return __direct_remap_pfn_range(
16986+ vma->vm_mm, address, mfn, size, prot, domid);
16987+}
16988+EXPORT_SYMBOL(direct_remap_pfn_range);
16989+
16990+int direct_kernel_remap_pfn_range(unsigned long address,
16991+ unsigned long mfn,
16992+ unsigned long size,
16993+ pgprot_t prot,
16994+ domid_t domid)
16995+{
16996+ return __direct_remap_pfn_range(
16997+ &init_mm, address, mfn, size, prot, domid);
16998+}
16999+EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
17000+
17001+static int lookup_pte_fn(
17002+ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
17003+{
17004+ uint64_t *ptep = (uint64_t *)data;
17005+ if (ptep)
17006+ *ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) <<
17007+ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
17008+ return 0;
17009+}
17010+
17011+int create_lookup_pte_addr(struct mm_struct *mm,
17012+ unsigned long address,
17013+ uint64_t *ptep)
17014+{
17015+ return apply_to_page_range(mm, address, PAGE_SIZE,
17016+ lookup_pte_fn, ptep);
17017+}
17018+
17019+EXPORT_SYMBOL(create_lookup_pte_addr);
17020+
17021+static int noop_fn(
17022+ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
17023+{
17024+ return 0;
17025+}
17026+
17027+int touch_pte_range(struct mm_struct *mm,
17028+ unsigned long address,
17029+ unsigned long size)
17030+{
17031+ return apply_to_page_range(mm, address, size, noop_fn, NULL);
17032+}
17033+
17034+EXPORT_SYMBOL(touch_pte_range);
17035+
17036+/*
17037+ * Does @address reside within a non-highmem page that is local to this virtual
17038+ * machine (i.e., not an I/O page, nor a memory page belonging to another VM)?
17039+ * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
17040+ * why this works.
17041+ */
17042+static inline int is_local_lowmem(unsigned long address)
17043+{
17044+ extern unsigned long max_low_pfn;
17045+ return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
17046+}
17047+
17048+/*
17049+ * Generic mapping function (not visible outside):
17050+ */
17051+
17052+/*
17053+ * Remap an arbitrary physical address space into the kernel virtual
17054+ * address space. Needed when the kernel wants to access high addresses
17055+ * directly.
17056+ *
17057+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
17058+ * have to convert them into an offset in a page-aligned mapping, but the
17059+ * caller shouldn't need to know that small detail.
17060+ */
17061+void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
17062+{
17063+ void __iomem * addr;
17064+ struct vm_struct * area;
17065+ unsigned long offset, last_addr;
17066+ domid_t domid = DOMID_IO;
17067+
17068+ /* Don't allow wraparound or zero size */
17069+ last_addr = phys_addr + size - 1;
17070+ if (!size || last_addr < phys_addr)
17071+ return NULL;
17072+
17073+ /*
17074+ * Don't remap the low PCI/ISA area, it's always mapped..
17075+ */
17076+ if (is_initial_xendomain() &&
17077+ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
17078+ return (void __iomem *) isa_bus_to_virt(phys_addr);
17079+
17080+ /*
17081+ * Don't allow anybody to remap normal RAM that we're using..
17082+ */
17083+ if (is_local_lowmem(phys_addr)) {
17084+ char *t_addr, *t_end;
17085+ struct page *page;
17086+
17087+ t_addr = bus_to_virt(phys_addr);
17088+ t_end = t_addr + (size - 1);
17089+
17090+ for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
17091+ if(!PageReserved(page))
17092+ return NULL;
17093+
17094+ domid = DOMID_SELF;
17095+ }
17096+
17097+ /*
17098+ * Mappings have to be page-aligned
17099+ */
17100+ offset = phys_addr & ~PAGE_MASK;
17101+ phys_addr &= PAGE_MASK;
17102+ size = PAGE_ALIGN(last_addr+1) - phys_addr;
17103+
17104+ /*
17105+ * Ok, go for it..
17106+ */
17107+ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
17108+ if (!area)
17109+ return NULL;
17110+ area->phys_addr = phys_addr;
17111+ addr = (void __iomem *) area->addr;
17112+ flags |= _KERNPG_TABLE;
17113+ if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
17114+ phys_addr>>PAGE_SHIFT,
17115+ size, __pgprot(flags), domid)) {
17116+ vunmap((void __force *) addr);
17117+ return NULL;
17118+ }
17119+ return (void __iomem *) (offset + (char __iomem *)addr);
17120+}
17121+EXPORT_SYMBOL(__ioremap);
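/*
 * The alignment arithmetic in __ioremap(), with concrete numbers (4 KiB
 * pages, so PAGE_MASK == ~0xfffUL).  Illustrative values only:
 *
 *   phys_addr = 0xfed00004, size = 0x30
 *   last_addr = 0xfed00033
 *   offset    = phys_addr & ~PAGE_MASK             = 0x004
 *   phys_addr &= PAGE_MASK                         -> 0xfed00000
 *   size      = PAGE_ALIGN(last_addr + 1) - phys_addr
 *             = 0xfed01000 - 0xfed00000            = 0x1000 (one page)
 *
 * The returned pointer is the mapped page plus the saved offset, so callers
 * never have to care about the page-alignment requirement.
 */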
17122+
17123+/**
17124+ * ioremap_nocache - map bus memory into CPU space
17125+ * @offset: bus address of the memory
17126+ * @size: size of the resource to map
17127+ *
17128+ * ioremap_nocache performs a platform specific sequence of operations to
17129+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
17130+ * writew/writel functions and the other mmio helpers. The returned
17131+ * address is not guaranteed to be usable directly as a virtual
17132+ * address.
17133+ *
17134+ * This version of ioremap ensures that the memory is marked uncachable
17135+ * on the CPU as well as honouring existing caching rules from things like
17136+ * the PCI bus. Note that there are other caches and buffers on many
17137+ * busses. In particular, driver authors should read up on PCI writes.
17138+ *
17139+ * It's useful if some control registers are in such an area and
17140+ * write combining or read caching is not desirable:
17141+ *
17142+ * Must be freed with iounmap.
17143+ */
17144+
17145+void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
17146+{
17147+ unsigned long last_addr;
17148+ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
17149+ if (!p)
17150+ return p;
17151+
17152+ /* Guaranteed to be > phys_addr, as per __ioremap() */
17153+ last_addr = phys_addr + size - 1;
17154+
17155+ if (is_local_lowmem(last_addr)) {
17156+ struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
17157+ unsigned long npages;
17158+
17159+ phys_addr &= PAGE_MASK;
17160+
17161+ /* This might overflow and become zero.. */
17162+ last_addr = PAGE_ALIGN(last_addr);
17163+
17164+ /* .. but that's ok, because modulo-2**n arithmetic will make
17165+ * the page-aligned "last - first" come out right.
17166+ */
17167+ npages = (last_addr - phys_addr) >> PAGE_SHIFT;
17168+
17169+ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
17170+ iounmap(p);
17171+ p = NULL;
17172+ }
17173+ global_flush_tlb();
17174+ }
17175+
17176+ return p;
17177+}
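/*
 * The "might overflow and become zero" comment above, with numbers (32-bit
 * unsigned long, 4 KiB pages; illustrative values only):
 *
 *   phys_addr = 0xfffff000, size = 0x1000  =>  last_addr = 0xffffffff
 *   PAGE_ALIGN(0xffffffff) wraps around to 0
 *   npages = (0 - 0xfffff000) >> PAGE_SHIFT = 0x1000 >> 12 = 1
 *
 * Modulo-2^32 arithmetic makes the page-aligned difference come out right
 * even though the aligned end address wrapped.
 */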
17178+EXPORT_SYMBOL(ioremap_nocache);
17179+
17180+/**
17181+ * iounmap - Free an IO remapping
17182+ * @addr: virtual address from ioremap_*
17183+ *
17184+ * Caller must ensure there is only one unmapping for the same pointer.
17185+ */
17186+void iounmap(volatile void __iomem *addr)
17187+{
17188+ struct vm_struct *p, *o;
17189+
17190+ if ((void __force *)addr <= high_memory)
17191+ return;
17192+
17193+ /*
17194+ * __ioremap special-cases the PCI/ISA range by not instantiating a
17195+ * vm_area and by simply returning an address into the kernel mapping
17196+ * of ISA space. So handle that here.
17197+ */
17198+ if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
17199+ return;
17200+
17201+ addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
17202+
17203+ /* Use the vm area unlocked, assuming the caller
17204+ ensures there isn't another iounmap for the same address
17205+ in parallel. Reuse of the virtual address is prevented by
17206+ leaving it in the global lists until we're done with it.
17207+ cpa takes care of the direct mappings. */
17208+ read_lock(&vmlist_lock);
17209+ for (p = vmlist; p; p = p->next) {
17210+ if (p->addr == addr)
17211+ break;
17212+ }
17213+ read_unlock(&vmlist_lock);
17214+
17215+ if (!p) {
17216+ printk("iounmap: bad address %p\n", addr);
17217+ dump_stack();
17218+ return;
17219+ }
17220+
17221+ /* Reset the direct mapping. Can block */
17222+ if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
17223+ /* p->size includes the guard page, but cpa doesn't like that */
17224+ change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
17225+ (p->size - PAGE_SIZE) >> PAGE_SHIFT,
17226+ PAGE_KERNEL);
17227+ global_flush_tlb();
17228+ }
17229+
17230+ /* Finally remove it */
17231+ o = remove_vm_area((void *)addr);
17232+ BUG_ON(p != o || o == NULL);
17233+ kfree(p);
17234+}
17235+EXPORT_SYMBOL(iounmap);
17236+
17237+void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
17238+{
17239+ unsigned long offset, last_addr;
17240+ unsigned int nrpages;
17241+ enum fixed_addresses idx;
17242+
17243+ /* Don't allow wraparound or zero size */
17244+ last_addr = phys_addr + size - 1;
17245+ if (!size || last_addr < phys_addr)
17246+ return NULL;
17247+
17248+ /*
17249+ * Don't remap the low PCI/ISA area, it's always mapped..
17250+ */
17251+ if (is_initial_xendomain() &&
17252+ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
17253+ return isa_bus_to_virt(phys_addr);
17254+
17255+ /*
17256+ * Mappings have to be page-aligned
17257+ */
17258+ offset = phys_addr & ~PAGE_MASK;
17259+ phys_addr &= PAGE_MASK;
17260+ size = PAGE_ALIGN(last_addr) - phys_addr;
17261+
17262+ /*
17263+ * Mappings have to fit in the FIX_BTMAP area.
17264+ */
17265+ nrpages = size >> PAGE_SHIFT;
17266+ if (nrpages > NR_FIX_BTMAPS)
17267+ return NULL;
17268+
17269+ /*
17270+ * Ok, go for it..
17271+ */
17272+ idx = FIX_BTMAP_BEGIN;
17273+ while (nrpages > 0) {
17274+ set_fixmap(idx, phys_addr);
17275+ phys_addr += PAGE_SIZE;
17276+ --idx;
17277+ --nrpages;
17278+ }
17279+ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
17280+}
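/*
 * Note on the descending index above: fixmap virtual addresses are handed
 * out top-down, i.e. fix_to_virt(idx) decreases as idx increases.  Starting
 * at FIX_BTMAP_BEGIN and decrementing idx therefore maps successive physical
 * pages at successively higher virtual addresses, which is why the result is
 * simply offset + fix_to_virt(FIX_BTMAP_BEGIN).
 */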
17281+
17282+void __init bt_iounmap(void *addr, unsigned long size)
17283+{
17284+ unsigned long virt_addr;
17285+ unsigned long offset;
17286+ unsigned int nrpages;
17287+ enum fixed_addresses idx;
17288+
17289+ virt_addr = (unsigned long)addr;
17290+ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
17291+ return;
17292+ if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
17293+ return;
17294+ offset = virt_addr & ~PAGE_MASK;
17295+ nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
17296+
17297+ idx = FIX_BTMAP_BEGIN;
17298+ while (nrpages > 0) {
17299+ clear_fixmap(idx);
17300+ --idx;
17301+ --nrpages;
17302+ }
17303+}
17304Index: head-2008-11-25/arch/x86/mm/pgtable_32-xen.c
17305===================================================================
17306--- /dev/null 1970-01-01 00:00:00.000000000 +0000
17307+++ head-2008-11-25/arch/x86/mm/pgtable_32-xen.c 2007-10-09 11:48:25.000000000 +0200
17308@@ -0,0 +1,725 @@
17309+/*
17310+ * linux/arch/i386/mm/pgtable.c
17311+ */
17312+
17313+#include <linux/sched.h>
17314+#include <linux/kernel.h>
17315+#include <linux/errno.h>
17316+#include <linux/mm.h>
17317+#include <linux/swap.h>
17318+#include <linux/smp.h>
17319+#include <linux/highmem.h>
17320+#include <linux/slab.h>
17321+#include <linux/pagemap.h>
17322+#include <linux/spinlock.h>
17323+#include <linux/module.h>
17324+
17325+#include <asm/system.h>
17326+#include <asm/pgtable.h>
17327+#include <asm/pgalloc.h>
17328+#include <asm/fixmap.h>
17329+#include <asm/e820.h>
17330+#include <asm/tlb.h>
17331+#include <asm/tlbflush.h>
17332+#include <asm/io.h>
17333+#include <asm/mmu_context.h>
17334+
17335+#include <xen/features.h>
17336+#include <asm/hypervisor.h>
17337+
17338+static void pgd_test_and_unpin(pgd_t *pgd);
17339+
17340+void show_mem(void)
17341+{
17342+ int total = 0, reserved = 0;
17343+ int shared = 0, cached = 0;
17344+ int highmem = 0;
17345+ struct page *page;
17346+ pg_data_t *pgdat;
17347+ unsigned long i;
17348+ unsigned long flags;
17349+
17350+ printk(KERN_INFO "Mem-info:\n");
17351+ show_free_areas();
17352+ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
17353+ for_each_online_pgdat(pgdat) {
17354+ pgdat_resize_lock(pgdat, &flags);
17355+ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
17356+ page = pgdat_page_nr(pgdat, i);
17357+ total++;
17358+ if (PageHighMem(page))
17359+ highmem++;
17360+ if (PageReserved(page))
17361+ reserved++;
17362+ else if (PageSwapCache(page))
17363+ cached++;
17364+ else if (page_count(page))
17365+ shared += page_count(page) - 1;
17366+ }
17367+ pgdat_resize_unlock(pgdat, &flags);
17368+ }
17369+ printk(KERN_INFO "%d pages of RAM\n", total);
17370+ printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
17371+ printk(KERN_INFO "%d reserved pages\n", reserved);
17372+ printk(KERN_INFO "%d pages shared\n", shared);
17373+ printk(KERN_INFO "%d pages swap cached\n", cached);
17374+
17375+ printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
17376+ printk(KERN_INFO "%lu pages writeback\n",
17377+ global_page_state(NR_WRITEBACK));
17378+ printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
17379+ printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
17380+ printk(KERN_INFO "%lu pages pagetables\n",
17381+ global_page_state(NR_PAGETABLE));
17382+}
17383+
17384+/*
17385+ * Associate a large virtual page frame with a given physical page frame
17386+ * and protection flags for that frame. pfn is for the base of the page,
17387+ * vaddr is what the page gets mapped to - both must be properly aligned.
17388+ * The pmd must already be instantiated. Assumes PAE mode.
17389+ */
17390+void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
17391+{
17392+ pgd_t *pgd;
17393+ pud_t *pud;
17394+ pmd_t *pmd;
17395+
17396+ if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
17397+ printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
17398+ return; /* BUG(); */
17399+ }
17400+ if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
17401+ printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
17402+ return; /* BUG(); */
17403+ }
17404+ pgd = swapper_pg_dir + pgd_index(vaddr);
17405+ if (pgd_none(*pgd)) {
17406+ printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
17407+ return; /* BUG(); */
17408+ }
17409+ pud = pud_offset(pgd, vaddr);
17410+ pmd = pmd_offset(pud, vaddr);
17411+ set_pmd(pmd, pfn_pmd(pfn, flags));
17412+ /*
17413+ * It's enough to flush this one mapping.
17414+ * (PGE mappings get flushed as well)
17415+ */
17416+ __flush_tlb_one(vaddr);
17417+}
17418+
17419+static int nr_fixmaps = 0;
17420+unsigned long hypervisor_virt_start = HYPERVISOR_VIRT_START;
17421+unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
17422+EXPORT_SYMBOL(__FIXADDR_TOP);
17423+
17424+void __init set_fixaddr_top(unsigned long top)
17425+{
17426+ BUG_ON(nr_fixmaps > 0);
17427+ hypervisor_virt_start = top;
17428+ __FIXADDR_TOP = hypervisor_virt_start - 2 * PAGE_SIZE;
17429+}
17430+
17431+void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
17432+{
17433+ unsigned long address = __fix_to_virt(idx);
17434+ pte_t pte;
17435+
17436+ if (idx >= __end_of_fixed_addresses) {
17437+ BUG();
17438+ return;
17439+ }
17440+ switch (idx) {
17441+ case FIX_WP_TEST:
17442+ case FIX_VDSO:
17443+ pte = pfn_pte(phys >> PAGE_SHIFT, flags);
17444+ break;
17445+ default:
17446+ pte = pfn_pte_ma(phys >> PAGE_SHIFT, flags);
17447+ break;
17448+ }
17449+ if (HYPERVISOR_update_va_mapping(address, pte,
17450+ UVMF_INVLPG|UVMF_ALL))
17451+ BUG();
17452+ nr_fixmaps++;
17453+}
17454+
17455+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
17456+{
17457+ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
17458+ if (pte)
17459+ make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
17460+ return pte;
17461+}
17462+
17463+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
17464+{
17465+ struct page *pte;
17466+
17467+#ifdef CONFIG_HIGHPTE
17468+ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
17469+#else
17470+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
17471+#endif
17472+ if (pte) {
17473+ SetPageForeign(pte, pte_free);
17474+ init_page_count(pte);
17475+ }
17476+ return pte;
17477+}
17478+
17479+void pte_free(struct page *pte)
17480+{
17481+ unsigned long pfn = page_to_pfn(pte);
17482+
17483+ if (!PageHighMem(pte)) {
17484+ unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT);
17485+
17486+ if (!pte_write(*virt_to_ptep(va)))
17487+ if (HYPERVISOR_update_va_mapping(
17488+ va, pfn_pte(pfn, PAGE_KERNEL), 0))
17489+ BUG();
17490+ } else
17491+ clear_bit(PG_pinned, &pte->flags);
17492+
17493+ ClearPageForeign(pte);
17494+ init_page_count(pte);
17495+
17496+ __free_page(pte);
17497+}
17498+
17499+void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
17500+{
17501+ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
17502+}
17503+
17504+/*
17505+ * List of all pgd's needed for non-PAE so it can invalidate entries
17506+ * in both cached and uncached pgd's; not needed for PAE since the
17507+ * kernel pmd is shared. If PAE were not to share the pmd a similar
17508+ * tactic would be needed. This is essentially codepath-based locking
17509+ * against pageattr.c; it is the unique case in which a valid change
17510+ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
17511+ * vmalloc faults work because attached pagetables are never freed.
17512+ * The locking scheme was chosen on the basis of manfred's
17513+ * recommendations and having no core impact whatsoever.
17514+ * -- wli
17515+ */
17516+DEFINE_SPINLOCK(pgd_lock);
17517+struct page *pgd_list;
17518+
17519+static inline void pgd_list_add(pgd_t *pgd)
17520+{
17521+ struct page *page = virt_to_page(pgd);
17522+ page->index = (unsigned long)pgd_list;
17523+ if (pgd_list)
17524+ set_page_private(pgd_list, (unsigned long)&page->index);
17525+ pgd_list = page;
17526+ set_page_private(page, (unsigned long)&pgd_list);
17527+}
17528+
17529+static inline void pgd_list_del(pgd_t *pgd)
17530+{
17531+ struct page *next, **pprev, *page = virt_to_page(pgd);
17532+ next = (struct page *)page->index;
17533+ pprev = (struct page **)page_private(page);
17534+ *pprev = next;
17535+ if (next)
17536+ set_page_private(next, (unsigned long)pprev);
17537+}
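/*
 * pgd_list_add()/pgd_list_del() above hand-roll an hlist-style intrusive
 * list: page->index holds the "next" pointer and page_private() holds a
 * pointer to the previous element's "next" field (or to pgd_list itself for
 * the head).  The same pattern on a plain struct, as a self-contained sketch
 * (illustrative only; struct node and the helpers are not part of this
 * patch):
 */
struct node {
        struct node *next;
        struct node **pprev;    /* points at whatever points at us */
};

static struct node *list_head;

static void node_add(struct node *n)
{
        n->next = list_head;
        if (list_head)
                list_head->pprev = &n->next;
        list_head = n;
        n->pprev = &list_head;
}

static void node_del(struct node *n)
{
        *n->pprev = n->next;    /* unhook: works for head and interior nodes */
        if (n->next)
                n->next->pprev = n->pprev;
}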
17538+
17539+void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
17540+{
17541+ unsigned long flags;
17542+
17543+ if (PTRS_PER_PMD > 1) {
17544+ if (HAVE_SHARED_KERNEL_PMD)
17545+ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
17546+ swapper_pg_dir + USER_PTRS_PER_PGD,
17547+ KERNEL_PGD_PTRS);
17548+ } else {
17549+ spin_lock_irqsave(&pgd_lock, flags);
17550+ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
17551+ swapper_pg_dir + USER_PTRS_PER_PGD,
17552+ KERNEL_PGD_PTRS);
17553+ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
17554+ pgd_list_add(pgd);
17555+ spin_unlock_irqrestore(&pgd_lock, flags);
17556+ }
17557+}
17558+
17559+/* never called when PTRS_PER_PMD > 1 */
17560+void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
17561+{
17562+ unsigned long flags; /* can be called from interrupt context */
17563+
17564+ spin_lock_irqsave(&pgd_lock, flags);
17565+ pgd_list_del(pgd);
17566+ spin_unlock_irqrestore(&pgd_lock, flags);
17567+
17568+ pgd_test_and_unpin(pgd);
17569+}
17570+
17571+pgd_t *pgd_alloc(struct mm_struct *mm)
17572+{
17573+ int i;
17574+ pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
17575+ pmd_t **pmd;
17576+ unsigned long flags;
17577+
17578+ pgd_test_and_unpin(pgd);
17579+
17580+ if (PTRS_PER_PMD == 1 || !pgd)
17581+ return pgd;
17582+
17583+ if (HAVE_SHARED_KERNEL_PMD) {
17584+ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
17585+ pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
17586+ if (!pmd)
17587+ goto out_oom;
17588+ set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
17589+ }
17590+ return pgd;
17591+ }
17592+
17593+ /*
17594+ * We can race save/restore (if we sleep during a GFP_KERNEL memory
17595+ * allocation). We therefore store virtual addresses of pmds as they
17596+ * do not change across save/restore, and poke the machine addresses
17597+ * into the pgdir under the pgd_lock.
17598+ */
17599+ pmd = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
17600+ if (!pmd) {
17601+ kmem_cache_free(pgd_cache, pgd);
17602+ return NULL;
17603+ }
17604+
17605+ /* Allocate pmds, remember virtual addresses. */
17606+ for (i = 0; i < PTRS_PER_PGD; ++i) {
17607+ pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
17608+ if (!pmd[i])
17609+ goto out_oom;
17610+ }
17611+
17612+ spin_lock_irqsave(&pgd_lock, flags);
17613+
17614+ /* Protect against save/restore: move below 4GB under pgd_lock. */
17615+ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
17616+ int rc = xen_create_contiguous_region(
17617+ (unsigned long)pgd, 0, 32);
17618+ if (rc) {
17619+ spin_unlock_irqrestore(&pgd_lock, flags);
17620+ goto out_oom;
17621+ }
17622+ }
17623+
17624+ /* Copy kernel pmd contents and write-protect the new pmds. */
17625+ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
17626+ unsigned long v = (unsigned long)i << PGDIR_SHIFT;
17627+ pgd_t *kpgd = pgd_offset_k(v);
17628+ pud_t *kpud = pud_offset(kpgd, v);
17629+ pmd_t *kpmd = pmd_offset(kpud, v);
17630+ memcpy(pmd[i], kpmd, PAGE_SIZE);
17631+ make_lowmem_page_readonly(
17632+ pmd[i], XENFEAT_writable_page_tables);
17633+ }
17634+
17635+ /* It is safe to poke machine addresses of pmds under the pmd_lock. */
17636+ for (i = 0; i < PTRS_PER_PGD; i++)
17637+ set_pgd(&pgd[i], __pgd(1 + __pa(pmd[i])));
17638+
17639+ /* Ensure this pgd gets picked up and pinned on save/restore. */
17640+ pgd_list_add(pgd);
17641+
17642+ spin_unlock_irqrestore(&pgd_lock, flags);
17643+
17644+ kfree(pmd);
17645+
17646+ return pgd;
17647+
17648+out_oom:
17649+ if (HAVE_SHARED_KERNEL_PMD) {
17650+ for (i--; i >= 0; i--)
17651+ kmem_cache_free(pmd_cache,
17652+ (void *)__va(pgd_val(pgd[i])-1));
17653+ } else {
17654+ for (i--; i >= 0; i--)
17655+ kmem_cache_free(pmd_cache, pmd[i]);
17656+ kfree(pmd);
17657+ }
17658+ kmem_cache_free(pgd_cache, pgd);
17659+ return NULL;
17660+}
17661+
17662+void pgd_free(pgd_t *pgd)
17663+{
17664+ int i;
17665+
17666+ /*
17667+ * After this the pgd should not be pinned for the duration of this
17668+ * function's execution. We should never sleep and thus never race:
17669+ * 1. User pmds will not become write-protected under our feet due
17670+ * to a concurrent mm_pin_all().
17671+ * 2. The machine addresses in PGD entries will not become invalid
17672+ * due to a concurrent save/restore.
17673+ */
17674+ pgd_test_and_unpin(pgd);
17675+
17676+ /* in the PAE case user pgd entries are overwritten before usage */
17677+ if (PTRS_PER_PMD > 1) {
17678+ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
17679+ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
17680+ kmem_cache_free(pmd_cache, pmd);
17681+ }
17682+
17683+ if (!HAVE_SHARED_KERNEL_PMD) {
17684+ unsigned long flags;
17685+ spin_lock_irqsave(&pgd_lock, flags);
17686+ pgd_list_del(pgd);
17687+ spin_unlock_irqrestore(&pgd_lock, flags);
17688+
17689+ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
17690+ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
17691+ make_lowmem_page_writable(
17692+ pmd, XENFEAT_writable_page_tables);
17693+ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
17694+ kmem_cache_free(pmd_cache, pmd);
17695+ }
17696+
17697+ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
17698+ xen_destroy_contiguous_region(
17699+ (unsigned long)pgd, 0);
17700+ }
17701+ }
17702+
17703+ /* in the non-PAE case, free_pgtables() clears user pgd entries */
17704+ kmem_cache_free(pgd_cache, pgd);
17705+}
17706+
17707+void make_lowmem_page_readonly(void *va, unsigned int feature)
17708+{
17709+ pte_t *pte;
17710+ int rc;
17711+
17712+ if (xen_feature(feature))
17713+ return;
17714+
17715+ pte = virt_to_ptep(va);
17716+ rc = HYPERVISOR_update_va_mapping(
17717+ (unsigned long)va, pte_wrprotect(*pte), 0);
17718+ BUG_ON(rc);
17719+}
17720+
17721+void make_lowmem_page_writable(void *va, unsigned int feature)
17722+{
17723+ pte_t *pte;
17724+ int rc;
17725+
17726+ if (xen_feature(feature))
17727+ return;
17728+
17729+ pte = virt_to_ptep(va);
17730+ rc = HYPERVISOR_update_va_mapping(
17731+ (unsigned long)va, pte_mkwrite(*pte), 0);
17732+ BUG_ON(rc);
17733+}
17734+
17735+void make_page_readonly(void *va, unsigned int feature)
17736+{
17737+ pte_t *pte;
17738+ int rc;
17739+
17740+ if (xen_feature(feature))
17741+ return;
17742+
17743+ pte = virt_to_ptep(va);
17744+ rc = HYPERVISOR_update_va_mapping(
17745+ (unsigned long)va, pte_wrprotect(*pte), 0);
17746+ if (rc) /* fallback? */
17747+ xen_l1_entry_update(pte, pte_wrprotect(*pte));
17748+ if ((unsigned long)va >= (unsigned long)high_memory) {
17749+ unsigned long pfn = pte_pfn(*pte);
17750+#ifdef CONFIG_HIGHMEM
17751+ if (pfn >= highstart_pfn)
17752+ kmap_flush_unused(); /* flush stale writable kmaps */
17753+ else
17754+#endif
17755+ make_lowmem_page_readonly(
17756+ phys_to_virt(pfn << PAGE_SHIFT), feature);
17757+ }
17758+}
17759+
17760+void make_page_writable(void *va, unsigned int feature)
17761+{
17762+ pte_t *pte;
17763+ int rc;
17764+
17765+ if (xen_feature(feature))
17766+ return;
17767+
17768+ pte = virt_to_ptep(va);
17769+ rc = HYPERVISOR_update_va_mapping(
17770+ (unsigned long)va, pte_mkwrite(*pte), 0);
17771+ if (rc) /* fallback? */
17772+ xen_l1_entry_update(pte, pte_mkwrite(*pte));
17773+ if ((unsigned long)va >= (unsigned long)high_memory) {
17774+ unsigned long pfn = pte_pfn(*pte);
17775+#ifdef CONFIG_HIGHMEM
17776+ if (pfn < highstart_pfn)
17777+#endif
17778+ make_lowmem_page_writable(
17779+ phys_to_virt(pfn << PAGE_SHIFT), feature);
17780+ }
17781+}
17782+
17783+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
17784+{
17785+ if (xen_feature(feature))
17786+ return;
17787+
17788+ while (nr-- != 0) {
17789+ make_page_readonly(va, feature);
17790+ va = (void *)((unsigned long)va + PAGE_SIZE);
17791+ }
17792+}
17793+
17794+void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
17795+{
17796+ if (xen_feature(feature))
17797+ return;
17798+
17799+ while (nr-- != 0) {
17800+ make_page_writable(va, feature);
17801+ va = (void *)((unsigned long)va + PAGE_SIZE);
17802+ }
17803+}
17804+
17805+static void _pin_lock(struct mm_struct *mm, int lock) {
17806+ if (lock)
17807+ spin_lock(&mm->page_table_lock);
17808+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
17809+ /* While mm->page_table_lock protects us against insertions and
17810+ * removals of higher level page table pages, it doesn't protect
17811+ * against updates of pte-s. Such updates, however, require the
17812+ * pte pages to be in consistent state (unpinned+writable or
17813+ * pinned+readonly). The pinning and attribute changes, however,
17814+ * cannot be done atomically, which is why such updates must be
17815+ * prevented from happening concurrently.
17816+ * Note that no pte lock can ever elsewhere be acquired nesting
17817+ * with an already acquired one in the same mm, or with the mm's
17818+ * page_table_lock already acquired, as that would break in the
17819+ * non-split case (where all these are actually resolving to the
17820+ * one page_table_lock). Thus acquiring all of them here is not
17821+ * going to result in deadlocks, and the order of acquires
17822+ * doesn't matter.
17823+ */
17824+ {
17825+ pgd_t *pgd = mm->pgd;
17826+ unsigned g;
17827+
17828+ for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
17829+ pud_t *pud;
17830+ unsigned u;
17831+
17832+ if (pgd_none(*pgd))
17833+ continue;
17834+ pud = pud_offset(pgd, 0);
17835+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
17836+ pmd_t *pmd;
17837+ unsigned m;
17838+
17839+ if (pud_none(*pud))
17840+ continue;
17841+ pmd = pmd_offset(pud, 0);
17842+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
17843+ spinlock_t *ptl;
17844+
17845+ if (pmd_none(*pmd))
17846+ continue;
17847+ ptl = pte_lockptr(0, pmd);
17848+ if (lock)
17849+ spin_lock(ptl);
17850+ else
17851+ spin_unlock(ptl);
17852+ }
17853+ }
17854+ }
17855+ }
17856+#endif
17857+ if (!lock)
17858+ spin_unlock(&mm->page_table_lock);
17859+}
17860+#define pin_lock(mm) _pin_lock(mm, 1)
17861+#define pin_unlock(mm) _pin_lock(mm, 0)
17862+
17863+#define PIN_BATCH 4
17864+static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
17865+
17866+static inline unsigned int pgd_walk_set_prot(struct page *page, pgprot_t flags,
17867+ unsigned int cpu, unsigned seq)
17868+{
17869+ unsigned long pfn = page_to_pfn(page);
17870+
17871+ if (PageHighMem(page)) {
17872+ if (pgprot_val(flags) & _PAGE_RW)
17873+ clear_bit(PG_pinned, &page->flags);
17874+ else
17875+ set_bit(PG_pinned, &page->flags);
17876+ } else {
17877+ MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
17878+ (unsigned long)__va(pfn << PAGE_SHIFT),
17879+ pfn_pte(pfn, flags), 0);
17880+ if (unlikely(++seq == PIN_BATCH)) {
17881+ if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
17882+ PIN_BATCH, NULL)))
17883+ BUG();
17884+ seq = 0;
17885+ }
17886+ }
17887+
17888+ return seq;
17889+}
17890+
17891+static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
17892+{
17893+ pgd_t *pgd = pgd_base;
17894+ pud_t *pud;
17895+ pmd_t *pmd;
17896+ int g, u, m;
17897+ unsigned int cpu, seq;
17898+
17899+ if (xen_feature(XENFEAT_auto_translated_physmap))
17900+ return;
17901+
17902+ cpu = get_cpu();
17903+
17904+ for (g = 0, seq = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
17905+ if (pgd_none(*pgd))
17906+ continue;
17907+ pud = pud_offset(pgd, 0);
17908+ if (PTRS_PER_PUD > 1) /* not folded */
17909+ seq = pgd_walk_set_prot(virt_to_page(pud),flags,cpu,seq);
17910+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
17911+ if (pud_none(*pud))
17912+ continue;
17913+ pmd = pmd_offset(pud, 0);
17914+ if (PTRS_PER_PMD > 1) /* not folded */
17915+ seq = pgd_walk_set_prot(virt_to_page(pmd),flags,cpu,seq);
17916+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
17917+ if (pmd_none(*pmd))
17918+ continue;
17919+ seq = pgd_walk_set_prot(pmd_page(*pmd),flags,cpu,seq);
17920+ }
17921+ }
17922+ }
17923+
17924+ if (likely(seq != 0)) {
17925+ MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
17926+ (unsigned long)pgd_base,
17927+ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
17928+ UVMF_TLB_FLUSH);
17929+ if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
17930+ seq + 1, NULL)))
17931+ BUG();
17932+ } else if(HYPERVISOR_update_va_mapping((unsigned long)pgd_base,
17933+ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
17934+ UVMF_TLB_FLUSH))
17935+ BUG();
17936+
17937+ put_cpu();
17938+}
17939+
17940+static void __pgd_pin(pgd_t *pgd)
17941+{
17942+ pgd_walk(pgd, PAGE_KERNEL_RO);
17943+ kmap_flush_unused();
17944+ xen_pgd_pin(__pa(pgd));
17945+ set_bit(PG_pinned, &virt_to_page(pgd)->flags);
17946+}
17947+
17948+static void __pgd_unpin(pgd_t *pgd)
17949+{
17950+ xen_pgd_unpin(__pa(pgd));
17951+ pgd_walk(pgd, PAGE_KERNEL);
17952+ clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
17953+}
17954+
17955+static void pgd_test_and_unpin(pgd_t *pgd)
17956+{
17957+ if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
17958+ __pgd_unpin(pgd);
17959+}
17960+
17961+void mm_pin(struct mm_struct *mm)
17962+{
17963+ if (xen_feature(XENFEAT_writable_page_tables))
17964+ return;
17965+ pin_lock(mm);
17966+ __pgd_pin(mm->pgd);
17967+ pin_unlock(mm);
17968+}
17969+
17970+void mm_unpin(struct mm_struct *mm)
17971+{
17972+ if (xen_feature(XENFEAT_writable_page_tables))
17973+ return;
17974+ pin_lock(mm);
17975+ __pgd_unpin(mm->pgd);
17976+ pin_unlock(mm);
17977+}
17978+
17979+void mm_pin_all(void)
17980+{
17981+ struct page *page;
17982+ unsigned long flags;
17983+
17984+ if (xen_feature(XENFEAT_writable_page_tables))
17985+ return;
17986+
17987+ /*
17988+ * Allow uninterrupted access to the pgd_list. Also protects
17989+ * __pgd_pin() by disabling preemption.
17990+ * All other CPUs must be at a safe point (e.g., in stop_machine
17991+ * or offlined entirely).
17992+ */
17993+ spin_lock_irqsave(&pgd_lock, flags);
17994+ for (page = pgd_list; page; page = (struct page *)page->index) {
17995+ if (!test_bit(PG_pinned, &page->flags))
17996+ __pgd_pin((pgd_t *)page_address(page));
17997+ }
17998+ spin_unlock_irqrestore(&pgd_lock, flags);
17999+}
18000+
18001+void _arch_dup_mmap(struct mm_struct *mm)
18002+{
18003+ if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
18004+ mm_pin(mm);
18005+}
18006+
18007+void _arch_exit_mmap(struct mm_struct *mm)
18008+{
18009+ struct task_struct *tsk = current;
18010+
18011+ task_lock(tsk);
18012+
18013+ /*
18014+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
18015+ * *much* faster this way, as no tlb flushes means bigger wrpt batches.
18016+ */
18017+ if (tsk->active_mm == mm) {
18018+ tsk->active_mm = &init_mm;
18019+ atomic_inc(&init_mm.mm_count);
18020+
18021+ switch_mm(mm, &init_mm, tsk);
18022+
18023+ atomic_dec(&mm->mm_count);
18024+ BUG_ON(atomic_read(&mm->mm_count) == 0);
18025+ }
18026+
18027+ task_unlock(tsk);
18028+
18029+ if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
18030+ (atomic_read(&mm->mm_count) == 1) &&
18031+ !mm->context.has_foreign_mappings)
18032+ mm_unpin(mm);
18033+}
18034Index: head-2008-11-25/arch/x86/oprofile/xenoprof.c
18035===================================================================
18036--- /dev/null 1970-01-01 00:00:00.000000000 +0000
18037+++ head-2008-11-25/arch/x86/oprofile/xenoprof.c 2008-01-28 12:24:19.000000000 +0100
18038@@ -0,0 +1,179 @@
18039+/**
18040+ * @file xenoprof.c
18041+ *
18042+ * @remark Copyright 2002 OProfile authors
18043+ * @remark Read the file COPYING
18044+ *
18045+ * @author John Levon <levon@movementarian.org>
18046+ *
18047+ * Modified by Aravind Menon and Jose Renato Santos for Xen
18048+ * These modifications are:
18049+ * Copyright (C) 2005 Hewlett-Packard Co.
18050+ *
18051+ * x86-specific part
18052+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
18053+ * VA Linux Systems Japan K.K.
18054+ */
18055+
18056+#include <linux/init.h>
18057+#include <linux/oprofile.h>
18058+#include <linux/sched.h>
18059+#include <asm/pgtable.h>
18060+
18061+#include <xen/driver_util.h>
18062+#include <xen/interface/xen.h>
18063+#include <xen/interface/xenoprof.h>
18064+#include <xen/xenoprof.h>
18065+#include "op_counter.h"
18066+
18067+static unsigned int num_events = 0;
18068+
18069+void __init xenoprof_arch_init_counter(struct xenoprof_init *init)
18070+{
18071+ num_events = init->num_events;
18072+ /* just in case - make sure we do not overflow event list
18073+ (i.e. counter_config list) */
18074+ if (num_events > OP_MAX_COUNTER) {
18075+ num_events = OP_MAX_COUNTER;
18076+ init->num_events = num_events;
18077+ }
18078+}
18079+
18080+void xenoprof_arch_counter(void)
18081+{
18082+ int i;
18083+ struct xenoprof_counter counter;
18084+
18085+ for (i=0; i<num_events; i++) {
18086+ counter.ind = i;
18087+ counter.count = (uint64_t)counter_config[i].count;
18088+ counter.enabled = (uint32_t)counter_config[i].enabled;
18089+ counter.event = (uint32_t)counter_config[i].event;
18090+ counter.kernel = (uint32_t)counter_config[i].kernel;
18091+ counter.user = (uint32_t)counter_config[i].user;
18092+ counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
18093+ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_counter,
18094+ &counter));
18095+ }
18096+}
18097+
18098+void xenoprof_arch_start(void)
18099+{
18100+ /* nothing */
18101+}
18102+
18103+void xenoprof_arch_stop(void)
18104+{
18105+ /* nothing */
18106+}
18107+
18108+void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer * sbuf)
18109+{
18110+ if (sbuf->buffer) {
18111+ vunmap(sbuf->buffer);
18112+ sbuf->buffer = NULL;
18113+ }
18114+}
18115+
18116+int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer * get_buffer,
18117+ struct xenoprof_shared_buffer * sbuf)
18118+{
18119+ int npages, ret;
18120+ struct vm_struct *area;
18121+
18122+ sbuf->buffer = NULL;
18123+ if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, get_buffer)) )
18124+ return ret;
18125+
18126+ npages = (get_buffer->bufsize * get_buffer->nbuf - 1) / PAGE_SIZE + 1;
18127+
18128+ area = alloc_vm_area(npages * PAGE_SIZE);
18129+ if (area == NULL)
18130+ return -ENOMEM;
18131+
18132+ if ( (ret = direct_kernel_remap_pfn_range(
18133+ (unsigned long)area->addr,
18134+ get_buffer->buf_gmaddr >> PAGE_SHIFT,
18135+ npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE),
18136+ DOMID_SELF)) ) {
18137+ vunmap(area->addr);
18138+ return ret;
18139+ }
18140+
18141+ sbuf->buffer = area->addr;
18142+ return ret;
18143+}
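/*
 * The npages computation above is the usual round-up division: for any
 * byte count > 0, (bytes - 1) / PAGE_SIZE + 1 == ceil(bytes / PAGE_SIZE).
 * E.g. a 6000-byte buffer with 4 KiB pages needs (6000 - 1) / 4096 + 1 = 2
 * pages (illustrative numbers only).
 */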
18144+
18145+int xenoprof_arch_set_passive(struct xenoprof_passive * pdomain,
18146+ struct xenoprof_shared_buffer * sbuf)
18147+{
18148+ int ret;
18149+ int npages;
18150+ struct vm_struct *area;
18151+ pgprot_t prot = __pgprot(_KERNPG_TABLE);
18152+
18153+ sbuf->buffer = NULL;
18154+ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, pdomain);
18155+ if (ret)
18156+ goto out;
18157+
18158+ npages = (pdomain->bufsize * pdomain->nbuf - 1) / PAGE_SIZE + 1;
18159+
18160+ area = alloc_vm_area(npages * PAGE_SIZE);
18161+ if (area == NULL) {
18162+ ret = -ENOMEM;
18163+ goto out;
18164+ }
18165+
18166+ ret = direct_kernel_remap_pfn_range(
18167+ (unsigned long)area->addr,
18168+ pdomain->buf_gmaddr >> PAGE_SHIFT,
18169+ npages * PAGE_SIZE, prot, DOMID_SELF);
18170+ if (ret) {
18171+ vunmap(area->addr);
18172+ goto out;
18173+ }
18174+ sbuf->buffer = area->addr;
18175+
18176+out:
18177+ return ret;
18178+}
18179+
18180+struct op_counter_config counter_config[OP_MAX_COUNTER];
18181+
18182+int xenoprof_create_files(struct super_block * sb, struct dentry * root)
18183+{
18184+ unsigned int i;
18185+
18186+ for (i = 0; i < num_events; ++i) {
18187+ struct dentry * dir;
18188+ char buf[2];
18189+
18190+ snprintf(buf, 2, "%d", i);
18191+ dir = oprofilefs_mkdir(sb, root, buf);
18192+ oprofilefs_create_ulong(sb, dir, "enabled",
18193+ &counter_config[i].enabled);
18194+ oprofilefs_create_ulong(sb, dir, "event",
18195+ &counter_config[i].event);
18196+ oprofilefs_create_ulong(sb, dir, "count",
18197+ &counter_config[i].count);
18198+ oprofilefs_create_ulong(sb, dir, "unit_mask",
18199+ &counter_config[i].unit_mask);
18200+ oprofilefs_create_ulong(sb, dir, "kernel",
18201+ &counter_config[i].kernel);
18202+ oprofilefs_create_ulong(sb, dir, "user",
18203+ &counter_config[i].user);
18204+ }
18205+
18206+ return 0;
18207+}
18208+
18209+int __init oprofile_arch_init(struct oprofile_operations * ops)
18210+{
18211+ return xenoprofile_init(ops);
18212+}
18213+
18214+void oprofile_arch_exit(void)
18215+{
18216+ xenoprofile_exit();
18217+}
18218Index: head-2008-11-25/arch/x86/pci/irq-xen.c
18219===================================================================
18220--- /dev/null 1970-01-01 00:00:00.000000000 +0000
18221+++ head-2008-11-25/arch/x86/pci/irq-xen.c 2008-03-06 08:54:32.000000000 +0100
18222@@ -0,0 +1,1211 @@
18223+/*
18224+ * Low-Level PCI Support for PC -- Routing of Interrupts
18225+ *
18226+ * (c) 1999--2000 Martin Mares <mj@ucw.cz>
18227+ */
18228+
18229+#include <linux/types.h>
18230+#include <linux/kernel.h>
18231+#include <linux/pci.h>
18232+#include <linux/init.h>
18233+#include <linux/slab.h>
18234+#include <linux/interrupt.h>
18235+#include <linux/dmi.h>
18236+#include <asm/io.h>
18237+#include <asm/smp.h>
18238+#include <asm/io_apic.h>
18239+#include <linux/irq.h>
18240+#include <linux/acpi.h>
18241+
18242+#include "pci.h"
18243+
18244+#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
18245+#define PIRQ_VERSION 0x0100
18246+
18247+static int broken_hp_bios_irq9;
18248+static int acer_tm360_irqrouting;
18249+
18250+static struct irq_routing_table *pirq_table;
18251+
18252+static int pirq_enable_irq(struct pci_dev *dev);
18253+
18254+/*
18255+ * Never use: 0, 1, 2 (timer, keyboard, and cascade)
18256+ * Avoid using: 13, 14 and 15 (FP error and IDE).
18257+ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
18258+ */
18259+unsigned int pcibios_irq_mask = 0xfff8;
18260+
18261+static int pirq_penalty[16] = {
18262+ 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
18263+ 0, 0, 0, 0, 1000, 100000, 100000, 100000
18264+};
18265+
18266+struct irq_router {
18267+ char *name;
18268+ u16 vendor, device;
18269+ int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
18270+ int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
18271+};
18272+
18273+struct irq_router_handler {
18274+ u16 vendor;
18275+ int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
18276+};
18277+
18278+int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
18279+void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
18280+
18281+/*
18282+ * Check passed address for the PCI IRQ Routing Table signature
18283+ * and perform checksum verification.
18284+ */
18285+
18286+static inline struct irq_routing_table * pirq_check_routing_table(u8 *addr)
18287+{
18288+ struct irq_routing_table *rt;
18289+ int i;
18290+ u8 sum;
18291+
18292+ rt = (struct irq_routing_table *) addr;
18293+ if (rt->signature != PIRQ_SIGNATURE ||
18294+ rt->version != PIRQ_VERSION ||
18295+ rt->size % 16 ||
18296+ rt->size < sizeof(struct irq_routing_table))
18297+ return NULL;
18298+ sum = 0;
18299+ for (i=0; i < rt->size; i++)
18300+ sum += addr[i];
18301+ if (!sum) {
18302+ DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt);
18303+ return rt;
18304+ }
18305+ return NULL;
18306+}
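/*
 * The validity test above relies on the $PIR convention that all bytes of
 * the table, including the checksum byte, sum to zero modulo 256.  A
 * self-contained sketch of that check, and of how a table generator would
 * pick the checksum byte (illustrative only; these helpers are not part of
 * this patch):
 */
#include <stdint.h>
#include <stddef.h>

static int table_sums_to_zero(const uint8_t *tbl, size_t len)
{
        uint8_t sum = 0;
        size_t i;

        for (i = 0; i < len; i++)
                sum += tbl[i];
        return sum == 0;
}

/* A generator stores -(sum of all other bytes) in the checksum slot. */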
18307+
18308+
18309+
18310+/*
18311+ * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
18312+ */
18313+
18314+static struct irq_routing_table * __init pirq_find_routing_table(void)
18315+{
18316+ u8 *addr;
18317+ struct irq_routing_table *rt;
18318+
18319+#ifdef CONFIG_XEN
18320+ if (!is_initial_xendomain())
18321+ return NULL;
18322+#endif
18323+ if (pirq_table_addr) {
18324+ rt = pirq_check_routing_table((u8 *) isa_bus_to_virt(pirq_table_addr));
18325+ if (rt)
18326+ return rt;
18327+ printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n");
18328+ }
18329+ for(addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
18330+ rt = pirq_check_routing_table(addr);
18331+ if (rt)
18332+ return rt;
18333+ }
18334+ return NULL;
18335+}
18336+
18337+/*
18338+ * If we have an IRQ routing table, use it to search for peer host
18339+ * bridges. It's a gross hack, but since there are no other known
18340+ * ways to get a list of buses, we have to go this way.
18341+ */
18342+
18343+static void __init pirq_peer_trick(void)
18344+{
18345+ struct irq_routing_table *rt = pirq_table;
18346+ u8 busmap[256];
18347+ int i;
18348+ struct irq_info *e;
18349+
18350+ memset(busmap, 0, sizeof(busmap));
18351+ for(i=0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
18352+ e = &rt->slots[i];
18353+#ifdef DEBUG
18354+ {
18355+ int j;
18356+ DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
18357+ for(j=0; j<4; j++)
18358+ DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
18359+ DBG("\n");
18360+ }
18361+#endif
18362+ busmap[e->bus] = 1;
18363+ }
18364+ for(i = 1; i < 256; i++) {
18365+ if (!busmap[i] || pci_find_bus(0, i))
18366+ continue;
18367+ if (pci_scan_bus(i, &pci_root_ops, NULL))
18368+ printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
18369+ }
18370+ pcibios_last_bus = -1;
18371+}
18372+
18373+/*
18374+ * Code for querying and setting of IRQ routes on various interrupt routers.
18375+ */
18376+
18377+void eisa_set_level_irq(unsigned int irq)
18378+{
18379+ unsigned char mask = 1 << (irq & 7);
18380+ unsigned int port = 0x4d0 + (irq >> 3);
18381+ unsigned char val;
18382+ static u16 eisa_irq_mask;
18383+
18384+ if (irq >= 16 || (1 << irq) & eisa_irq_mask)
18385+ return;
18386+
18387+ eisa_irq_mask |= (1 << irq);
18388+ printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq);
18389+ val = inb(port);
18390+ if (!(val & mask)) {
18391+ DBG(KERN_DEBUG " -> edge");
18392+ outb(val | mask, port);
18393+ }
18394+}
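/*
 * The ELCR (edge/level control) registers live at I/O ports 0x4d0 (IRQ 0-7)
 * and 0x4d1 (IRQ 8-15), one bit per IRQ; a set bit marks the IRQ as
 * level-triggered.  Worked example for IRQ 11 (illustrative values only):
 *
 *   port = 0x4d0 + (11 >> 3) = 0x4d1
 *   mask = 1 << (11 & 7)     = 0x08
 */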
18395+
18396+/*
18397+ * Common IRQ routing practice: nybbles in config space,
18398+ * offset by some magic constant.
18399+ */
18400+static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
18401+{
18402+ u8 x;
18403+ unsigned reg = offset + (nr >> 1);
18404+
18405+ pci_read_config_byte(router, reg, &x);
18406+ return (nr & 1) ? (x >> 4) : (x & 0xf);
18407+}
18408+
18409+static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
18410+{
18411+ u8 x;
18412+ unsigned reg = offset + (nr >> 1);
18413+
18414+ pci_read_config_byte(router, reg, &x);
18415+ x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
18416+ pci_write_config_byte(router, reg, x);
18417+}
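/*
 * Each config byte at (offset + nr/2) holds two routing entries: even nr in
 * the low nibble, odd nr in the high nibble.  The same packing on a plain
 * byte array, as a self-contained sketch (illustrative only; nybble_get()
 * and nybble_set() are not part of this patch):
 */
#include <stdint.h>

static unsigned int nybble_get(const uint8_t *regs, unsigned offset, unsigned nr)
{
        uint8_t x = regs[offset + (nr >> 1)];

        return (nr & 1) ? (x >> 4) : (x & 0x0f);
}

static void nybble_set(uint8_t *regs, unsigned offset, unsigned nr, unsigned val)
{
        uint8_t *x = &regs[offset + (nr >> 1)];

        *x = (nr & 1) ? ((*x & 0x0f) | (val << 4)) : ((*x & 0xf0) | (val & 0x0f));
}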
18418+
18419+/*
18420+ * ALI pirq entries are damn ugly, and completely undocumented.
18421+ * This has been figured out from pirq tables, and it's not a pretty
18422+ * picture.
18423+ */
18424+static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
18425+{
18426+ static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
18427+
18428+ return irqmap[read_config_nybble(router, 0x48, pirq-1)];
18429+}
18430+
18431+static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18432+{
18433+ static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
18434+ unsigned int val = irqmap[irq];
18435+
18436+ if (val) {
18437+ write_config_nybble(router, 0x48, pirq-1, val);
18438+ return 1;
18439+ }
18440+ return 0;
18441+}
18442+
18443+/*
18444+ * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
18445+ * just a pointer to the config space.
18446+ */
18447+static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
18448+{
18449+ u8 x;
18450+
18451+ pci_read_config_byte(router, pirq, &x);
18452+ return (x < 16) ? x : 0;
18453+}
18454+
18455+static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18456+{
18457+ pci_write_config_byte(router, pirq, irq);
18458+ return 1;
18459+}
18460+
18461+/*
18462+ * The VIA pirq rules are nibble-based, like ALI,
18463+ * but without the ugly irq number munging.
18464+ * However, PIRQD is in the upper instead of lower 4 bits.
18465+ */
18466+static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
18467+{
18468+ return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
18469+}
18470+
18471+static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18472+{
18473+ write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
18474+ return 1;
18475+}
18476+
18477+/*
18478+ * The VIA pirq rules are nibble-based, like ALI,
18479+ * but without the ugly irq number munging.
18480+ * However, for the 82C586, the nibble map is different.
18481+ */
18482+static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
18483+{
18484+ static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
18485+ return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
18486+}
18487+
18488+static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18489+{
18490+ static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
18491+ write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
18492+ return 1;
18493+}
18494+
18495+/*
18496+ * ITE 8330G pirq rules are nibble-based
18497+ * FIXME: pirqmap may be { 1, 0, 3, 2 },
18498+ * 2+3 are both mapped to irq 9 on my system
18499+ */
18500+static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
18501+{
18502+ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
18503+ return read_config_nybble(router,0x43, pirqmap[pirq-1]);
18504+}
18505+
18506+static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18507+{
18508+ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
18509+ write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
18510+ return 1;
18511+}
18512+
18513+/*
18514+ * OPTI: high four bits are nibble pointer..
18515+ * I wonder what the low bits do?
18516+ */
18517+static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
18518+{
18519+ return read_config_nybble(router, 0xb8, pirq >> 4);
18520+}
18521+
18522+static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18523+{
18524+ write_config_nybble(router, 0xb8, pirq >> 4, irq);
18525+ return 1;
18526+}
18527+
18528+/*
18529+ * Cyrix: nibble offset 0x5C
18530+ * 0x5C bits 7:4 is INTB bits 3:0 is INTA
18531+ * 0x5D bits 7:4 is INTD bits 3:0 is INTC
18532+ */
18533+static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
18534+{
18535+ return read_config_nybble(router, 0x5C, (pirq-1)^1);
18536+}
18537+
18538+static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18539+{
18540+ write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
18541+ return 1;
18542+}
18543+
18544+/*
18545+ * PIRQ routing for SiS 85C503 router used in several SiS chipsets.
18546+ * We have to deal with the following issues here:
18547+ * - vendors have different ideas about the meaning of link values
18548+ * - some onboard devices (integrated in the chipset) have special
18549+ * links and are thus routed differently (i.e. not via PCI INTA-INTD)
18550+ * - different revisions of the router have a different layout for
18551+ * the routing registers, particularly for the onchip devices
18552+ *
18553+ * Common to all routing registers is that we have one byte
18554+ * per routable link, which is defined as:
18555+ * bit 7 IRQ mapping enabled (0) or disabled (1)
18556+ * bits [6:4] reserved (sometimes used for onchip devices)
18557+ * bits [3:0] IRQ to map to
18558+ * allowed: 3-7, 9-12, 14-15
18559+ * reserved: 0, 1, 2, 8, 13
18560+ *
18561+ * The config-space registers located at 0x41/0x42/0x43/0x44 are
18562+ * always used to route the normal PCI INT A/B/C/D respectively.
18563+ * Apparently there are systems implementing PCI routing table using
18564+ * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D.
18565+ * We try our best to handle both link mappings.
18566+ *
18567+ * Currently (2003-05-21) it appears most SiS chipsets follow the
18568+ * definition of routing registers from the SiS-5595 southbridge.
18569+ * According to the SiS 5595 datasheets, the revision IDs of the
18570+ * router (ISA-bridge) should be 0x01 or 0xb0.
18571+ *
18572+ * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
18573+ * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
18574+ * They seem to work with the current routing code. However there is
18575+ * some concern because of the two USB-OHCI HCs (original SiS 5595
18576+ * had only one). YMMV.
18577+ *
18578+ * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
18579+ *
18580+ * 0x61: IDEIRQ:
18581+ * bits [6:5] must be written 01
18582+ * bit 4 channel-select primary (0), secondary (1)
18583+ *
18584+ * 0x62: USBIRQ:
18585+ * bit 6 OHCI function disabled (0), enabled (1)
18586+ *
18587+ * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved
18588+ *
18589+ * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved
18590+ *
18591+ * We support USBIRQ (in addition to INTA-INTD) and keep the
18592+ * IDE, ACPI and DAQ routing untouched as set by the BIOS.
18593+ *
18594+ * Currently the only reported exception is the new SiS 65x chipset
18595+ * which includes the SiS 69x southbridge. Here we have the 85C503
18596+ * router revision 0x04 and there are changes in the register layout
18597+ * mostly related to the different USB HCs with USB 2.0 support.
18598+ *
18599+ * Onchip routing for router rev-id 0x04 (trial-and-error observation)
18600+ *
18601+ * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs
18602+ *	bits [6:4] are probably unused, unlike the 5595
18603+ */
18604+
18605+#define PIRQ_SIS_IRQ_MASK 0x0f
18606+#define PIRQ_SIS_IRQ_DISABLE 0x80
18607+#define PIRQ_SIS_USB_ENABLE 0x40
18608+
18609+static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
18610+{
18611+ u8 x;
18612+ int reg;
18613+
18614+ reg = pirq;
18615+ if (reg >= 0x01 && reg <= 0x04)
18616+ reg += 0x40;
18617+ pci_read_config_byte(router, reg, &x);
18618+ return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK);
18619+}
18620+
18621+static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18622+{
18623+ u8 x;
18624+ int reg;
18625+
18626+ reg = pirq;
18627+ if (reg >= 0x01 && reg <= 0x04)
18628+ reg += 0x40;
18629+ pci_read_config_byte(router, reg, &x);
18630+ x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE);
18631+ x |= irq ? irq: PIRQ_SIS_IRQ_DISABLE;
18632+ pci_write_config_byte(router, reg, x);
18633+ return 1;
18634+}
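+
+/*
+ * Example of the encoding handled above: a routing byte of 0x0b means
+ * "mapping enabled, IRQ 11", while writing irq == 0 stores
+ * PIRQ_SIS_IRQ_DISABLE (0x80) instead, disabling the link but leaving
+ * the reserved bits 6:4 untouched.
+ */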
18635+
18636+
18637+/*
18638+ * VLSI: nibble offset 0x74 - educated guess based on the routing table and
18639+ * config space of VLSI 82C534 PCI-bridge/router (1004:0102)
18640+ * Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
18641+ * devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
18642+ * for the busbridge to the docking station.
18643+ */
18644+
18645+static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
18646+{
18647+ if (pirq > 8) {
18648+ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
18649+ return 0;
18650+ }
18651+ return read_config_nybble(router, 0x74, pirq-1);
18652+}
18653+
18654+static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18655+{
18656+ if (pirq > 8) {
18657+ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
18658+ return 0;
18659+ }
18660+ write_config_nybble(router, 0x74, pirq-1, irq);
18661+ return 1;
18662+}
18663+
18664+/*
18665+ * ServerWorks: PCI interrupts mapped to system IRQ lines through Index
18666+ * and Redirect I/O registers (0x0c00 and 0x0c01). The Index register
18667+ * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect
18668+ * register is a straight binary coding of desired PIC IRQ (low nibble).
18669+ *
18670+ * The 'link' value in the PIRQ table is already in the correct format
18671+ * for the Index register. There are some special index values:
18672+ * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
18673+ * and 0x03 for SMBus.
18674+ */
18675+static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
18676+{
18677+ outb_p(pirq, 0xc00);
18678+ return inb(0xc01) & 0xf;
18679+}
18680+
18681+static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18682+{
18683+ outb_p(pirq, 0xc00);
18684+ outb_p(irq, 0xc01);
18685+ return 1;
18686+}
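+
+/*
+ * Example of the index format described above: for PCIIRQ10 the
+ * routing table carries link 0x1a, so pirq_serverworks_get() writes
+ * 0x1a to port 0xc00 and the low nibble read back from 0xc01 is the
+ * PIC IRQ currently routed to that line.
+ */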
18687+
18688+/* Support for AMD756 PCI IRQ Routing
18689+ * Jhon H. Caicedo <jhcaiced@osso.org.co>
18690+ * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
18691+ * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
18692+ * The AMD756 pirq rules are nibble-based
18693+ * offset 0x56 0-3 PIRQA 4-7 PIRQB
18694+ * offset 0x57 0-3 PIRQC 4-7 PIRQD
18695+ */
18696+static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
18697+{
18698+ u8 irq;
18699+ irq = 0;
18700+ if (pirq <= 4)
18701+ {
18702+ irq = read_config_nybble(router, 0x56, pirq - 1);
18703+ }
18704+ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
18705+ dev->vendor, dev->device, pirq, irq);
18706+ return irq;
18707+}
18708+
18709+static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18710+{
18711+ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
18712+ dev->vendor, dev->device, pirq, irq);
18713+ if (pirq <= 4)
18714+ {
18715+ write_config_nybble(router, 0x56, pirq - 1, irq);
18716+ }
18717+ return 1;
18718+}
18719+
18720+#ifdef CONFIG_PCI_BIOS
18721+
18722+static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
18723+{
18724+ struct pci_dev *bridge;
18725+ int pin = pci_get_interrupt_pin(dev, &bridge);
18726+ return pcibios_set_irq_routing(bridge, pin, irq);
18727+}
18728+
18729+#endif
18730+
18731+static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
18732+{
18733+ static struct pci_device_id __initdata pirq_440gx[] = {
18734+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
18735+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
18736+ { },
18737+ };
18738+
18739+ /* 440GX has a proprietary PIRQ router -- don't use it */
18740+ if (pci_dev_present(pirq_440gx))
18741+ return 0;
18742+
18743+ switch(device)
18744+ {
18745+ case PCI_DEVICE_ID_INTEL_82371FB_0:
18746+ case PCI_DEVICE_ID_INTEL_82371SB_0:
18747+ case PCI_DEVICE_ID_INTEL_82371AB_0:
18748+ case PCI_DEVICE_ID_INTEL_82371MX:
18749+ case PCI_DEVICE_ID_INTEL_82443MX_0:
18750+ case PCI_DEVICE_ID_INTEL_82801AA_0:
18751+ case PCI_DEVICE_ID_INTEL_82801AB_0:
18752+ case PCI_DEVICE_ID_INTEL_82801BA_0:
18753+ case PCI_DEVICE_ID_INTEL_82801BA_10:
18754+ case PCI_DEVICE_ID_INTEL_82801CA_0:
18755+ case PCI_DEVICE_ID_INTEL_82801CA_12:
18756+ case PCI_DEVICE_ID_INTEL_82801DB_0:
18757+ case PCI_DEVICE_ID_INTEL_82801E_0:
18758+ case PCI_DEVICE_ID_INTEL_82801EB_0:
18759+ case PCI_DEVICE_ID_INTEL_ESB_1:
18760+ case PCI_DEVICE_ID_INTEL_ICH6_0:
18761+ case PCI_DEVICE_ID_INTEL_ICH6_1:
18762+ case PCI_DEVICE_ID_INTEL_ICH7_0:
18763+ case PCI_DEVICE_ID_INTEL_ICH7_1:
18764+ case PCI_DEVICE_ID_INTEL_ICH7_30:
18765+ case PCI_DEVICE_ID_INTEL_ICH7_31:
18766+ case PCI_DEVICE_ID_INTEL_ESB2_0:
18767+ case PCI_DEVICE_ID_INTEL_ICH8_0:
18768+ case PCI_DEVICE_ID_INTEL_ICH8_1:
18769+ case PCI_DEVICE_ID_INTEL_ICH8_2:
18770+ case PCI_DEVICE_ID_INTEL_ICH8_3:
18771+ case PCI_DEVICE_ID_INTEL_ICH8_4:
18772+ case PCI_DEVICE_ID_INTEL_ICH9_0:
18773+ case PCI_DEVICE_ID_INTEL_ICH9_1:
18774+ case PCI_DEVICE_ID_INTEL_ICH9_2:
18775+ case PCI_DEVICE_ID_INTEL_ICH9_3:
18776+ case PCI_DEVICE_ID_INTEL_ICH9_4:
18777+ case PCI_DEVICE_ID_INTEL_ICH9_5:
18778+ r->name = "PIIX/ICH";
18779+ r->get = pirq_piix_get;
18780+ r->set = pirq_piix_set;
18781+ return 1;
18782+ }
18783+ return 0;
18784+}
18785+
18786+static __init int via_router_probe(struct irq_router *r,
18787+ struct pci_dev *router, u16 device)
18788+{
18789+ /* FIXME: We should move some of the quirk fixup stuff here */
18790+
18791+ /*
18792+	 * workarounds for some buggy BIOSes
18793+ */
18794+ if (device == PCI_DEVICE_ID_VIA_82C586_0) {
18795+ switch(router->device) {
18796+ case PCI_DEVICE_ID_VIA_82C686:
18797+ /*
18798+			 * Asus K7M BIOS wrongly reports the 82C686A
18799+			 * as 586-compatible
18800+ */
18801+ device = PCI_DEVICE_ID_VIA_82C686;
18802+ break;
18803+ case PCI_DEVICE_ID_VIA_8235:
18804+			/*
18805+			 * Asus A7V-X BIOS wrongly reports the 8235
18806+			 * as 586-compatible
18807+ */
18808+ device = PCI_DEVICE_ID_VIA_8235;
18809+ break;
18810+ }
18811+ }
18812+
18813+ switch(device) {
18814+ case PCI_DEVICE_ID_VIA_82C586_0:
18815+ r->name = "VIA";
18816+ r->get = pirq_via586_get;
18817+ r->set = pirq_via586_set;
18818+ return 1;
18819+ case PCI_DEVICE_ID_VIA_82C596:
18820+ case PCI_DEVICE_ID_VIA_82C686:
18821+ case PCI_DEVICE_ID_VIA_8231:
18822+ case PCI_DEVICE_ID_VIA_8233A:
18823+ case PCI_DEVICE_ID_VIA_8235:
18824+ case PCI_DEVICE_ID_VIA_8237:
18825+ /* FIXME: add new ones for 8233/5 */
18826+ r->name = "VIA";
18827+ r->get = pirq_via_get;
18828+ r->set = pirq_via_set;
18829+ return 1;
18830+ }
18831+ return 0;
18832+}
18833+
18834+static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
18835+{
18836+ switch(device)
18837+ {
18838+ case PCI_DEVICE_ID_VLSI_82C534:
18839+ r->name = "VLSI 82C534";
18840+ r->get = pirq_vlsi_get;
18841+ r->set = pirq_vlsi_set;
18842+ return 1;
18843+ }
18844+ return 0;
18845+}
18846+
18847+
18848+static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
18849+{
18850+ switch(device)
18851+ {
18852+ case PCI_DEVICE_ID_SERVERWORKS_OSB4:
18853+ case PCI_DEVICE_ID_SERVERWORKS_CSB5:
18854+ r->name = "ServerWorks";
18855+ r->get = pirq_serverworks_get;
18856+ r->set = pirq_serverworks_set;
18857+ return 1;
18858+ }
18859+ return 0;
18860+}
18861+
18862+static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
18863+{
18864+ if (device != PCI_DEVICE_ID_SI_503)
18865+ return 0;
18866+
18867+ r->name = "SIS";
18868+ r->get = pirq_sis_get;
18869+ r->set = pirq_sis_set;
18870+ return 1;
18871+}
18872+
18873+static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
18874+{
18875+ switch(device)
18876+ {
18877+ case PCI_DEVICE_ID_CYRIX_5520:
18878+ r->name = "NatSemi";
18879+ r->get = pirq_cyrix_get;
18880+ r->set = pirq_cyrix_set;
18881+ return 1;
18882+ }
18883+ return 0;
18884+}
18885+
18886+static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
18887+{
18888+ switch(device)
18889+ {
18890+ case PCI_DEVICE_ID_OPTI_82C700:
18891+ r->name = "OPTI";
18892+ r->get = pirq_opti_get;
18893+ r->set = pirq_opti_set;
18894+ return 1;
18895+ }
18896+ return 0;
18897+}
18898+
18899+static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
18900+{
18901+ switch(device)
18902+ {
18903+ case PCI_DEVICE_ID_ITE_IT8330G_0:
18904+ r->name = "ITE";
18905+ r->get = pirq_ite_get;
18906+ r->set = pirq_ite_set;
18907+ return 1;
18908+ }
18909+ return 0;
18910+}
18911+
18912+static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
18913+{
18914+ switch(device)
18915+ {
18916+ case PCI_DEVICE_ID_AL_M1533:
18917+ case PCI_DEVICE_ID_AL_M1563:
18918+ printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n");
18919+ r->name = "ALI";
18920+ r->get = pirq_ali_get;
18921+ r->set = pirq_ali_set;
18922+ return 1;
18923+ }
18924+ return 0;
18925+}
18926+
18927+static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
18928+{
18929+ switch(device)
18930+ {
18931+ case PCI_DEVICE_ID_AMD_VIPER_740B:
18932+ r->name = "AMD756";
18933+ break;
18934+ case PCI_DEVICE_ID_AMD_VIPER_7413:
18935+ r->name = "AMD766";
18936+ break;
18937+ case PCI_DEVICE_ID_AMD_VIPER_7443:
18938+ r->name = "AMD768";
18939+ break;
18940+ default:
18941+ return 0;
18942+ }
18943+ r->get = pirq_amd756_get;
18944+ r->set = pirq_amd756_set;
18945+ return 1;
18946+}
18947+
18948+static __initdata struct irq_router_handler pirq_routers[] = {
18949+ { PCI_VENDOR_ID_INTEL, intel_router_probe },
18950+ { PCI_VENDOR_ID_AL, ali_router_probe },
18951+ { PCI_VENDOR_ID_ITE, ite_router_probe },
18952+ { PCI_VENDOR_ID_VIA, via_router_probe },
18953+ { PCI_VENDOR_ID_OPTI, opti_router_probe },
18954+ { PCI_VENDOR_ID_SI, sis_router_probe },
18955+ { PCI_VENDOR_ID_CYRIX, cyrix_router_probe },
18956+ { PCI_VENDOR_ID_VLSI, vlsi_router_probe },
18957+ { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
18958+ { PCI_VENDOR_ID_AMD, amd_router_probe },
18959+ /* Someone with docs needs to add the ATI Radeon IGP */
18960+ { 0, NULL }
18961+};
18962+static struct irq_router pirq_router;
18963+static struct pci_dev *pirq_router_dev;
18964+
18965+
18966+/*
18967+ * FIXME: should we have an option to say "generic for
18968+ * chipset" ?
18969+ */
18970+
18971+static void __init pirq_find_router(struct irq_router *r)
18972+{
18973+ struct irq_routing_table *rt = pirq_table;
18974+ struct irq_router_handler *h;
18975+
18976+#ifdef CONFIG_PCI_BIOS
18977+ if (!rt->signature) {
18978+ printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
18979+ r->set = pirq_bios_set;
18980+ r->name = "BIOS";
18981+ return;
18982+ }
18983+#endif
18984+
18985+ /* Default unless a driver reloads it */
18986+ r->name = "default";
18987+ r->get = NULL;
18988+ r->set = NULL;
18989+
18990+ DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
18991+ rt->rtr_vendor, rt->rtr_device);
18992+
18993+ pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
18994+ if (!pirq_router_dev) {
18995+ DBG(KERN_DEBUG "PCI: Interrupt router not found at "
18996+ "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
18997+ return;
18998+ }
18999+
19000+ for( h = pirq_routers; h->vendor; h++) {
19001+ /* First look for a router match */
19002+ if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device))
19003+ break;
19004+ /* Fall back to a device match */
19005+ if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device))
19006+ break;
19007+ }
19008+ printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
19009+ pirq_router.name,
19010+ pirq_router_dev->vendor,
19011+ pirq_router_dev->device,
19012+ pci_name(pirq_router_dev));
19013+}
19014+
19015+static struct irq_info *pirq_get_info(struct pci_dev *dev)
19016+{
19017+ struct irq_routing_table *rt = pirq_table;
19018+ int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
19019+ struct irq_info *info;
19020+
19021+ for (info = rt->slots; entries--; info++)
19022+ if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
19023+ return info;
19024+ return NULL;
19025+}
19026+
19027+static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
19028+{
19029+ u8 pin;
19030+ struct irq_info *info;
19031+ int i, pirq, newirq;
19032+ int irq = 0;
19033+ u32 mask;
19034+ struct irq_router *r = &pirq_router;
19035+ struct pci_dev *dev2 = NULL;
19036+ char *msg = NULL;
19037+
19038+ /* Find IRQ pin */
19039+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
19040+ if (!pin) {
19041+ DBG(KERN_DEBUG " -> no interrupt pin\n");
19042+ return 0;
19043+ }
19044+ pin = pin - 1;
19045+
19046+ /* Find IRQ routing entry */
19047+
19048+ if (!pirq_table)
19049+ return 0;
19050+
19051+ DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin);
19052+ info = pirq_get_info(dev);
19053+ if (!info) {
19054+ DBG(" -> not found in routing table\n" KERN_DEBUG);
19055+ return 0;
19056+ }
19057+ pirq = info->irq[pin].link;
19058+ mask = info->irq[pin].bitmap;
19059+ if (!pirq) {
19060+ DBG(" -> not routed\n" KERN_DEBUG);
19061+ return 0;
19062+ }
19063+ DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
19064+ mask &= pcibios_irq_mask;
19065+
19066+ /* Work around broken HP Pavilion Notebooks which assign USB to
19067+ IRQ 9 even though it is actually wired to IRQ 11 */
19068+
19069+ if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
19070+ dev->irq = 11;
19071+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
19072+ r->set(pirq_router_dev, dev, pirq, 11);
19073+ }
19074+
19075+ /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
19076+ if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) {
19077+ pirq = 0x68;
19078+ mask = 0x400;
19079+ dev->irq = r->get(pirq_router_dev, dev, pirq);
19080+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
19081+ }
19082+
19083+ /*
19084+ * Find the best IRQ to assign: use the one
19085+ * reported by the device if possible.
19086+ */
19087+ newirq = dev->irq;
19088+ if (newirq && !((1 << newirq) & mask)) {
19089+ if ( pci_probe & PCI_USE_PIRQ_MASK) newirq = 0;
19090+ else printk("\n" KERN_WARNING
19091+ "PCI: IRQ %i for device %s doesn't match PIRQ mask "
19092+ "- try pci=usepirqmask\n" KERN_DEBUG, newirq,
19093+ pci_name(dev));
19094+ }
19095+ if (!newirq && assign) {
19096+ for (i = 0; i < 16; i++) {
19097+ if (!(mask & (1 << i)))
19098+ continue;
19099+ if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, IRQF_SHARED))
19100+ newirq = i;
19101+ }
19102+ }
19103+ DBG(" -> newirq=%d", newirq);
19104+
19105+ /* Check if it is hardcoded */
19106+ if ((pirq & 0xf0) == 0xf0) {
19107+ irq = pirq & 0xf;
19108+ DBG(" -> hardcoded IRQ %d\n", irq);
19109+ msg = "Hardcoded";
19110+ } else if ( r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
19111+ ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask)) ) {
19112+ DBG(" -> got IRQ %d\n", irq);
19113+ msg = "Found";
19114+ eisa_set_level_irq(irq);
19115+ } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
19116+ DBG(" -> assigning IRQ %d", newirq);
19117+ if (r->set(pirq_router_dev, dev, pirq, newirq)) {
19118+ eisa_set_level_irq(newirq);
19119+ DBG(" ... OK\n");
19120+ msg = "Assigned";
19121+ irq = newirq;
19122+ }
19123+ }
19124+
19125+ if (!irq) {
19126+ DBG(" ... failed\n");
19127+ if (newirq && mask == (1 << newirq)) {
19128+ msg = "Guessed";
19129+ irq = newirq;
19130+ } else
19131+ return 0;
19132+ }
19133+ printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev));
19134+
19135+ /* Update IRQ for all devices with the same pirq value */
19136+ while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
19137+ pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
19138+ if (!pin)
19139+ continue;
19140+ pin--;
19141+ info = pirq_get_info(dev2);
19142+ if (!info)
19143+ continue;
19144+ if (info->irq[pin].link == pirq) {
19145+ /* We refuse to override the dev->irq information. Give a warning! */
19146+ if ( dev2->irq && dev2->irq != irq && \
19147+ (!(pci_probe & PCI_USE_PIRQ_MASK) || \
19148+ ((1 << dev2->irq) & mask)) ) {
19149+#ifndef CONFIG_PCI_MSI
19150+ printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
19151+ pci_name(dev2), dev2->irq, irq);
19152+#endif
19153+ continue;
19154+ }
19155+ dev2->irq = irq;
19156+ pirq_penalty[irq]++;
19157+ if (dev != dev2)
19158+ printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2));
19159+ }
19160+ }
19161+ return 1;
19162+}
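+
+/*
+ * Summary of the resolution order above: a hardcoded link (upper four
+ * bits all set) wins, then an IRQ already programmed into the router
+ * (r->get), then a freshly assigned one via r->set(), and finally a
+ * guess when the PIRQ bitmap allows only a single IRQ.
+ */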
19163+
19164+static void __init pcibios_fixup_irqs(void)
19165+{
19166+ struct pci_dev *dev = NULL;
19167+ u8 pin;
19168+
19169+ DBG(KERN_DEBUG "PCI: IRQ fixup\n");
19170+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
19171+ /*
19172+ * If the BIOS has set an out of range IRQ number, just ignore it.
19173+		 * Also keep track of which IRQs are already in use.
19174+ */
19175+ if (dev->irq >= 16) {
19176+ DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
19177+ dev->irq = 0;
19178+ }
19179+ /* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
19180+ if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
19181+ pirq_penalty[dev->irq] = 0;
19182+ pirq_penalty[dev->irq]++;
19183+ }
19184+
19185+ dev = NULL;
19186+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
19187+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
19188+#ifdef CONFIG_X86_IO_APIC
19189+ /*
19190+ * Recalculate IRQ numbers if we use the I/O APIC.
19191+ */
19192+ if (io_apic_assign_pci_irqs)
19193+ {
19194+ int irq;
19195+
19196+ if (pin) {
19197+ pin--; /* interrupt pins are numbered starting from 1 */
19198+ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
19199+ /*
19200+ * Busses behind bridges are typically not listed in the MP-table.
19201+ * In this case we have to look up the IRQ based on the parent bus,
19202+ * parent slot, and pin number. The SMP code detects such bridged
19203+ * busses itself so we should get into this branch reliably.
19204+ */
19205+ if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
19206+ struct pci_dev * bridge = dev->bus->self;
19207+
19208+ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
19209+ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
19210+ PCI_SLOT(bridge->devfn), pin);
19211+ if (irq >= 0)
19212+ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
19213+ pci_name(bridge), 'A' + pin, irq);
19214+ }
19215+ if (irq >= 0) {
19216+ if (use_pci_vector() &&
19217+ !platform_legacy_irq(irq))
19218+ irq = IO_APIC_VECTOR(irq);
19219+
19220+ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
19221+ pci_name(dev), 'A' + pin, irq);
19222+ dev->irq = irq;
19223+ }
19224+ }
19225+ }
19226+#endif
19227+ /*
19228+ * Still no IRQ? Try to lookup one...
19229+ */
19230+ if (pin && !dev->irq)
19231+ pcibios_lookup_irq(dev, 0);
19232+ }
19233+}
19234+
19235+/*
19236+ * Work around broken HP Pavilion Notebooks which assign USB to
19237+ * IRQ 9 even though it is actually wired to IRQ 11
19238+ */
19239+static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d)
19240+{
19241+ if (!broken_hp_bios_irq9) {
19242+ broken_hp_bios_irq9 = 1;
19243+ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
19244+ }
19245+ return 0;
19246+}
19247+
19248+/*
19249+ * Work around broken Acer TravelMate 360 Notebooks which assign
19250+ * Cardbus to IRQ 11 even though it is actually wired to IRQ 10
19251+ */
19252+static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d)
19253+{
19254+ if (!acer_tm360_irqrouting) {
19255+ acer_tm360_irqrouting = 1;
19256+ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
19257+ }
19258+ return 0;
19259+}
19260+
19261+static struct dmi_system_id __initdata pciirq_dmi_table[] = {
19262+ {
19263+ .callback = fix_broken_hp_bios_irq9,
19264+ .ident = "HP Pavilion N5400 Series Laptop",
19265+ .matches = {
19266+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
19267+ DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
19268+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"),
19269+ DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
19270+ },
19271+ },
19272+ {
19273+ .callback = fix_acer_tm360_irqrouting,
19274+ .ident = "Acer TravelMate 36x Laptop",
19275+ .matches = {
19276+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
19277+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
19278+ },
19279+ },
19280+ { }
19281+};
19282+
19283+static int __init pcibios_irq_init(void)
19284+{
19285+ DBG(KERN_DEBUG "PCI: IRQ init\n");
19286+
19287+ if (pcibios_enable_irq || raw_pci_ops == NULL)
19288+ return 0;
19289+
19290+ dmi_check_system(pciirq_dmi_table);
19291+
19292+ pirq_table = pirq_find_routing_table();
19293+
19294+#ifdef CONFIG_PCI_BIOS
19295+ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
19296+ pirq_table = pcibios_get_irq_routing_table();
19297+#endif
19298+ if (pirq_table) {
19299+ pirq_peer_trick();
19300+ pirq_find_router(&pirq_router);
19301+ if (pirq_table->exclusive_irqs) {
19302+ int i;
19303+ for (i=0; i<16; i++)
19304+ if (!(pirq_table->exclusive_irqs & (1 << i)))
19305+ pirq_penalty[i] += 100;
19306+ }
19307+ /* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
19308+ if (io_apic_assign_pci_irqs)
19309+ pirq_table = NULL;
19310+ }
19311+
19312+ pcibios_enable_irq = pirq_enable_irq;
19313+
19314+ pcibios_fixup_irqs();
19315+ return 0;
19316+}
19317+
19318+subsys_initcall(pcibios_irq_init);
19319+
19320+
19321+static void pirq_penalize_isa_irq(int irq, int active)
19322+{
19323+ /*
19324+ * If any ISAPnP device reports an IRQ in its list of possible
19325+	 * IRQs, we try to avoid assigning it to PCI devices.
19326+ */
19327+ if (irq < 16) {
19328+ if (active)
19329+ pirq_penalty[irq] += 1000;
19330+ else
19331+ pirq_penalty[irq] += 100;
19332+ }
19333+}
19334+
19335+void pcibios_penalize_isa_irq(int irq, int active)
19336+{
19337+#ifdef CONFIG_ACPI
19338+ if (!acpi_noirq)
19339+ acpi_penalize_isa_irq(irq, active);
19340+ else
19341+#endif
19342+ pirq_penalize_isa_irq(irq, active);
19343+}
19344+
19345+static int pirq_enable_irq(struct pci_dev *dev)
19346+{
19347+ u8 pin;
19348+ struct pci_dev *temp_dev;
19349+
19350+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
19351+ if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
19352+ char *msg = "";
19353+
19354+ pin--; /* interrupt pins are numbered starting from 1 */
19355+
19356+ if (io_apic_assign_pci_irqs) {
19357+ int irq;
19358+
19359+ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
19360+ /*
19361+ * Busses behind bridges are typically not listed in the MP-table.
19362+ * In this case we have to look up the IRQ based on the parent bus,
19363+ * parent slot, and pin number. The SMP code detects such bridged
19364+ * busses itself so we should get into this branch reliably.
19365+ */
19366+ temp_dev = dev;
19367+ while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
19368+ struct pci_dev * bridge = dev->bus->self;
19369+
19370+ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
19371+ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
19372+ PCI_SLOT(bridge->devfn), pin);
19373+ if (irq >= 0)
19374+ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
19375+ pci_name(bridge), 'A' + pin, irq);
19376+ dev = bridge;
19377+ }
19378+ dev = temp_dev;
19379+ if (irq >= 0) {
19380+#ifdef CONFIG_PCI_MSI
19381+ if (!platform_legacy_irq(irq))
19382+ irq = IO_APIC_VECTOR(irq);
19383+#endif
19384+ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
19385+ pci_name(dev), 'A' + pin, irq);
19386+ dev->irq = irq;
19387+ return 0;
19388+ } else
19389+ msg = " Probably buggy MP table.";
19390+ } else if (pci_probe & PCI_BIOS_IRQ_SCAN)
19391+ msg = "";
19392+ else
19393+ msg = " Please try using pci=biosirq.";
19394+
19395+		/* With IDE legacy devices the IRQ lookup failure is not a problem. */
19396+ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5))
19397+ return 0;
19398+
19399+ printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
19400+ 'A' + pin, pci_name(dev), msg);
19401+ }
19402+ return 0;
19403+}
19404+
19405+int pci_vector_resources(int last, int nr_released)
19406+{
19407+ int count = nr_released;
19408+
19409+ int next = last;
19410+ int offset = (last % 8);
19411+
19412+ while (next < FIRST_SYSTEM_VECTOR) {
19413+ next += 8;
19414+#ifdef CONFIG_X86_64
19415+ if (next == IA32_SYSCALL_VECTOR)
19416+ continue;
19417+#else
19418+ if (next == SYSCALL_VECTOR)
19419+ continue;
19420+#endif
19421+ count++;
19422+ if (next >= FIRST_SYSTEM_VECTOR) {
19423+ if (offset%8) {
19424+ next = FIRST_DEVICE_VECTOR + offset;
19425+ offset++;
19426+ continue;
19427+ }
19428+ count--;
19429+ }
19430+ }
19431+
19432+ return count;
19433+}
19434Index: head-2008-11-25/arch/x86/pci/pcifront.c
19435===================================================================
19436--- /dev/null 1970-01-01 00:00:00.000000000 +0000
19437+++ head-2008-11-25/arch/x86/pci/pcifront.c 2007-06-12 13:12:49.000000000 +0200
19438@@ -0,0 +1,55 @@
19439+/*
19440+ * PCI Frontend Stub - puts some "dummy" functions into the Linux x86 PCI core
19441+ * to support the Xen PCI Frontend's operation
19442+ *
19443+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
19444+ */
19445+#include <linux/module.h>
19446+#include <linux/init.h>
19447+#include <linux/pci.h>
19448+#include <asm/acpi.h>
19449+#include "pci.h"
19450+
19451+static int pcifront_enable_irq(struct pci_dev *dev)
19452+{
19453+ u8 irq;
19454+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
19455+ dev->irq = irq;
19456+
19457+ return 0;
19458+}
19459+
19460+extern u8 pci_cache_line_size;
19461+
19462+static int __init pcifront_x86_stub_init(void)
19463+{
19464+ struct cpuinfo_x86 *c = &boot_cpu_data;
19465+
19466+ /* Only install our method if we haven't found real hardware already */
19467+ if (raw_pci_ops)
19468+ return 0;
19469+
19470+ printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
19471+
19472+ /* Copied from arch/i386/pci/common.c */
19473+ pci_cache_line_size = 32 >> 2;
19474+ if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
19475+ pci_cache_line_size = 64 >> 2; /* K7 & K8 */
19476+ else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
19477+ pci_cache_line_size = 128 >> 2; /* P4 */
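+	/*
+	 * pci_cache_line_size is kept in 32-bit dwords, the unit used by
+	 * the PCI cache line size register, hence the ">> 2" conversions
+	 * from byte counts above.
+	 */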
19478+
19479+ /* On x86, we need to disable the normal IRQ routing table and
19480+ * just ask the backend
19481+ */
19482+ pcibios_enable_irq = pcifront_enable_irq;
19483+ pcibios_disable_irq = NULL;
19484+
19485+#ifdef CONFIG_ACPI
19486+ /* Keep ACPI out of the picture */
19487+ acpi_noirq = 1;
19488+#endif
19489+
19490+ return 0;
19491+}
19492+
19493+arch_initcall(pcifront_x86_stub_init);
19494Index: head-2008-11-25/arch/x86/ia32/ia32entry-xen.S
19495===================================================================
19496--- /dev/null 1970-01-01 00:00:00.000000000 +0000
19497+++ head-2008-11-25/arch/x86/ia32/ia32entry-xen.S 2008-04-02 12:34:02.000000000 +0200
19498@@ -0,0 +1,666 @@
19499+/*
19500+ * Compatibility mode system call entry point for x86-64.
19501+ *
19502+ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
19503+ */
19504+
19505+#include <asm/dwarf2.h>
19506+#include <asm/calling.h>
19507+#include <asm/asm-offsets.h>
19508+#include <asm/current.h>
19509+#include <asm/errno.h>
19510+#include <asm/ia32_unistd.h>
19511+#include <asm/thread_info.h>
19512+#include <asm/segment.h>
19513+#include <asm/vsyscall32.h>
19514+#include <asm/irqflags.h>
19515+#include <linux/linkage.h>
19516+
19517+#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
19518+
19519+ .macro IA32_ARG_FIXUP noebp=0
19520+ movl %edi,%r8d
19521+ .if \noebp
19522+ .else
19523+ movl %ebp,%r9d
19524+ .endif
19525+ xchg %ecx,%esi
19526+ movl %ebx,%edi
19527+ movl %edx,%edx /* zero extension */
19528+ .endm
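+
+	/*
+	 * Net effect of IA32_ARG_FIXUP: the i386 syscall arguments in
+	 * ebx/ecx/edx/esi/edi/ebp end up in rdi/rsi/rdx/rcx/r8/r9, the
+	 * x86-64 C calling convention expected by the sys_* targets.
+	 * With noebp=1 the callers load arg6 into %r9d themselves from
+	 * the user stack.
+	 */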
19529+
19530+ /* clobbers %eax */
19531+ .macro CLEAR_RREGS
19532+ xorl %eax,%eax
19533+ movq %rax,R11(%rsp)
19534+ movq %rax,R10(%rsp)
19535+ movq %rax,R9(%rsp)
19536+ movq %rax,R8(%rsp)
19537+ .endm
19538+
19539+ .macro LOAD_ARGS32 offset
19540+ movl \offset(%rsp),%r11d
19541+ movl \offset+8(%rsp),%r10d
19542+ movl \offset+16(%rsp),%r9d
19543+ movl \offset+24(%rsp),%r8d
19544+ movl \offset+40(%rsp),%ecx
19545+ movl \offset+48(%rsp),%edx
19546+ movl \offset+56(%rsp),%esi
19547+ movl \offset+64(%rsp),%edi
19548+ movl \offset+72(%rsp),%eax
19549+ .endm
19550+
19551+ .macro CFI_STARTPROC32 simple
19552+ CFI_STARTPROC \simple
19553+ CFI_UNDEFINED r8
19554+ CFI_UNDEFINED r9
19555+ CFI_UNDEFINED r10
19556+ CFI_UNDEFINED r11
19557+ CFI_UNDEFINED r12
19558+ CFI_UNDEFINED r13
19559+ CFI_UNDEFINED r14
19560+ CFI_UNDEFINED r15
19561+ .endm
19562+
19563+/*
19564+ * 32bit SYSENTER instruction entry.
19565+ *
19566+ * Arguments:
19567+ * %eax System call number.
19568+ * %ebx Arg1
19569+ * %ecx Arg2
19570+ * %edx Arg3
19571+ * %esi Arg4
19572+ * %edi Arg5
19573+ * %ebp user stack
19574+ * 0(%ebp) Arg6
19575+ *
19576+ * Interrupts on.
19577+ *
19578+ * This is purely a fast path. For anything complicated we use the int 0x80
19579+ * path below. Set up a complete hardware stack frame to share code
19580+ * with the int 0x80 path.
19581+ */
19582+ENTRY(ia32_sysenter_target)
19583+ CFI_STARTPROC32 simple
19584+ CFI_DEF_CFA rsp,SS+8-RIP+16
19585+ /*CFI_REL_OFFSET ss,SS-RIP+16*/
19586+ CFI_REL_OFFSET rsp,RSP-RIP+16
19587+ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
19588+ /*CFI_REL_OFFSET cs,CS-RIP+16*/
19589+ CFI_REL_OFFSET rip,RIP-RIP+16
19590+ CFI_REL_OFFSET r11,8
19591+ CFI_REL_OFFSET rcx,0
19592+ movq 8(%rsp),%r11
19593+ CFI_RESTORE r11
19594+ popq %rcx
19595+ CFI_ADJUST_CFA_OFFSET -8
19596+ CFI_RESTORE rcx
19597+ movl %ebp,%ebp /* zero extension */
19598+ movl %eax,%eax
19599+ movl $__USER32_DS,40(%rsp)
19600+ movq %rbp,32(%rsp)
19601+ movl $__USER32_CS,16(%rsp)
19602+ movl $VSYSCALL32_SYSEXIT,8(%rsp)
19603+ movq %rax,(%rsp)
19604+ cld
19605+ SAVE_ARGS 0,0,0
19606+ /* no need to do an access_ok check here because rbp has been
19607+ 32bit zero extended */
19608+1: movl (%rbp),%r9d
19609+ .section __ex_table,"a"
19610+ .quad 1b,ia32_badarg
19611+ .previous
19612+ GET_THREAD_INFO(%r10)
19613+ orl $TS_COMPAT,threadinfo_status(%r10)
19614+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
19615+ jnz sysenter_tracesys
19616+sysenter_do_call:
19617+ cmpl $(IA32_NR_syscalls-1),%eax
19618+ ja ia32_badsys
19619+ IA32_ARG_FIXUP 1
19620+ call *ia32_sys_call_table(,%rax,8)
19621+ movq %rax,RAX-ARGOFFSET(%rsp)
19622+ jmp int_ret_from_sys_call
19623+
19624+sysenter_tracesys:
19625+ SAVE_REST
19626+ CLEAR_RREGS
19627+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
19628+ movq %rsp,%rdi /* &pt_regs -> arg1 */
19629+ call syscall_trace_enter
19630+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
19631+ RESTORE_REST
19632+ movl %ebp, %ebp
19633+ /* no need to do an access_ok check here because rbp has been
19634+ 32bit zero extended */
19635+1: movl (%rbp),%r9d
19636+ .section __ex_table,"a"
19637+ .quad 1b,ia32_badarg
19638+ .previous
19639+ jmp sysenter_do_call
19640+ CFI_ENDPROC
19641+ENDPROC(ia32_sysenter_target)
19642+
19643+/*
19644+ * 32bit SYSCALL instruction entry.
19645+ *
19646+ * Arguments:
19647+ * %eax System call number.
19648+ * %ebx Arg1
19649+ * %ecx return EIP
19650+ * %edx Arg3
19651+ * %esi Arg4
19652+ * %edi Arg5
19653+ * %ebp Arg2 [note: not saved in the stack frame, should not be touched]
19654+ * %esp user stack
19655+ * 0(%esp) Arg6
19656+ *
19657+ * Interrupts on.
19658+ *
19659+ * This is purely a fast path. For anything complicated we use the int 0x80
19660+ * path below. Set up a complete hardware stack frame to share code
19661+ * with the int 0x80 path.
19662+ */
19663+ENTRY(ia32_cstar_target)
19664+ CFI_STARTPROC32 simple
19665+ CFI_DEF_CFA rsp,SS+8-RIP+16
19666+ /*CFI_REL_OFFSET ss,SS-RIP+16*/
19667+ CFI_REL_OFFSET rsp,RSP-RIP+16
19668+ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
19669+ /*CFI_REL_OFFSET cs,CS-RIP+16*/
19670+ CFI_REL_OFFSET rip,RIP-RIP+16
19671+ movl %eax,%eax /* zero extension */
19672+ movl RSP-RIP+16(%rsp),%r8d
19673+ SAVE_ARGS -8,1,1
19674+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
19675+ movq %rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
19676+ movl %ebp,%ecx
19677+ movl $__USER32_CS,CS-ARGOFFSET(%rsp)
19678+ movl $__USER32_DS,SS-ARGOFFSET(%rsp)
19679+ /* no need to do an access_ok check here because r8 has been
19680+ 32bit zero extended */
19681+ /* hardware stack frame is complete now */
19682+1: movl (%r8),%r9d
19683+ .section __ex_table,"a"
19684+ .quad 1b,ia32_badarg
19685+ .previous
19686+ GET_THREAD_INFO(%r10)
19687+ orl $TS_COMPAT,threadinfo_status(%r10)
19688+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
19689+ jnz cstar_tracesys
19690+cstar_do_call:
19691+ cmpl $IA32_NR_syscalls-1,%eax
19692+ ja ia32_badsys
19693+ IA32_ARG_FIXUP 1
19694+ call *ia32_sys_call_table(,%rax,8)
19695+ movq %rax,RAX-ARGOFFSET(%rsp)
19696+ jmp int_ret_from_sys_call
19697+
19698+cstar_tracesys:
19699+ SAVE_REST
19700+ CLEAR_RREGS
19701+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
19702+ movq %rsp,%rdi /* &pt_regs -> arg1 */
19703+ call syscall_trace_enter
19704+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
19705+ RESTORE_REST
19706+ movl RSP-ARGOFFSET(%rsp), %r8d
19707+ /* no need to do an access_ok check here because r8 has been
19708+ 32bit zero extended */
19709+1: movl (%r8),%r9d
19710+ .section __ex_table,"a"
19711+ .quad 1b,ia32_badarg
19712+ .previous
19713+ jmp cstar_do_call
19714+END(ia32_cstar_target)
19715+
19716+ia32_badarg:
19717+ movq $-EFAULT,%rax
19718+ jmp ia32_sysret
19719+ CFI_ENDPROC
19720+
19721+/*
19722+ * Emulated IA32 system calls via int 0x80.
19723+ *
19724+ * Arguments:
19725+ * %eax System call number.
19726+ * %ebx Arg1
19727+ * %ecx Arg2
19728+ * %edx Arg3
19729+ * %esi Arg4
19730+ * %edi Arg5
19731+ * %ebp Arg6 [note: not saved in the stack frame, should not be touched]
19732+ *
19733+ * Notes:
19734+ * Uses the same stack frame as the x86-64 version.
19735+ * All registers except %eax must be saved (but ptrace may violate that)
19736+ * Arguments are zero extended. For system calls that want sign extension and
19737+ * take long arguments a wrapper is needed. Most calls can just be called
19738+ * directly.
19739+ * Assumes it is only called from user space and entered with interrupts on.
19740+ */
19741+
19742+ENTRY(ia32_syscall)
19743+ CFI_STARTPROC simple
19744+ CFI_DEF_CFA rsp,SS+8-RIP+16
19745+ /*CFI_REL_OFFSET ss,SS-RIP+16*/
19746+ CFI_REL_OFFSET rsp,RSP-RIP+16
19747+ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
19748+ /*CFI_REL_OFFSET cs,CS-RIP+16*/
19749+ CFI_REL_OFFSET rip,RIP-RIP+16
19750+ CFI_REL_OFFSET r11,8
19751+ CFI_REL_OFFSET rcx,0
19752+ movq 8(%rsp),%r11
19753+ CFI_RESTORE r11
19754+ popq %rcx
19755+ CFI_ADJUST_CFA_OFFSET -8
19756+ CFI_RESTORE rcx
19757+ movl %eax,%eax
19758+ movq %rax,(%rsp)
19759+ cld
19760+	/* note the registers are not zero-extended into the stack frame
19761+	   here; this could be a problem. */
19762+ SAVE_ARGS 0,0,1
19763+ GET_THREAD_INFO(%r10)
19764+ orl $TS_COMPAT,threadinfo_status(%r10)
19765+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
19766+ jnz ia32_tracesys
19767+ia32_do_syscall:
19768+ cmpl $(IA32_NR_syscalls-1),%eax
19769+ ja ia32_badsys
19770+ IA32_ARG_FIXUP
19771+ call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
19772+ia32_sysret:
19773+ movq %rax,RAX-ARGOFFSET(%rsp)
19774+ jmp int_ret_from_sys_call
19775+
19776+ia32_tracesys:
19777+ SAVE_REST
19778+ movq $-ENOSYS,RAX(%rsp) /* really needed? */
19779+ movq %rsp,%rdi /* &pt_regs -> arg1 */
19780+ call syscall_trace_enter
19781+ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
19782+ RESTORE_REST
19783+ jmp ia32_do_syscall
19784+END(ia32_syscall)
19785+
19786+ia32_badsys:
19787+ movq $0,ORIG_RAX-ARGOFFSET(%rsp)
19788+ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
19789+ jmp int_ret_from_sys_call
19790+
19791+quiet_ni_syscall:
19792+ movq $-ENOSYS,%rax
19793+ ret
19794+ CFI_ENDPROC
19795+
19796+ .macro PTREGSCALL label, func, arg
19797+ .globl \label
19798+\label:
19799+ leaq \func(%rip),%rax
19800+ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
19801+ jmp ia32_ptregs_common
19802+ .endm
19803+
19804+ CFI_STARTPROC32
19805+
19806+ PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
19807+ PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
19808+ PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
19809+ PTREGSCALL stub32_sigsuspend, sys32_sigsuspend, %rcx
19810+ PTREGSCALL stub32_execve, sys32_execve, %rcx
19811+ PTREGSCALL stub32_fork, sys_fork, %rdi
19812+ PTREGSCALL stub32_clone, sys32_clone, %rdx
19813+ PTREGSCALL stub32_vfork, sys_vfork, %rdi
19814+ PTREGSCALL stub32_iopl, sys_iopl, %rsi
19815+ PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
19816+
19817+ENTRY(ia32_ptregs_common)
19818+ popq %r11
19819+ CFI_ENDPROC
19820+ CFI_STARTPROC32 simple
19821+ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
19822+ CFI_REL_OFFSET rax,RAX-ARGOFFSET
19823+ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
19824+ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
19825+ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
19826+ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
19827+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
19828+/* CFI_REL_OFFSET cs,CS-ARGOFFSET*/
19829+/* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
19830+ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
19831+/* CFI_REL_OFFSET ss,SS-ARGOFFSET*/
19832+ SAVE_REST
19833+ call *%rax
19834+ RESTORE_REST
19835+ jmp ia32_sysret /* misbalances the return cache */
19836+ CFI_ENDPROC
19837+END(ia32_ptregs_common)
19838+
19839+ .section .rodata,"a"
19840+ .align 8
19841+ia32_sys_call_table:
19842+ .quad sys_restart_syscall
19843+ .quad sys_exit
19844+ .quad stub32_fork
19845+ .quad sys_read
19846+ .quad sys_write
19847+ .quad compat_sys_open /* 5 */
19848+ .quad sys_close
19849+ .quad sys32_waitpid
19850+ .quad sys_creat
19851+ .quad sys_link
19852+ .quad sys_unlink /* 10 */
19853+ .quad stub32_execve
19854+ .quad sys_chdir
19855+ .quad compat_sys_time
19856+ .quad sys_mknod
19857+ .quad sys_chmod /* 15 */
19858+ .quad sys_lchown16
19859+ .quad quiet_ni_syscall /* old break syscall holder */
19860+ .quad sys_stat
19861+ .quad sys32_lseek
19862+ .quad sys_getpid /* 20 */
19863+ .quad compat_sys_mount /* mount */
19864+ .quad sys_oldumount /* old_umount */
19865+ .quad sys_setuid16
19866+ .quad sys_getuid16
19867+ .quad compat_sys_stime /* stime */ /* 25 */
19868+ .quad sys32_ptrace /* ptrace */
19869+ .quad sys_alarm
19870+ .quad sys_fstat /* (old)fstat */
19871+ .quad sys_pause
19872+ .quad compat_sys_utime /* 30 */
19873+ .quad quiet_ni_syscall /* old stty syscall holder */
19874+ .quad quiet_ni_syscall /* old gtty syscall holder */
19875+ .quad sys_access
19876+ .quad sys_nice
19877+ .quad quiet_ni_syscall /* 35 */ /* old ftime syscall holder */
19878+ .quad sys_sync
19879+ .quad sys32_kill
19880+ .quad sys_rename
19881+ .quad sys_mkdir
19882+ .quad sys_rmdir /* 40 */
19883+ .quad sys_dup
19884+ .quad sys32_pipe
19885+ .quad compat_sys_times
19886+ .quad quiet_ni_syscall /* old prof syscall holder */
19887+ .quad sys_brk /* 45 */
19888+ .quad sys_setgid16
19889+ .quad sys_getgid16
19890+ .quad sys_signal
19891+ .quad sys_geteuid16
19892+ .quad sys_getegid16 /* 50 */
19893+ .quad sys_acct
19894+ .quad sys_umount /* new_umount */
19895+ .quad quiet_ni_syscall /* old lock syscall holder */
19896+ .quad compat_sys_ioctl
19897+ .quad compat_sys_fcntl64 /* 55 */
19898+ .quad quiet_ni_syscall /* old mpx syscall holder */
19899+ .quad sys_setpgid
19900+ .quad quiet_ni_syscall /* old ulimit syscall holder */
19901+ .quad sys32_olduname
19902+ .quad sys_umask /* 60 */
19903+ .quad sys_chroot
19904+ .quad sys32_ustat
19905+ .quad sys_dup2
19906+ .quad sys_getppid
19907+ .quad sys_getpgrp /* 65 */
19908+ .quad sys_setsid
19909+ .quad sys32_sigaction
19910+ .quad sys_sgetmask
19911+ .quad sys_ssetmask
19912+ .quad sys_setreuid16 /* 70 */
19913+ .quad sys_setregid16
19914+ .quad stub32_sigsuspend
19915+ .quad compat_sys_sigpending
19916+ .quad sys_sethostname
19917+ .quad compat_sys_setrlimit /* 75 */
19918+ .quad compat_sys_old_getrlimit /* old_getrlimit */
19919+ .quad compat_sys_getrusage
19920+ .quad sys32_gettimeofday
19921+ .quad sys32_settimeofday
19922+ .quad sys_getgroups16 /* 80 */
19923+ .quad sys_setgroups16
19924+ .quad sys32_old_select
19925+ .quad sys_symlink
19926+ .quad sys_lstat
19927+ .quad sys_readlink /* 85 */
19928+#ifdef CONFIG_IA32_AOUT
19929+ .quad sys_uselib
19930+#else
19931+ .quad quiet_ni_syscall
19932+#endif
19933+ .quad sys_swapon
19934+ .quad sys_reboot
19935+ .quad compat_sys_old_readdir
19936+ .quad sys32_mmap /* 90 */
19937+ .quad sys_munmap
19938+ .quad sys_truncate
19939+ .quad sys_ftruncate
19940+ .quad sys_fchmod
19941+ .quad sys_fchown16 /* 95 */
19942+ .quad sys_getpriority
19943+ .quad sys_setpriority
19944+ .quad quiet_ni_syscall /* old profil syscall holder */
19945+ .quad compat_sys_statfs
19946+ .quad compat_sys_fstatfs /* 100 */
19947+ .quad sys_ioperm
19948+ .quad compat_sys_socketcall
19949+ .quad sys_syslog
19950+ .quad compat_sys_setitimer
19951+ .quad compat_sys_getitimer /* 105 */
19952+ .quad compat_sys_newstat
19953+ .quad compat_sys_newlstat
19954+ .quad compat_sys_newfstat
19955+ .quad sys32_uname
19956+ .quad stub32_iopl /* 110 */
19957+ .quad sys_vhangup
19958+ .quad quiet_ni_syscall /* old "idle" system call */
19959+ .quad sys32_vm86_warning /* vm86old */
19960+ .quad compat_sys_wait4
19961+ .quad sys_swapoff /* 115 */
19962+ .quad sys32_sysinfo
19963+ .quad sys32_ipc
19964+ .quad sys_fsync
19965+ .quad stub32_sigreturn
19966+ .quad stub32_clone /* 120 */
19967+ .quad sys_setdomainname
19968+ .quad sys_uname
19969+ .quad sys_modify_ldt
19970+ .quad compat_sys_adjtimex
19971+ .quad sys32_mprotect /* 125 */
19972+ .quad compat_sys_sigprocmask
19973+ .quad quiet_ni_syscall /* create_module */
19974+ .quad sys_init_module
19975+ .quad sys_delete_module
19976+ .quad quiet_ni_syscall /* 130 get_kernel_syms */
19977+ .quad sys_quotactl
19978+ .quad sys_getpgid
19979+ .quad sys_fchdir
19980+ .quad quiet_ni_syscall /* bdflush */
19981+ .quad sys_sysfs /* 135 */
19982+ .quad sys_personality
19983+ .quad quiet_ni_syscall /* for afs_syscall */
19984+ .quad sys_setfsuid16
19985+ .quad sys_setfsgid16
19986+ .quad sys_llseek /* 140 */
19987+ .quad compat_sys_getdents
19988+ .quad compat_sys_select
19989+ .quad sys_flock
19990+ .quad sys_msync
19991+ .quad compat_sys_readv /* 145 */
19992+ .quad compat_sys_writev
19993+ .quad sys_getsid
19994+ .quad sys_fdatasync
19995+ .quad sys32_sysctl /* sysctl */
19996+ .quad sys_mlock /* 150 */
19997+ .quad sys_munlock
19998+ .quad sys_mlockall
19999+ .quad sys_munlockall
20000+ .quad sys_sched_setparam
20001+ .quad sys_sched_getparam /* 155 */
20002+ .quad sys_sched_setscheduler
20003+ .quad sys_sched_getscheduler
20004+ .quad sys_sched_yield
20005+ .quad sys_sched_get_priority_max
20006+ .quad sys_sched_get_priority_min /* 160 */
20007+ .quad sys_sched_rr_get_interval
20008+ .quad compat_sys_nanosleep
20009+ .quad sys_mremap
20010+ .quad sys_setresuid16
20011+ .quad sys_getresuid16 /* 165 */
20012+ .quad sys32_vm86_warning /* vm86 */
20013+ .quad quiet_ni_syscall /* query_module */
20014+ .quad sys_poll
20015+ .quad compat_sys_nfsservctl
20016+ .quad sys_setresgid16 /* 170 */
20017+ .quad sys_getresgid16
20018+ .quad sys_prctl
20019+ .quad stub32_rt_sigreturn
20020+ .quad sys32_rt_sigaction
20021+ .quad sys32_rt_sigprocmask /* 175 */
20022+ .quad sys32_rt_sigpending
20023+ .quad compat_sys_rt_sigtimedwait
20024+ .quad sys32_rt_sigqueueinfo
20025+ .quad stub32_rt_sigsuspend
20026+ .quad sys32_pread /* 180 */
20027+ .quad sys32_pwrite
20028+ .quad sys_chown16
20029+ .quad sys_getcwd
20030+ .quad sys_capget
20031+ .quad sys_capset
20032+ .quad stub32_sigaltstack
20033+ .quad sys32_sendfile
20034+ .quad quiet_ni_syscall /* streams1 */
20035+ .quad quiet_ni_syscall /* streams2 */
20036+ .quad stub32_vfork /* 190 */
20037+ .quad compat_sys_getrlimit
20038+ .quad sys32_mmap2
20039+ .quad sys32_truncate64
20040+ .quad sys32_ftruncate64
20041+ .quad sys32_stat64 /* 195 */
20042+ .quad sys32_lstat64
20043+ .quad sys32_fstat64
20044+ .quad sys_lchown
20045+ .quad sys_getuid
20046+ .quad sys_getgid /* 200 */
20047+ .quad sys_geteuid
20048+ .quad sys_getegid
20049+ .quad sys_setreuid
20050+ .quad sys_setregid
20051+ .quad sys_getgroups /* 205 */
20052+ .quad sys_setgroups
20053+ .quad sys_fchown
20054+ .quad sys_setresuid
20055+ .quad sys_getresuid
20056+ .quad sys_setresgid /* 210 */
20057+ .quad sys_getresgid
20058+ .quad sys_chown
20059+ .quad sys_setuid
20060+ .quad sys_setgid
20061+ .quad sys_setfsuid /* 215 */
20062+ .quad sys_setfsgid
20063+ .quad sys_pivot_root
20064+ .quad sys_mincore
20065+ .quad sys_madvise
20066+ .quad compat_sys_getdents64 /* 220 getdents64 */
20067+ .quad compat_sys_fcntl64
20068+ .quad quiet_ni_syscall /* tux */
20069+ .quad quiet_ni_syscall /* security */
20070+ .quad sys_gettid
20071+ .quad sys_readahead /* 225 */
20072+ .quad sys_setxattr
20073+ .quad sys_lsetxattr
20074+ .quad sys_fsetxattr
20075+ .quad sys_getxattr
20076+ .quad sys_lgetxattr /* 230 */
20077+ .quad sys_fgetxattr
20078+ .quad sys_listxattr
20079+ .quad sys_llistxattr
20080+ .quad sys_flistxattr
20081+ .quad sys_removexattr /* 235 */
20082+ .quad sys_lremovexattr
20083+ .quad sys_fremovexattr
20084+ .quad sys_tkill
20085+ .quad sys_sendfile64
20086+ .quad compat_sys_futex /* 240 */
20087+ .quad compat_sys_sched_setaffinity
20088+ .quad compat_sys_sched_getaffinity
20089+ .quad sys32_set_thread_area
20090+ .quad sys32_get_thread_area
20091+ .quad compat_sys_io_setup /* 245 */
20092+ .quad sys_io_destroy
20093+ .quad compat_sys_io_getevents
20094+ .quad compat_sys_io_submit
20095+ .quad sys_io_cancel
20096+ .quad sys_fadvise64 /* 250 */
20097+ .quad quiet_ni_syscall /* free_huge_pages */
20098+ .quad sys_exit_group
20099+ .quad sys32_lookup_dcookie
20100+ .quad sys_epoll_create
20101+ .quad sys_epoll_ctl /* 255 */
20102+ .quad sys_epoll_wait
20103+ .quad sys_remap_file_pages
20104+ .quad sys_set_tid_address
20105+ .quad compat_sys_timer_create
20106+ .quad compat_sys_timer_settime /* 260 */
20107+ .quad compat_sys_timer_gettime
20108+ .quad sys_timer_getoverrun
20109+ .quad sys_timer_delete
20110+ .quad compat_sys_clock_settime
20111+ .quad compat_sys_clock_gettime /* 265 */
20112+ .quad compat_sys_clock_getres
20113+ .quad compat_sys_clock_nanosleep
20114+ .quad compat_sys_statfs64
20115+ .quad compat_sys_fstatfs64
20116+ .quad sys_tgkill /* 270 */
20117+ .quad compat_sys_utimes
20118+ .quad sys32_fadvise64_64
20119+ .quad quiet_ni_syscall /* sys_vserver */
20120+ .quad sys_mbind
20121+ .quad compat_sys_get_mempolicy /* 275 */
20122+ .quad sys_set_mempolicy
20123+ .quad compat_sys_mq_open
20124+ .quad sys_mq_unlink
20125+ .quad compat_sys_mq_timedsend
20126+ .quad compat_sys_mq_timedreceive /* 280 */
20127+ .quad compat_sys_mq_notify
20128+ .quad compat_sys_mq_getsetattr
20129+ .quad compat_sys_kexec_load /* reserved for kexec */
20130+ .quad compat_sys_waitid
20131+ .quad quiet_ni_syscall /* 285: sys_altroot */
20132+ .quad sys_add_key
20133+ .quad sys_request_key
20134+ .quad sys_keyctl
20135+ .quad sys_ioprio_set
20136+ .quad sys_ioprio_get /* 290 */
20137+ .quad sys_inotify_init
20138+ .quad sys_inotify_add_watch
20139+ .quad sys_inotify_rm_watch
20140+ .quad sys_migrate_pages
20141+ .quad compat_sys_openat /* 295 */
20142+ .quad sys_mkdirat
20143+ .quad sys_mknodat
20144+ .quad sys_fchownat
20145+ .quad compat_sys_futimesat
20146+ .quad sys32_fstatat /* 300 */
20147+ .quad sys_unlinkat
20148+ .quad sys_renameat
20149+ .quad sys_linkat
20150+ .quad sys_symlinkat
20151+ .quad sys_readlinkat /* 305 */
20152+ .quad sys_fchmodat
20153+ .quad sys_faccessat
20154+ .quad quiet_ni_syscall /* pselect6 for now */
20155+ .quad quiet_ni_syscall /* ppoll for now */
20156+ .quad sys_unshare /* 310 */
20157+ .quad compat_sys_set_robust_list
20158+ .quad compat_sys_get_robust_list
20159+ .quad sys_splice
20160+ .quad sys_sync_file_range
20161+ .quad sys_tee
20162+ .quad compat_sys_vmsplice
20163+ .quad compat_sys_move_pages
20164+ia32_syscall_end:
20165Index: head-2008-11-25/arch/x86/kernel/acpi/sleep_64-xen.c
20166===================================================================
20167--- /dev/null 1970-01-01 00:00:00.000000000 +0000
20168+++ head-2008-11-25/arch/x86/kernel/acpi/sleep_64-xen.c 2008-04-15 09:29:41.000000000 +0200
20169@@ -0,0 +1,146 @@
20170+/*
20171+ * acpi.c - Architecture-Specific Low-Level ACPI Support
20172+ *
20173+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
20174+ * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
20175+ * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
20176+ * Copyright (C) 2002 Andi Kleen, SuSE Labs (x86-64 port)
20177+ * Copyright (C) 2003 Pavel Machek, SuSE Labs
20178+ *
20179+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20180+ *
20181+ * This program is free software; you can redistribute it and/or modify
20182+ * it under the terms of the GNU General Public License as published by
20183+ * the Free Software Foundation; either version 2 of the License, or
20184+ * (at your option) any later version.
20185+ *
20186+ * This program is distributed in the hope that it will be useful,
20187+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
20188+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20189+ * GNU General Public License for more details.
20190+ *
20191+ * You should have received a copy of the GNU General Public License
20192+ * along with this program; if not, write to the Free Software
20193+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20194+ *
20195+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
20196+ */
20197+
20198+#include <linux/kernel.h>
20199+#include <linux/init.h>
20200+#include <linux/types.h>
20201+#include <linux/stddef.h>
20202+#include <linux/slab.h>
20203+#include <linux/pci.h>
20204+#include <linux/bootmem.h>
20205+#include <linux/acpi.h>
20206+#include <linux/cpumask.h>
20207+
20208+#include <asm/mpspec.h>
20209+#include <asm/io.h>
20210+#include <asm/apic.h>
20211+#include <asm/apicdef.h>
20212+#include <asm/page.h>
20213+#include <asm/pgtable.h>
20214+#include <asm/pgalloc.h>
20215+#include <asm/io_apic.h>
20216+#include <asm/proto.h>
20217+#include <asm/tlbflush.h>
20218+
20219+/* --------------------------------------------------------------------------
20220+ Low-Level Sleep Support
20221+ -------------------------------------------------------------------------- */
20222+
20223+#ifdef CONFIG_ACPI_SLEEP
20224+
20225+#ifndef CONFIG_ACPI_PV_SLEEP
20226+/* address in low memory of the wakeup routine. */
20227+unsigned long acpi_wakeup_address = 0;
20228+unsigned long acpi_video_flags;
20229+extern char wakeup_start, wakeup_end;
20230+
20231+extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
20232+
20233+static pgd_t low_ptr;
20234+
20235+static void init_low_mapping(void)
20236+{
20237+ pgd_t *slot0 = pgd_offset(current->mm, 0UL);
20238+ low_ptr = *slot0;
20239+ set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
20240+ WARN_ON(num_online_cpus() != 1);
20241+ local_flush_tlb();
20242+}
20243+#endif
20244+
20245+/**
20246+ * acpi_save_state_mem - save kernel state
20247+ *
20248+ * Create an identity mapped page table and copy the wakeup routine to
20249+ * low memory.
20250+ */
20251+int acpi_save_state_mem(void)
20252+{
20253+#ifndef CONFIG_ACPI_PV_SLEEP
20254+ init_low_mapping();
20255+
20256+ memcpy((void *)acpi_wakeup_address, &wakeup_start,
20257+ &wakeup_end - &wakeup_start);
20258+ acpi_copy_wakeup_routine(acpi_wakeup_address);
20259+#endif
20260+ return 0;
20261+}
20262+
20263+/*
20264+ * acpi_restore_state
20265+ */
20266+void acpi_restore_state_mem(void)
20267+{
20268+#ifndef CONFIG_ACPI_PV_SLEEP
20269+ set_pgd(pgd_offset(current->mm, 0UL), low_ptr);
20270+ local_flush_tlb();
20271+#endif
20272+}
20273+
20274+/**
20275+ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
20276+ *
20277+ * We allocate a page in low memory for the wakeup
20278+ * routine for when we come back from a sleep state. The
20279+ * runtime allocator allows specification of <16M pages, but not
20280+ * <1M pages.
20281+ */
20282+void __init acpi_reserve_bootmem(void)
20283+{
20284+#ifndef CONFIG_ACPI_PV_SLEEP
20285+ acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
20286+ if ((&wakeup_end - &wakeup_start) > PAGE_SIZE)
20287+ printk(KERN_CRIT
20288+ "ACPI: Wakeup code way too big, will crash on attempt to suspend\n");
20289+#endif
20290+}
20291+
20292+#ifndef CONFIG_ACPI_PV_SLEEP
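+/*
+ * Parse "acpi_sleep=s3_bios,s3_mode": bit 0 of acpi_video_flags requests a
+ * video BIOS re-POST on resume, bit 1 requests restoring the video mode.
+ */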
20293+static int __init acpi_sleep_setup(char *str)
20294+{
20295+ while ((str != NULL) && (*str != '\0')) {
20296+ if (strncmp(str, "s3_bios", 7) == 0)
20297+ acpi_video_flags = 1;
20298+ if (strncmp(str, "s3_mode", 7) == 0)
20299+ acpi_video_flags |= 2;
20300+ str = strchr(str, ',');
20301+ if (str != NULL)
20302+ str += strspn(str, ", \t");
20303+ }
20304+
20305+ return 1;
20306+}
20307+
20308+__setup("acpi_sleep=", acpi_sleep_setup);
20309+#endif /* CONFIG_ACPI_PV_SLEEP */
20310+
20311+#endif /*CONFIG_ACPI_SLEEP */
20312+
20313+void acpi_pci_link_exit(void)
20314+{
20315+}
20316Index: head-2008-11-25/arch/x86/kernel/apic_64-xen.c
20317===================================================================
20318--- /dev/null 1970-01-01 00:00:00.000000000 +0000
20319+++ head-2008-11-25/arch/x86/kernel/apic_64-xen.c 2007-06-12 13:13:01.000000000 +0200
20320@@ -0,0 +1,197 @@
20321+/*
20322+ * Local APIC handling, local APIC timers
20323+ *
20324+ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
20325+ *
20326+ * Fixes
20327+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
20328+ * thanks to Eric Gilmore
20329+ * and Rolf G. Tews
20330+ * for testing these extensively.
20331+ * Maciej W. Rozycki : Various updates and fixes.
20332+ * Mikael Pettersson : Power Management for UP-APIC.
20333+ * Pavel Machek and
20334+ * Mikael Pettersson : PM converted to driver model.
20335+ */
20336+
20337+#include <linux/init.h>
20338+
20339+#include <linux/mm.h>
20340+#include <linux/delay.h>
20341+#include <linux/bootmem.h>
20342+#include <linux/smp_lock.h>
20343+#include <linux/interrupt.h>
20344+#include <linux/mc146818rtc.h>
20345+#include <linux/kernel_stat.h>
20346+#include <linux/sysdev.h>
20347+#include <linux/module.h>
20348+
20349+#include <asm/atomic.h>
20350+#include <asm/smp.h>
20351+#include <asm/mtrr.h>
20352+#include <asm/mpspec.h>
20353+#include <asm/desc.h>
20354+#include <asm/arch_hooks.h>
20355+#include <asm/hpet.h>
20356+#include <asm/idle.h>
20357+
20358+int apic_verbosity;
20359+
20360+/*
20361+ * 'What should we do if we get a hw irq event on an illegal vector?'
20362+ * Each architecture has to answer this itself.
20363+ */
20364+void ack_bad_irq(unsigned int irq)
20365+{
20366+ printk("unexpected IRQ trap at vector %02x\n", irq);
20367+ /*
20368+ * Currently unexpected vectors happen only on SMP and APIC.
20369+ * We _must_ ack these because every local APIC has only N
20370+ * irq slots per priority level, and a 'hanging, unacked' IRQ
20371+ * holds up an irq slot - in excessive cases (when multiple
20372+ * unexpected vectors occur) that might lock up the APIC
20373+ * completely.
20374+ * But don't ack when the APIC is disabled. -AK
20375+ */
20376+ if (!disable_apic)
20377+ ack_APIC_irq();
20378+}
20379+
20380+int setup_profiling_timer(unsigned int multiplier)
20381+{
20382+ return -EINVAL;
20383+}
20384+
20385+void smp_local_timer_interrupt(struct pt_regs *regs)
20386+{
20387+ profile_tick(CPU_PROFILING, regs);
20388+#ifndef CONFIG_XEN
20389+#ifdef CONFIG_SMP
20390+ update_process_times(user_mode(regs));
20391+#endif
20392+#endif
20393+ /*
20394+ * We take the 'long' return path, and there every subsystem
20395+ * grabs the appropriate locks (kernel lock/ irq lock).
20396+ *
20397+ * we might want to decouple profiling from the 'long path',
20398+ * and do the profiling totally in assembly.
20399+ *
20400+ * Currently this isn't too much of an issue (performance wise),
20401+ * we can take more than 100K local irqs per second on a 100 MHz P5.
20402+ */
20403+}
20404+
20405+/*
20406+ * Local APIC timer interrupt. This is the most natural way for doing
20407+ * local interrupts, but local timer interrupts can be emulated by
20408+ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
20409+ *
20410+ * [ if a single-CPU system runs an SMP kernel then we call the local
20411+ * interrupt as well. Thus we cannot inline the local irq ... ]
20412+ */
20413+void smp_apic_timer_interrupt(struct pt_regs *regs)
20414+{
20415+ /*
20416+ * the NMI deadlock-detector uses this.
20417+ */
20418+ add_pda(apic_timer_irqs, 1);
20419+
20420+ /*
20421+ * NOTE! We'd better ACK the irq immediately,
20422+ * because timer handling can be slow.
20423+ */
20424+ ack_APIC_irq();
20425+ /*
20426+ * update_process_times() expects us to have done irq_enter().
20427+ * Besides, if we don't, timer interrupts ignore the global
20428+ * interrupt lock, which is the WrongThing (tm) to do.
20429+ */
20430+ exit_idle();
20431+ irq_enter();
20432+ smp_local_timer_interrupt(regs);
20433+ irq_exit();
20434+}
20435+
20436+/*
20437+ * This interrupt should _never_ happen with our APIC/SMP architecture
20438+ */
20439+asmlinkage void smp_spurious_interrupt(void)
20440+{
20441+ unsigned int v;
20442+ exit_idle();
20443+ irq_enter();
20444+ /*
20445+ * Check if this really is a spurious interrupt and ACK it
20446+ * if it is a vectored one. Just in case...
20447+ * Spurious interrupts should not be ACKed.
20448+ */
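+	/*
+	 * The ISR is spread over registers of 32 vectors each, 0x10 bytes
+	 * apart, hence the (vector & ~0x1f) >> 1 byte offset.
+	 */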
20449+ v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
20450+ if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
20451+ ack_APIC_irq();
20452+
20453+#if 0
20454+ static unsigned long last_warning;
20455+ static unsigned long skipped;
20456+
20457+ /* see sw-dev-man vol 3, chapter 7.4.13.5 */
20458+ if (time_before(last_warning+30*HZ,jiffies)) {
20459+ printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
20460+ smp_processor_id(), skipped);
20461+ last_warning = jiffies;
20462+ skipped = 0;
20463+ } else {
20464+ skipped++;
20465+ }
20466+#endif
20467+ irq_exit();
20468+}
20469+
20470+/*
20471+ * This interrupt should never happen with our APIC/SMP architecture
20472+ */
20473+
20474+asmlinkage void smp_error_interrupt(void)
20475+{
20476+ unsigned int v, v1;
20477+
20478+ exit_idle();
20479+ irq_enter();
20480+ /* First tickle the hardware, only then report what went on. -- REW */
20481+ v = apic_read(APIC_ESR);
20482+ apic_write(APIC_ESR, 0);
20483+ v1 = apic_read(APIC_ESR);
20484+ ack_APIC_irq();
20485+ atomic_inc(&irq_err_count);
20486+
20487+ /* Here is what the APIC error bits mean:
20488+ 0: Send CS error
20489+ 1: Receive CS error
20490+ 2: Send accept error
20491+ 3: Receive accept error
20492+ 4: Reserved
20493+ 5: Send illegal vector
20494+ 6: Received illegal vector
20495+ 7: Illegal register address
20496+ */
20497+ printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
20498+ smp_processor_id(), v , v1);
20499+ irq_exit();
20500+}
20501+
20502+int disable_apic;
20503+
20504+/*
20505+ * This initializes the IO-APIC and APIC hardware if this is
20506+ * a UP kernel.
20507+ */
20508+int __init APIC_init_uniprocessor (void)
20509+{
20510+#ifdef CONFIG_X86_IO_APIC
20511+ if (smp_found_config)
20512+ if (!skip_ioapic_setup && nr_ioapics)
20513+ setup_IO_APIC();
20514+#endif
20515+
20516+ return 1;
20517+}
20518Index: head-2008-11-25/arch/x86/kernel/e820_64-xen.c
20519===================================================================
20520--- /dev/null 1970-01-01 00:00:00.000000000 +0000
20521+++ head-2008-11-25/arch/x86/kernel/e820_64-xen.c 2008-04-22 19:56:27.000000000 +0200
20522@@ -0,0 +1,798 @@
20523+/*
20524+ * Handle the memory map.
20525+ * The functions here do the job until bootmem takes over.
20526+ *
20527+ * Getting sanitize_e820_map() in sync with i386 version by applying change:
20528+ * - Provisions for empty E820 memory regions (reported by certain BIOSes).
20529+ * Alex Achenbach <xela@slit.de>, December 2002.
20530+ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
20531+ *
20532+ */
20533+#include <linux/kernel.h>
20534+#include <linux/types.h>
20535+#include <linux/init.h>
20536+#include <linux/bootmem.h>
20537+#include <linux/ioport.h>
20538+#include <linux/string.h>
20539+#include <linux/kexec.h>
20540+#include <linux/module.h>
20541+
20542+#include <asm/pgtable.h>
20543+#include <asm/page.h>
20544+#include <asm/e820.h>
20545+#include <asm/proto.h>
20546+#include <asm/bootsetup.h>
20547+#include <asm/sections.h>
20548+#include <xen/interface/memory.h>
20549+
20550+/*
20551+ * PFN of last memory page.
20552+ */
20553+unsigned long end_pfn;
20554+EXPORT_SYMBOL(end_pfn);
20555+
20556+/*
20557+ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
20558+ * The direct mapping extends to end_pfn_map, so that we can directly access
20559+ * apertures, ACPI and other tables without having to play with fixmaps.
20560+ */
20561+unsigned long end_pfn_map;
20562+
20563+/*
20564+ * Last pfn which the user wants to use.
20565+ */
20566+unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
20567+
20568+extern struct resource code_resource, data_resource;
20569+
20570+#ifdef CONFIG_XEN
20571+extern struct e820map machine_e820;
20572+#endif
20573+
20574+/* Check for some hardcoded bad areas that early boot is not allowed to touch */
20575+static inline int bad_addr(unsigned long *addrp, unsigned long size)
20576+{
20577+ unsigned long addr = *addrp, last = addr + size;
20578+
20579+#ifndef CONFIG_XEN
20580+ /* various gunk below that needed for SMP startup */
20581+ if (addr < 0x8000) {
20582+ *addrp = 0x8000;
20583+ return 1;
20584+ }
20585+
20586+ /* direct mapping tables of the kernel */
20587+ if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
20588+ *addrp = table_end << PAGE_SHIFT;
20589+ return 1;
20590+ }
20591+
20592+ /* initrd */
20593+#ifdef CONFIG_BLK_DEV_INITRD
20594+ if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
20595+ addr < INITRD_START+INITRD_SIZE) {
20596+ *addrp = INITRD_START + INITRD_SIZE;
20597+ return 1;
20598+ }
20599+#endif
20600+ /* kernel code + 640k memory hole (later should not be needed, but
20601+ be paranoid for now) */
20602+ if (last >= 640*1024 && addr < 1024*1024) {
20603+ *addrp = 1024*1024;
20604+ return 1;
20605+ }
20606+ if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
20607+ *addrp = __pa_symbol(&_end);
20608+ return 1;
20609+ }
20610+
20611+ if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
20612+ *addrp = ebda_addr + ebda_size;
20613+ return 1;
20614+ }
20615+
20616+ /* XXX ramdisk image here? */
20617+#else
20618+ if (last < (table_end<<PAGE_SHIFT)) {
20619+ *addrp = table_end << PAGE_SHIFT;
20620+ return 1;
20621+ }
20622+#endif
20623+ return 0;
20624+}
20625+
20626+/*
20627+ * This function checks if any part of the range <start,end> is mapped
20628+ * with type.
20629+ */
20630+int e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
20631+{
20632+ int i;
20633+
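+	/*
+	 * Under Xen only the initial domain can see the real (machine)
+	 * memory map; other domains report nothing as mapped here.
+	 */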
20634+#ifndef CONFIG_XEN
20635+ for (i = 0; i < e820.nr_map; i++) {
20636+ struct e820entry *ei = &e820.map[i];
20637+#else
20638+ if (!is_initial_xendomain())
20639+ return 0;
20640+ for (i = 0; i < machine_e820.nr_map; i++) {
20641+ const struct e820entry *ei = &machine_e820.map[i];
20642+#endif
20643+
20644+ if (type && ei->type != type)
20645+ continue;
20646+ if (ei->addr >= end || ei->addr + ei->size <= start)
20647+ continue;
20648+ return 1;
20649+ }
20650+ return 0;
20651+}
20652+EXPORT_SYMBOL_GPL(e820_any_mapped);
20653+
20654+/*
20655+ * This function checks if the entire range <start,end> is mapped with type.
20656+ *
20657+ * Note: this function only works correctly if the e820 table is sorted and
20658+ * non-overlapping, which is the case
20659+ */
20660+int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
20661+{
20662+ int i;
20663+
20664+#ifndef CONFIG_XEN
20665+ for (i = 0; i < e820.nr_map; i++) {
20666+ struct e820entry *ei = &e820.map[i];
20667+#else
20668+ if (!is_initial_xendomain())
20669+ return 0;
20670+ for (i = 0; i < machine_e820.nr_map; i++) {
20671+ const struct e820entry *ei = &machine_e820.map[i];
20672+#endif
20673+
20674+ if (type && ei->type != type)
20675+ continue;
20676+		/* does the region (at least partly) overlap the current region? */
20677+ if (ei->addr >= end || ei->addr + ei->size <= start)
20678+ continue;
20679+
20680+ /* if the region is at the beginning of <start,end> we move
20681+ * start to the end of the region since it's ok until there
20682+ */
20683+ if (ei->addr <= start)
20684+ start = ei->addr + ei->size;
20685+ /* if start is now at or beyond end, we're done, full coverage */
20686+ if (start >= end)
20687+ return 1; /* we're done */
20688+ }
20689+ return 0;
20690+}
20691+
20692+/*
20693+ * Find a free area in a specific range.
20694+ */
20695+unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
20696+{
20697+ int i;
20698+ for (i = 0; i < e820.nr_map; i++) {
20699+ struct e820entry *ei = &e820.map[i];
20700+ unsigned long addr = ei->addr, last;
20701+ if (ei->type != E820_RAM)
20702+ continue;
20703+ if (addr < start)
20704+ addr = start;
20705+ if (addr > ei->addr + ei->size)
20706+ continue;
20707+ while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
20708+ ;
20709+ last = addr + size;
20710+ if (last > ei->addr + ei->size)
20711+ continue;
20712+ if (last > end)
20713+ continue;
20714+ return addr;
20715+ }
20716+ return -1UL;
20717+}
20718+
20719+/*
20720+ * Free bootmem based on the e820 table for a node.
20721+ */
20722+void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
20723+{
20724+ int i;
20725+ for (i = 0; i < e820.nr_map; i++) {
20726+ struct e820entry *ei = &e820.map[i];
20727+ unsigned long last, addr;
20728+
20729+ if (ei->type != E820_RAM ||
20730+ ei->addr+ei->size <= start ||
20731+ ei->addr >= end)
20732+ continue;
20733+
20734+ addr = round_up(ei->addr, PAGE_SIZE);
20735+ if (addr < start)
20736+ addr = start;
20737+
20738+ last = round_down(ei->addr + ei->size, PAGE_SIZE);
20739+ if (last >= end)
20740+ last = end;
20741+
20742+ if (last > addr && last-addr >= PAGE_SIZE)
20743+ free_bootmem_node(pgdat, addr, last-addr);
20744+ }
20745+}
20746+
20747+/*
20748+ * Find the highest page frame number we have available
20749+ */
20750+unsigned long __init e820_end_of_ram(void)
20751+{
20752+ int i;
20753+ unsigned long end_pfn = 0;
20754+
20755+ for (i = 0; i < e820.nr_map; i++) {
20756+ struct e820entry *ei = &e820.map[i];
20757+ unsigned long start, end;
20758+
20759+ start = round_up(ei->addr, PAGE_SIZE);
20760+ end = round_down(ei->addr + ei->size, PAGE_SIZE);
20761+ if (start >= end)
20762+ continue;
20763+ if (ei->type == E820_RAM) {
20764+ if (end > end_pfn<<PAGE_SHIFT)
20765+ end_pfn = end>>PAGE_SHIFT;
20766+ } else {
20767+ if (end > end_pfn_map<<PAGE_SHIFT)
20768+ end_pfn_map = end>>PAGE_SHIFT;
20769+ }
20770+ }
20771+
20772+ if (end_pfn > end_pfn_map)
20773+ end_pfn_map = end_pfn;
20774+ if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
20775+ end_pfn_map = MAXMEM>>PAGE_SHIFT;
20776+ if (end_pfn > end_user_pfn)
20777+ end_pfn = end_user_pfn;
20778+ if (end_pfn > end_pfn_map)
20779+ end_pfn = end_pfn_map;
20780+
20781+ return end_pfn;
20782+}
20783+
20784+/*
20785+ * Compute how much memory is missing in a range.
20786+ * Unlike the other functions in this file the arguments are in page numbers.
20787+ */
20788+unsigned long __init
20789+e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
20790+{
20791+ unsigned long ram = 0;
20792+ unsigned long start = start_pfn << PAGE_SHIFT;
20793+ unsigned long end = end_pfn << PAGE_SHIFT;
20794+ int i;
20795+ for (i = 0; i < e820.nr_map; i++) {
20796+ struct e820entry *ei = &e820.map[i];
20797+ unsigned long last, addr;
20798+
20799+ if (ei->type != E820_RAM ||
20800+ ei->addr+ei->size <= start ||
20801+ ei->addr >= end)
20802+ continue;
20803+
20804+ addr = round_up(ei->addr, PAGE_SIZE);
20805+ if (addr < start)
20806+ addr = start;
20807+
20808+ last = round_down(ei->addr + ei->size, PAGE_SIZE);
20809+ if (last >= end)
20810+ last = end;
20811+
20812+ if (last > addr)
20813+ ram += last - addr;
20814+ }
20815+ return ((end - start) - ram) >> PAGE_SHIFT;
20816+}
20817+
20818+/*
20819+ * Mark e820 reserved areas as busy for the resource manager.
20820+ */
20821+void __init e820_reserve_resources(struct e820entry *e820, int nr_map)
20822+{
20823+ int i;
20824+ for (i = 0; i < nr_map; i++) {
20825+ struct resource *res;
20826+ res = alloc_bootmem_low(sizeof(struct resource));
20827+ switch (e820[i].type) {
20828+ case E820_RAM: res->name = "System RAM"; break;
20829+ case E820_ACPI: res->name = "ACPI Tables"; break;
20830+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
20831+ default: res->name = "reserved";
20832+ }
20833+ res->start = e820[i].addr;
20834+ res->end = res->start + e820[i].size - 1;
20835+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
20836+ request_resource(&iomem_resource, res);
20837+ if (e820[i].type == E820_RAM) {
20838+ /*
20839+ * We don't know which RAM region contains kernel data,
20840+ * so we try it repeatedly and let the resource manager
20841+ * test it.
20842+ */
20843+#ifndef CONFIG_XEN
20844+ request_resource(res, &code_resource);
20845+ request_resource(res, &data_resource);
20846+#endif
20847+#ifdef CONFIG_KEXEC
20848+ if (crashk_res.start != crashk_res.end)
20849+ request_resource(res, &crashk_res);
20850+#ifdef CONFIG_XEN
20851+ xen_machine_kexec_register_resources(res);
20852+#endif
20853+#endif
20854+ }
20855+ }
20856+}
20857+
20858+/*
20859+ * Add a memory region to the kernel e820 map.
20860+ */
20861+void __init add_memory_region(unsigned long start, unsigned long size, int type)
20862+{
20863+ int x = e820.nr_map;
20864+
20865+ if (x == E820MAX) {
20866+ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
20867+ return;
20868+ }
20869+
20870+ e820.map[x].addr = start;
20871+ e820.map[x].size = size;
20872+ e820.map[x].type = type;
20873+ e820.nr_map++;
20874+}
20875+
20876+void __init e820_print_map(char *who)
20877+{
20878+ int i;
20879+
20880+ for (i = 0; i < e820.nr_map; i++) {
20881+ printk(" %s: %016Lx - %016Lx ", who,
20882+ (unsigned long long) e820.map[i].addr,
20883+ (unsigned long long) (e820.map[i].addr + e820.map[i].size));
20884+ switch (e820.map[i].type) {
20885+ case E820_RAM: printk("(usable)\n");
20886+ break;
20887+ case E820_RESERVED:
20888+ printk("(reserved)\n");
20889+ break;
20890+ case E820_ACPI:
20891+ printk("(ACPI data)\n");
20892+ break;
20893+ case E820_NVS:
20894+ printk("(ACPI NVS)\n");
20895+ break;
20896+ default: printk("type %u\n", e820.map[i].type);
20897+ break;
20898+ }
20899+ }
20900+}
20901+
20902+/*
20903+ * Sanitize the BIOS e820 map.
20904+ *
20905+ * Some e820 responses include overlapping entries. The following
20906+ * replaces the original e820 map with a new one, removing overlaps.
20907+ *
20908+ */
20909+static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
20910+{
20911+ struct change_member {
20912+ struct e820entry *pbios; /* pointer to original bios entry */
20913+ unsigned long long addr; /* address for this change point */
20914+ };
20915+ static struct change_member change_point_list[2*E820MAX] __initdata;
20916+ static struct change_member *change_point[2*E820MAX] __initdata;
20917+ static struct e820entry *overlap_list[E820MAX] __initdata;
20918+ static struct e820entry new_bios[E820MAX] __initdata;
20919+ struct change_member *change_tmp;
20920+ unsigned long current_type, last_type;
20921+ unsigned long long last_addr;
20922+ int chgidx, still_changing;
20923+ int overlap_entries;
20924+ int new_bios_entry;
20925+ int old_nr, new_nr, chg_nr;
20926+ int i;
20927+
20928+ /*
20929+ Visually we're performing the following (1,2,3,4 = memory types)...
20930+
20931+ Sample memory map (w/overlaps):
20932+ ____22__________________
20933+ ______________________4_
20934+ ____1111________________
20935+ _44_____________________
20936+ 11111111________________
20937+ ____________________33__
20938+ ___________44___________
20939+ __________33333_________
20940+ ______________22________
20941+ ___________________2222_
20942+ _________111111111______
20943+ _____________________11_
20944+ _________________4______
20945+
20946+ Sanitized equivalent (no overlap):
20947+ 1_______________________
20948+ _44_____________________
20949+ ___1____________________
20950+ ____22__________________
20951+ ______11________________
20952+ _________1______________
20953+ __________3_____________
20954+ ___________44___________
20955+ _____________33_________
20956+ _______________2________
20957+ ________________1_______
20958+ _________________4______
20959+ ___________________2____
20960+ ____________________33__
20961+ ______________________4_
20962+ */
20963+
20964+ /* if there's only one memory region, don't bother */
20965+ if (*pnr_map < 2)
20966+ return -1;
20967+
20968+ old_nr = *pnr_map;
20969+
20970+ /* bail out if we find any unreasonable addresses in bios map */
20971+ for (i=0; i<old_nr; i++)
20972+ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
20973+ return -1;
20974+
20975+ /* create pointers for initial change-point information (for sorting) */
20976+ for (i=0; i < 2*old_nr; i++)
20977+ change_point[i] = &change_point_list[i];
20978+
20979+ /* record all known change-points (starting and ending addresses),
20980+ omitting those that are for empty memory regions */
20981+ chgidx = 0;
20982+ for (i=0; i < old_nr; i++) {
20983+ if (biosmap[i].size != 0) {
20984+ change_point[chgidx]->addr = biosmap[i].addr;
20985+ change_point[chgidx++]->pbios = &biosmap[i];
20986+ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
20987+ change_point[chgidx++]->pbios = &biosmap[i];
20988+ }
20989+ }
20990+ chg_nr = chgidx;
20991+
20992+ /* sort change-point list by memory addresses (low -> high) */
20993+ still_changing = 1;
20994+ while (still_changing) {
20995+ still_changing = 0;
20996+ for (i=1; i < chg_nr; i++) {
20997+ /* if <current_addr> > <last_addr>, swap */
20998+ /* or, if current=<start_addr> & last=<end_addr>, swap */
20999+ if ((change_point[i]->addr < change_point[i-1]->addr) ||
21000+ ((change_point[i]->addr == change_point[i-1]->addr) &&
21001+ (change_point[i]->addr == change_point[i]->pbios->addr) &&
21002+ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
21003+ )
21004+ {
21005+ change_tmp = change_point[i];
21006+ change_point[i] = change_point[i-1];
21007+ change_point[i-1] = change_tmp;
21008+ still_changing=1;
21009+ }
21010+ }
21011+ }
21012+
21013+ /* create a new bios memory map, removing overlaps */
21014+ overlap_entries=0; /* number of entries in the overlap table */
21015+ new_bios_entry=0; /* index for creating new bios map entries */
21016+ last_type = 0; /* start with undefined memory type */
21017+ last_addr = 0; /* start with 0 as last starting address */
21018+	/* loop through change-points, determining the effect on the new bios map */
21019+ for (chgidx=0; chgidx < chg_nr; chgidx++)
21020+ {
21021+ /* keep track of all overlapping bios entries */
21022+ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
21023+ {
21024+ /* add map entry to overlap list (> 1 entry implies an overlap) */
21025+ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
21026+ }
21027+ else
21028+ {
21029+ /* remove entry from list (order independent, so swap with last) */
21030+ for (i=0; i<overlap_entries; i++)
21031+ {
21032+ if (overlap_list[i] == change_point[chgidx]->pbios)
21033+ overlap_list[i] = overlap_list[overlap_entries-1];
21034+ }
21035+ overlap_entries--;
21036+ }
21037+ /* if there are overlapping entries, decide which "type" to use */
21038+ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
21039+ current_type = 0;
21040+ for (i=0; i<overlap_entries; i++)
21041+ if (overlap_list[i]->type > current_type)
21042+ current_type = overlap_list[i]->type;
21043+ /* continue building up new bios map based on this information */
21044+ if (current_type != last_type) {
21045+ if (last_type != 0) {
21046+ new_bios[new_bios_entry].size =
21047+ change_point[chgidx]->addr - last_addr;
21048+ /* move forward only if the new size was non-zero */
21049+ if (new_bios[new_bios_entry].size != 0)
21050+ if (++new_bios_entry >= E820MAX)
21051+ break; /* no more space left for new bios entries */
21052+ }
21053+ if (current_type != 0) {
21054+ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
21055+ new_bios[new_bios_entry].type = current_type;
21056+ last_addr=change_point[chgidx]->addr;
21057+ }
21058+ last_type = current_type;
21059+ }
21060+ }
21061+ new_nr = new_bios_entry; /* retain count for new bios entries */
21062+
21063+ /* copy new bios mapping into original location */
21064+ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
21065+ *pnr_map = new_nr;
21066+
21067+ return 0;
21068+}
21069+
21070+/*
21071+ * Copy the BIOS e820 map into a safe place.
21072+ *
21073+ * Sanity-check it while we're at it.
21074+ *
21075+ * If we're lucky and live on a modern system, the setup code
21076+ * will have given us a memory map that we can use to properly
21077+ * set up memory. If we aren't, we'll fake a memory map.
21078+ *
21079+ * We check to see that the memory map contains at least 2 elements
21080+ * before we'll use it, because the detection code in setup.S may
21081+ * not be perfect and most every PC known to man has two memory
21082+ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
21083+ * thinkpad 560x, for example, does not cooperate with the memory
21084+ * detection code.)
21085+ */
21086+static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
21087+{
21088+#ifndef CONFIG_XEN
21089+ /* Only one memory region (or negative)? Ignore it */
21090+ if (nr_map < 2)
21091+ return -1;
21092+#else
21093+ BUG_ON(nr_map < 1);
21094+#endif
21095+
21096+ do {
21097+ unsigned long start = biosmap->addr;
21098+ unsigned long size = biosmap->size;
21099+ unsigned long end = start + size;
21100+ unsigned long type = biosmap->type;
21101+
21102+ /* Overflow in 64 bits? Ignore the memory map. */
21103+ if (start > end)
21104+ return -1;
21105+
21106+#ifndef CONFIG_XEN
21107+ /*
21108+ * Some BIOSes claim RAM in the 640k - 1M region.
21109+ * Not right. Fix it up.
21110+ *
21111+ * This should be removed on Hammer which is supposed to not
21112+ * have non e820 covered ISA mappings there, but I had some strange
21113+ * problems so it stays for now. -AK
21114+ */
21115+ if (type == E820_RAM) {
21116+ if (start < 0x100000ULL && end > 0xA0000ULL) {
21117+ if (start < 0xA0000ULL)
21118+ add_memory_region(start, 0xA0000ULL-start, type);
21119+ if (end <= 0x100000ULL)
21120+ continue;
21121+ start = 0x100000ULL;
21122+ size = end - start;
21123+ }
21124+ }
21125+#endif
21126+
21127+ add_memory_region(start, size, type);
21128+ } while (biosmap++,--nr_map);
21129+
21130+#ifdef CONFIG_XEN
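+	/*
+	 * The initial domain additionally fetches the host machine's E820
+	 * map from the hypervisor; other domains reuse the pseudo-physical
+	 * map built above.
+	 */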
21131+ if (is_initial_xendomain()) {
21132+ struct xen_memory_map memmap;
21133+
21134+ memmap.nr_entries = E820MAX;
21135+ set_xen_guest_handle(memmap.buffer, machine_e820.map);
21136+
21137+ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
21138+ BUG();
21139+ machine_e820.nr_map = memmap.nr_entries;
21140+ } else
21141+ machine_e820 = e820;
21142+#endif
21143+
21144+ return 0;
21145+}
21146+
21147+#ifndef CONFIG_XEN
21148+void __init setup_memory_region(void)
21149+{
21150+ char *who = "BIOS-e820";
21151+
21152+ /*
21153+ * Try to copy the BIOS-supplied E820-map.
21154+ *
21155+ * Otherwise fake a memory map; one section from 0k->640k,
21156+ * the next section from 1mb->appropriate_mem_k
21157+ */
21158+ sanitize_e820_map(E820_MAP, &E820_MAP_NR);
21159+ if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
21160+ unsigned long mem_size;
21161+
21162+ /* compare results from other methods and take the greater */
21163+ if (ALT_MEM_K < EXT_MEM_K) {
21164+ mem_size = EXT_MEM_K;
21165+ who = "BIOS-88";
21166+ } else {
21167+ mem_size = ALT_MEM_K;
21168+ who = "BIOS-e801";
21169+ }
21170+
21171+ e820.nr_map = 0;
21172+ add_memory_region(0, LOWMEMSIZE(), E820_RAM);
21173+ add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
21174+ }
21175+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
21176+ e820_print_map(who);
21177+}
21178+
21179+#else /* CONFIG_XEN */
21180+
21181+void __init setup_memory_region(void)
21182+{
21183+ int rc;
21184+ struct xen_memory_map memmap;
21185+ /*
21186+	 * This is rather large for a stack variable, but this early in
21187+	 * the boot process we know we have plenty of slack space.
21188+ */
21189+ struct e820entry map[E820MAX];
21190+
21191+ memmap.nr_entries = E820MAX;
21192+ set_xen_guest_handle(memmap.buffer, map);
21193+
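+	/*
+	 * Ask the hypervisor for this domain's pseudo-physical memory map;
+	 * if the hypercall is unimplemented, fall back to a single RAM
+	 * region sized from the pages handed over at start of day.
+	 */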
21194+ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
21195+ if ( rc == -ENOSYS ) {
21196+ memmap.nr_entries = 1;
21197+ map[0].addr = 0ULL;
21198+ map[0].size = xen_start_info->nr_pages << PAGE_SHIFT;
21199+ /* 8MB slack (to balance backend allocations). */
21200+ map[0].size += 8 << 20;
21201+ map[0].type = E820_RAM;
21202+ rc = 0;
21203+ }
21204+ BUG_ON(rc);
21205+
21206+ sanitize_e820_map(map, (char *)&memmap.nr_entries);
21207+
21208+ BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
21209+
21210+ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
21211+ e820_print_map("Xen");
21212+}
21213+#endif
21214+
21215+void __init parse_memopt(char *p, char **from)
21216+{
21217+ int i;
21218+ unsigned long current_end;
21219+ unsigned long end;
21220+
21221+ end_user_pfn = memparse(p, from);
21222+ end_user_pfn >>= PAGE_SHIFT;
21223+
21224+ end = end_user_pfn<<PAGE_SHIFT;
21225+ i = e820.nr_map-1;
21226+ current_end = e820.map[i].addr + e820.map[i].size;
21227+
21228+ if (current_end < end) {
21229+ /*
21230+ * The e820 map ends before our requested size so
21231+ * extend the final entry to the requested address.
21232+ */
21233+ if (e820.map[i].type == E820_RAM)
21234+ e820.map[i].size = end - e820.map[i].addr;
21235+ else
21236+ add_memory_region(current_end, end - current_end, E820_RAM);
21237+ }
21238+}
21239+
21240+void __init parse_memmapopt(char *p, char **from)
21241+{
21242+ unsigned long long start_at, mem_size;
21243+
21244+ mem_size = memparse(p, from);
21245+ p = *from;
21246+ if (*p == '@') {
21247+ start_at = memparse(p+1, from);
21248+ add_memory_region(start_at, mem_size, E820_RAM);
21249+ } else if (*p == '#') {
21250+ start_at = memparse(p+1, from);
21251+ add_memory_region(start_at, mem_size, E820_ACPI);
21252+ } else if (*p == '$') {
21253+ start_at = memparse(p+1, from);
21254+ add_memory_region(start_at, mem_size, E820_RESERVED);
21255+ } else {
21256+ end_user_pfn = (mem_size >> PAGE_SHIFT);
21257+ }
21258+ p = *from;
21259+}
21260+
21261+unsigned long pci_mem_start = 0xaeedbabe;
21262+EXPORT_SYMBOL(pci_mem_start);
21263+
21264+/*
21265+ * Search for the biggest gap in the low 32 bits of the e820
21266+ * memory space. We pass this space to PCI to assign MMIO resources
21267+ * for hotplug or unconfigured devices.
21268+ * Hopefully the BIOS left enough space.
21269+ */
21270+__init void e820_setup_gap(struct e820entry *e820, int nr_map)
21271+{
21272+ unsigned long gapstart, gapsize, round;
21273+ unsigned long last;
21274+ int i;
21275+ int found = 0;
21276+
21277+ last = 0x100000000ull;
21278+ gapstart = 0x10000000;
21279+ gapsize = 0x400000;
21280+ i = nr_map;
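+	/*
+	 * Walk the map from the highest entry downwards: "last" tracks the
+	 * lowest start address seen so far (initially 4GB), so "last - end"
+	 * is the hole above the current entry; remember the largest one.
+	 */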
21281+ while (--i >= 0) {
21282+ unsigned long long start = e820[i].addr;
21283+ unsigned long long end = start + e820[i].size;
21284+
21285+ /*
21286+ * Since "last" is at most 4GB, we know we'll
21287+ * fit in 32 bits if this condition is true
21288+ */
21289+ if (last > end) {
21290+ unsigned long gap = last - end;
21291+
21292+ if (gap > gapsize) {
21293+ gapsize = gap;
21294+ gapstart = end;
21295+ found = 1;
21296+ }
21297+ }
21298+ if (start < last)
21299+ last = start;
21300+ }
21301+
21302+ if (!found) {
21303+ gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
21304+ printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
21305+ KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
21306+ }
21307+
21308+ /*
21309+ * See how much we want to round up: start off with
21310+ * rounding to the next 1MB area.
21311+ */
21312+ round = 0x100000;
21313+ while ((gapsize >> 4) > round)
21314+ round += round;
21315+ /* Fun with two's complement */
21316+ pci_mem_start = (gapstart + round) & -round;
21317+
21318+ printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
21319+ pci_mem_start, gapstart, gapsize);
21320+}
21321Index: head-2008-11-25/arch/x86/kernel/early_printk-xen.c
21322===================================================================
21323--- /dev/null 1970-01-01 00:00:00.000000000 +0000
21324+++ head-2008-11-25/arch/x86/kernel/early_printk-xen.c 2007-06-12 13:13:01.000000000 +0200
21325@@ -0,0 +1,302 @@
21326+#include <linux/console.h>
21327+#include <linux/kernel.h>
21328+#include <linux/init.h>
21329+#include <linux/string.h>
21330+#include <linux/screen_info.h>
21331+#include <asm/io.h>
21332+#include <asm/processor.h>
21333+#include <asm/fcntl.h>
21334+
21335+/* Simple VGA output */
21336+
21337+#ifdef __i386__
21338+#include <asm/setup.h>
21339+#define VGABASE (__ISA_IO_base + 0xb8000)
21340+#else
21341+#include <asm/bootsetup.h>
21342+#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
21343+#endif
21344+
21345+#ifndef CONFIG_XEN
21346+static int max_ypos = 25, max_xpos = 80;
21347+static int current_ypos = 25, current_xpos = 0;
21348+
21349+static void early_vga_write(struct console *con, const char *str, unsigned n)
21350+{
21351+ char c;
21352+ int i, k, j;
21353+
21354+ while ((c = *str++) != '\0' && n-- > 0) {
21355+ if (current_ypos >= max_ypos) {
21356+ /* scroll 1 line up */
21357+ for (k = 1, j = 0; k < max_ypos; k++, j++) {
21358+ for (i = 0; i < max_xpos; i++) {
21359+ writew(readw(VGABASE+2*(max_xpos*k+i)),
21360+ VGABASE + 2*(max_xpos*j + i));
21361+ }
21362+ }
21363+ for (i = 0; i < max_xpos; i++)
21364+ writew(0x720, VGABASE + 2*(max_xpos*j + i));
21365+ current_ypos = max_ypos-1;
21366+ }
21367+ if (c == '\n') {
21368+ current_xpos = 0;
21369+ current_ypos++;
21370+ } else if (c != '\r') {
21371+ writew(((0x7 << 8) | (unsigned short) c),
21372+ VGABASE + 2*(max_xpos*current_ypos +
21373+ current_xpos++));
21374+ if (current_xpos >= max_xpos) {
21375+ current_xpos = 0;
21376+ current_ypos++;
21377+ }
21378+ }
21379+ }
21380+}
21381+
21382+static struct console early_vga_console = {
21383+ .name = "earlyvga",
21384+ .write = early_vga_write,
21385+ .flags = CON_PRINTBUFFER,
21386+ .index = -1,
21387+};
21388+
21389+/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
21390+
21391+static int early_serial_base = 0x3f8; /* ttyS0 */
21392+
21393+#define XMTRDY 0x20
21394+
21395+#define DLAB 0x80
21396+
21397+#define TXR 0 /* Transmit register (WRITE) */
21398+#define RXR 0 /* Receive register (READ) */
21399+#define IER 1 /* Interrupt Enable */
21400+#define IIR 2 /* Interrupt ID */
21401+#define FCR 2 /* FIFO control */
21402+#define LCR 3 /* Line control */
21403+#define MCR 4 /* Modem control */
21404+#define LSR 5 /* Line Status */
21405+#define MSR 6 /* Modem Status */
21406+#define DLL 0 /* Divisor Latch Low */
21407+#define DLH 1 /* Divisor latch High */
21408+
21409+static int early_serial_putc(unsigned char ch)
21410+{
21411+ unsigned timeout = 0xffff;
21412+ while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
21413+ cpu_relax();
21414+ outb(ch, early_serial_base + TXR);
21415+ return timeout ? 0 : -1;
21416+}
21417+
21418+static void early_serial_write(struct console *con, const char *s, unsigned n)
21419+{
21420+ while (*s && n-- > 0) {
21421+ early_serial_putc(*s);
21422+ if (*s == '\n')
21423+ early_serial_putc('\r');
21424+ s++;
21425+ }
21426+}
21427+
21428+#define DEFAULT_BAUD 9600
21429+
21430+static __init void early_serial_init(char *s)
21431+{
21432+ unsigned char c;
21433+ unsigned divisor;
21434+ unsigned baud = DEFAULT_BAUD;
21435+ char *e;
21436+
21437+ if (*s == ',')
21438+ ++s;
21439+
21440+ if (*s) {
21441+ unsigned port;
21442+ if (!strncmp(s,"0x",2)) {
21443+ early_serial_base = simple_strtoul(s, &e, 16);
21444+ } else {
21445+ static int bases[] = { 0x3f8, 0x2f8 };
21446+
21447+ if (!strncmp(s,"ttyS",4))
21448+ s += 4;
21449+ port = simple_strtoul(s, &e, 10);
21450+ if (port > 1 || s == e)
21451+ port = 0;
21452+ early_serial_base = bases[port];
21453+ }
21454+ s += strcspn(s, ",");
21455+ if (*s == ',')
21456+ s++;
21457+ }
21458+
21459+ outb(0x3, early_serial_base + LCR); /* 8n1 */
21460+ outb(0, early_serial_base + IER); /* no interrupt */
21461+ outb(0, early_serial_base + FCR); /* no fifo */
21462+ outb(0x3, early_serial_base + MCR); /* DTR + RTS */
21463+
21464+ if (*s) {
21465+ baud = simple_strtoul(s, &e, 0);
21466+ if (baud == 0 || s == e)
21467+ baud = DEFAULT_BAUD;
21468+ }
21469+
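+	/*
+	 * Program the baud rate: enable the divisor latch (DLAB), write the
+	 * divisor for the standard 115200 baud base clock, then restore LCR.
+	 */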
21470+ divisor = 115200 / baud;
21471+ c = inb(early_serial_base + LCR);
21472+ outb(c | DLAB, early_serial_base + LCR);
21473+ outb(divisor & 0xff, early_serial_base + DLL);
21474+ outb((divisor >> 8) & 0xff, early_serial_base + DLH);
21475+ outb(c & ~DLAB, early_serial_base + LCR);
21476+}
21477+
21478+#else /* CONFIG_XEN */
21479+
21480+static void
21481+early_serial_write(struct console *con, const char *s, unsigned count)
21482+{
21483+ int n;
21484+
21485+ while (count > 0) {
21486+ n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
21487+ if (n <= 0)
21488+ break;
21489+ count -= n;
21490+ s += n;
21491+ }
21492+}
21493+
21494+static __init void early_serial_init(char *s)
21495+{
21496+}
21497+
21498+/*
21499+ * No early VGA console on Xen, as we do not have convenient ISA-space
21500+ * mappings. Someone should fix this for domain 0. For now, use fake serial.
21501+ */
21502+#define early_vga_console early_serial_console
21503+
21504+#endif
21505+
21506+static struct console early_serial_console = {
21507+ .name = "earlyser",
21508+ .write = early_serial_write,
21509+ .flags = CON_PRINTBUFFER,
21510+ .index = -1,
21511+};
21512+
21513+/* Console interface to a host file on AMD's SimNow! */
21514+
21515+static int simnow_fd;
21516+
21517+enum {
21518+ MAGIC1 = 0xBACCD00A,
21519+ MAGIC2 = 0xCA110000,
21520+ XOPEN = 5,
21521+ XWRITE = 4,
21522+};
21523+
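+/*
+ * SimNow! intercepts CPUID executed with MAGIC1 in EAX and cmd+MAGIC2 in
+ * EDI and treats it as a call into the simulator host (used here to open
+ * and append to a log file on the host side).
+ */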
21524+static noinline long simnow(long cmd, long a, long b, long c)
21525+{
21526+ long ret;
21527+ asm volatile("cpuid" :
21528+ "=a" (ret) :
21529+ "b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
21530+ return ret;
21531+}
21532+
21533+void __init simnow_init(char *str)
21534+{
21535+ char *fn = "klog";
21536+ if (*str == '=')
21537+ fn = ++str;
21538+ /* error ignored */
21539+ simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
21540+}
21541+
21542+static void simnow_write(struct console *con, const char *s, unsigned n)
21543+{
21544+ simnow(XWRITE, simnow_fd, (unsigned long)s, n);
21545+}
21546+
21547+static struct console simnow_console = {
21548+ .name = "simnow",
21549+ .write = simnow_write,
21550+ .flags = CON_PRINTBUFFER,
21551+ .index = -1,
21552+};
21553+
21554+/* Direct interface for emergencies */
21555+struct console *early_console = &early_vga_console;
21556+static int early_console_initialized = 0;
21557+
21558+void early_printk(const char *fmt, ...)
21559+{
21560+ char buf[512];
21561+ int n;
21562+ va_list ap;
21563+
21564+ va_start(ap,fmt);
21565+ n = vscnprintf(buf,512,fmt,ap);
21566+ early_console->write(early_console,buf,n);
21567+ va_end(ap);
21568+}
21569+
21570+static int __initdata keep_early;
21571+
21572+int __init setup_early_printk(char *opt)
21573+{
21574+ char *space;
21575+ char buf[256];
21576+
21577+ if (early_console_initialized)
21578+ return 1;
21579+
21580+ strlcpy(buf,opt,sizeof(buf));
21581+ space = strchr(buf, ' ');
21582+ if (space)
21583+ *space = 0;
21584+
21585+ if (strstr(buf,"keep"))
21586+ keep_early = 1;
21587+
21588+ if (!strncmp(buf, "serial", 6)) {
21589+ early_serial_init(buf + 6);
21590+ early_console = &early_serial_console;
21591+ } else if (!strncmp(buf, "ttyS", 4)) {
21592+ early_serial_init(buf);
21593+ early_console = &early_serial_console;
21594+ } else if (!strncmp(buf, "vga", 3)
21595+#ifndef CONFIG_XEN
21596+ && SCREEN_INFO.orig_video_isVGA == 1) {
21597+ max_xpos = SCREEN_INFO.orig_video_cols;
21598+ max_ypos = SCREEN_INFO.orig_video_lines;
21599+ current_ypos = SCREEN_INFO.orig_y;
21600+#else
21601+ || !strncmp(buf, "xen", 3)) {
21602+#endif
21603+ early_console = &early_vga_console;
21604+ } else if (!strncmp(buf, "simnow", 6)) {
21605+ simnow_init(buf + 6);
21606+ early_console = &simnow_console;
21607+ keep_early = 1;
21608+ }
21609+ early_console_initialized = 1;
21610+ register_console(early_console);
21611+ return 0;
21612+}
21613+
21614+void __init disable_early_printk(void)
21615+{
21616+ if (!early_console_initialized || !early_console)
21617+ return;
21618+ if (!keep_early) {
21619+ printk("disabling early console\n");
21620+ unregister_console(early_console);
21621+ early_console_initialized = 0;
21622+ } else {
21623+ printk("keeping early console\n");
21624+ }
21625+}
21626+
21627+__setup("earlyprintk=", setup_early_printk);
21628Index: head-2008-11-25/arch/x86/kernel/entry_64-xen.S
21629===================================================================
21630--- /dev/null 1970-01-01 00:00:00.000000000 +0000
21631+++ head-2008-11-25/arch/x86/kernel/entry_64-xen.S 2008-10-29 09:55:56.000000000 +0100
21632@@ -0,0 +1,1322 @@
21633+/*
21634+ * linux/arch/x86_64/entry.S
21635+ *
21636+ * Copyright (C) 1991, 1992 Linus Torvalds
21637+ * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
21638+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
21639+ *
21640+ * $Id$
21641+ *
21642+ * Jun Nakajima <jun.nakajima@intel.com>
21643+ * Asit Mallick <asit.k.mallick@intel.com>
21644+ * Modified for Xen
21645+ */
21646+
21647+/*
21648+ * entry.S contains the system-call and fault low-level handling routines.
21649+ *
21650+ * NOTE: This code handles signal-recognition, which happens every time
21651+ * after an interrupt and after each system call.
21652+ *
21653+ * Normal syscalls and interrupts don't save a full stack frame; this is
21654+ * only done for syscall tracing, signals or fork/exec et al.
21655+ *
21656+ * A note on terminology:
21657+ * - top of stack: Architecture defined interrupt frame from SS to RIP
21658+ * at the top of the kernel process stack.
21659+ * - partial stack frame: partially saved registers up to R11.
21660+ * - full stack frame: like the partial stack frame, but with all registers saved.
21661+ *
21662+ * TODO:
21663+ * - schedule it carefully for the final hardware.
21664+ */
21665+
21666+#define ASSEMBLY 1
21667+#include <linux/linkage.h>
21668+#include <asm/segment.h>
21669+#include <asm/smp.h>
21670+#include <asm/cache.h>
21671+#include <asm/errno.h>
21672+#include <asm/dwarf2.h>
21673+#include <asm/calling.h>
21674+#include <asm/asm-offsets.h>
21675+#include <asm/msr.h>
21676+#include <asm/unistd.h>
21677+#include <asm/thread_info.h>
21678+#include <asm/hw_irq.h>
21679+#include <asm/page.h>
21680+#include <asm/irqflags.h>
21681+#include <asm/errno.h>
21682+#include <xen/interface/arch-x86_64.h>
21683+#include <xen/interface/features.h>
21684+
21685+#include "xen_entry.S"
21686+
21687+ .code64
21688+
21689+#ifndef CONFIG_PREEMPT
21690+#define retint_kernel retint_restore_args
21691+#endif
21692+
21693+
21694+.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
21695+#ifdef CONFIG_TRACE_IRQFLAGS
21696+ bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
21697+ jnc 1f
21698+ TRACE_IRQS_ON
21699+1:
21700+#endif
21701+.endm
21702+
21703+NMI_MASK = 0x80000000
21704+
21705+/*
21706+ * C code is not supposed to know about undefined top of stack. Every time
21707+ * a C function with a pt_regs argument is called from the SYSCALL-based
21708+ * fast path, FIXUP_TOP_OF_STACK is needed.
21709+ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
21710+ * manipulation.
21711+ */
21712+
21713+ /* %rsp:at FRAMEEND */
21714+ .macro FIXUP_TOP_OF_STACK tmp
21715+ movq $__USER_CS,CS(%rsp)
21716+ movq $-1,RCX(%rsp)
21717+ .endm
21718+
21719+ .macro RESTORE_TOP_OF_STACK tmp,offset=0
21720+ .endm
21721+
21722+ .macro FAKE_STACK_FRAME child_rip
21723+ /* push in order ss, rsp, eflags, cs, rip */
21724+ xorl %eax, %eax
21725+ pushq %rax /* ss */
21726+ CFI_ADJUST_CFA_OFFSET 8
21727+ /*CFI_REL_OFFSET ss,0*/
21728+ pushq %rax /* rsp */
21729+ CFI_ADJUST_CFA_OFFSET 8
21730+ CFI_REL_OFFSET rsp,0
21731+ pushq $(1<<9) /* eflags - interrupts on */
21732+ CFI_ADJUST_CFA_OFFSET 8
21733+ /*CFI_REL_OFFSET rflags,0*/
21734+ pushq $__KERNEL_CS /* cs */
21735+ CFI_ADJUST_CFA_OFFSET 8
21736+ /*CFI_REL_OFFSET cs,0*/
21737+ pushq \child_rip /* rip */
21738+ CFI_ADJUST_CFA_OFFSET 8
21739+ CFI_REL_OFFSET rip,0
21740+ pushq %rax /* orig rax */
21741+ CFI_ADJUST_CFA_OFFSET 8
21742+ .endm
21743+
21744+ .macro UNFAKE_STACK_FRAME
21745+ addq $8*6, %rsp
21746+ CFI_ADJUST_CFA_OFFSET -(6*8)
21747+ .endm
21748+
21749+ .macro CFI_DEFAULT_STACK start=1,adj=0
21750+ .if \start
21751+ CFI_STARTPROC simple
21752+ CFI_DEF_CFA rsp,SS+8 - \adj*ARGOFFSET
21753+ .else
21754+ CFI_DEF_CFA_OFFSET SS+8 - \adj*ARGOFFSET
21755+ .endif
21756+ .if \adj == 0
21757+ CFI_REL_OFFSET r15,R15
21758+ CFI_REL_OFFSET r14,R14
21759+ CFI_REL_OFFSET r13,R13
21760+ CFI_REL_OFFSET r12,R12
21761+ CFI_REL_OFFSET rbp,RBP
21762+ CFI_REL_OFFSET rbx,RBX
21763+ .endif
21764+ CFI_REL_OFFSET r11,R11 - \adj*ARGOFFSET
21765+ CFI_REL_OFFSET r10,R10 - \adj*ARGOFFSET
21766+ CFI_REL_OFFSET r9,R9 - \adj*ARGOFFSET
21767+ CFI_REL_OFFSET r8,R8 - \adj*ARGOFFSET
21768+ CFI_REL_OFFSET rax,RAX - \adj*ARGOFFSET
21769+ CFI_REL_OFFSET rcx,RCX - \adj*ARGOFFSET
21770+ CFI_REL_OFFSET rdx,RDX - \adj*ARGOFFSET
21771+ CFI_REL_OFFSET rsi,RSI - \adj*ARGOFFSET
21772+ CFI_REL_OFFSET rdi,RDI - \adj*ARGOFFSET
21773+ CFI_REL_OFFSET rip,RIP - \adj*ARGOFFSET
21774+ /*CFI_REL_OFFSET cs,CS - \adj*ARGOFFSET*/
21775+ /*CFI_REL_OFFSET rflags,EFLAGS - \adj*ARGOFFSET*/
21776+ CFI_REL_OFFSET rsp,RSP - \adj*ARGOFFSET
21777+ /*CFI_REL_OFFSET ss,SS - \adj*ARGOFFSET*/
21778+ .endm
21779+
21780+ /*
21781+ * Must be consistent with the definition in arch-x86/xen-x86_64.h:
21782+ * struct iret_context {
21783+ * u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
21784+ * };
21785+ * with rax, r11, and rcx being taken care of in the hypercall stub.
21786+ */
21787+ .macro HYPERVISOR_IRET flag
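+	# Kernel-to-kernel returns with NMI delivery unmasked can use a plain
+	# iretq (adjusting CS/SS to RPL 3 when the kernel runs outside ring 0);
+	# returns to user space, or with NMI_MASK set, go through the
+	# HYPERVISOR_iret hypercall via the hypercall page.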
21788+ testb $3,1*8(%rsp)
21789+ jnz 2f
21790+ testl $NMI_MASK,2*8(%rsp)
21791+ jnz 2f
21792+
21793+ cmpb $0,(xen_features+XENFEAT_supervisor_mode_kernel)(%rip)
21794+ jne 1f
21795+
21796+ /* Direct iret to kernel space. Correct CS and SS. */
21797+ orl $3,1*8(%rsp)
21798+ orl $3,4*8(%rsp)
21799+1: iretq
21800+
21801+2: /* Slow iret via hypervisor. */
21802+ andl $~NMI_MASK, 2*8(%rsp)
21803+ pushq $\flag
21804+ jmp hypercall_page + (__HYPERVISOR_iret * 32)
21805+ .endm
21806+
21807+/*
21808+ * A newly forked process directly context switches into this.
21809+ */
21810+/* rdi: prev */
21811+ENTRY(ret_from_fork)
21812+ CFI_DEFAULT_STACK
21813+ push kernel_eflags(%rip)
21814+ CFI_ADJUST_CFA_OFFSET 4
21815+ popf # reset kernel eflags
21816+ CFI_ADJUST_CFA_OFFSET -4
21817+ call schedule_tail
21818+ GET_THREAD_INFO(%rcx)
21819+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
21820+ jnz rff_trace
21821+rff_action:
21822+ RESTORE_REST
21823+ testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
21824+ je int_ret_from_sys_call
21825+ testl $_TIF_IA32,threadinfo_flags(%rcx)
21826+ jnz int_ret_from_sys_call
21827+ RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
21828+ jmp ret_from_sys_call
21829+rff_trace:
21830+ movq %rsp,%rdi
21831+ call syscall_trace_leave
21832+ GET_THREAD_INFO(%rcx)
21833+ jmp rff_action
21834+ CFI_ENDPROC
21835+END(ret_from_fork)
21836+
21837+/*
21838+ * initial frame state for interrupts and exceptions
21839+ */
21840+ .macro _frame ref
21841+ CFI_STARTPROC simple
21842+ CFI_DEF_CFA rsp,SS+8-\ref
21843+ /*CFI_REL_OFFSET ss,SS-\ref*/
21844+ CFI_REL_OFFSET rsp,RSP-\ref
21845+ /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
21846+ /*CFI_REL_OFFSET cs,CS-\ref*/
21847+ CFI_REL_OFFSET rip,RIP-\ref
21848+ .endm
21849+
21850+/*
21851+ * System call entry. Up to 6 arguments in registers are supported.
21852+ *
21853+ * SYSCALL does not save anything on the stack and does not change the
21854+ * stack pointer.
21855+ */
21856+
21857+/*
21858+ * Register setup:
21859+ * rax system call number
21860+ * rdi arg0
21861+ * rcx return address for syscall/sysret, C arg3
21862+ * rsi arg1
21863+ * rdx arg2
21864+ * r10 arg3 (--> moved to rcx for C)
21865+ * r8 arg4
21866+ * r9 arg5
21867+ * r11 eflags for syscall/sysret, temporary for C
21868+ * r12-r15,rbp,rbx saved by C code, not touched.
21869+ *
21870+ * Interrupts are enabled on entry.
21871+ * Only called from user space.
21872+ *
21873+ * XXX if we had a free scratch register we could save the RSP into the stack frame
21874+ * and report it properly in ps. Unfortunately we don't have one.
21875+ *
21876+ * When the user can change the frames, always force IRET. That is because
21877+ * it deals with non-canonical addresses better. SYSRET has trouble
21878+ * with them due to bugs in both AMD and Intel CPUs.
21879+ */
21880+
21881+ENTRY(system_call)
21882+ _frame (RIP-0x10)
21883+ SAVE_ARGS -8,0
21884+ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
21885+ GET_THREAD_INFO(%rcx)
21886+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
21887+ CFI_REMEMBER_STATE
21888+ jnz tracesys
21889+ cmpq $__NR_syscall_max,%rax
21890+ ja badsys
21891+ movq %r10,%rcx
21892+ call *sys_call_table(,%rax,8) # XXX: rip relative
21893+ movq %rax,RAX-ARGOFFSET(%rsp)
21894+/*
21895+ * Syscall return path ending with SYSRET (fast path)
21896+ * Has incomplete stack frame and undefined top of stack.
21897+ */
21898+ .globl ret_from_sys_call
21899+ret_from_sys_call:
21900+ movl $_TIF_ALLWORK_MASK,%edi
21901+ /* edi: flagmask */
21902+sysret_check:
21903+ GET_THREAD_INFO(%rcx)
21904+ XEN_BLOCK_EVENTS(%rsi)
21905+ TRACE_IRQS_OFF
21906+ movl threadinfo_flags(%rcx),%edx
21907+ andl %edi,%edx
21908+ CFI_REMEMBER_STATE
21909+ jnz sysret_careful
21910+ /*
21911+ * sysretq will re-enable interrupts:
21912+ */
21913+ TRACE_IRQS_ON
21914+ XEN_UNBLOCK_EVENTS(%rsi)
21915+ RESTORE_ARGS 0,8,0
21916+ HYPERVISOR_IRET VGCF_IN_SYSCALL
21917+
21918+ /* Handle reschedules */
21919+ /* edx: work, edi: workmask */
21920+sysret_careful:
21921+ CFI_RESTORE_STATE
21922+ bt $TIF_NEED_RESCHED,%edx
21923+ jnc sysret_signal
21924+ TRACE_IRQS_ON
21925+ XEN_UNBLOCK_EVENTS(%rsi)
21926+ pushq %rdi
21927+ CFI_ADJUST_CFA_OFFSET 8
21928+ call schedule
21929+ popq %rdi
21930+ CFI_ADJUST_CFA_OFFSET -8
21931+ jmp sysret_check
21932+
21933+ /* Handle a signal */
21934+sysret_signal:
21935+ TRACE_IRQS_ON
21936+/* sti */
21937+ XEN_UNBLOCK_EVENTS(%rsi)
21938+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
21939+ jz 1f
21940+
21941+ /* Really a signal */
21942+ /* edx: work flags (arg3) */
21943+ leaq do_notify_resume(%rip),%rax
21944+ leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
21945+ xorl %esi,%esi # oldset -> arg2
21946+ call ptregscall_common
21947+1: movl $_TIF_NEED_RESCHED,%edi
21948+ /* Use IRET because user could have changed frame. This
21949+ works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
21950+ XEN_BLOCK_EVENTS(%rsi)
21951+ TRACE_IRQS_OFF
21952+ jmp int_with_check
21953+
21954+badsys:
21955+ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
21956+ jmp ret_from_sys_call
21957+
21958+ /* Do syscall tracing */
21959+tracesys:
21960+ CFI_RESTORE_STATE
21961+ SAVE_REST
21962+ movq $-ENOSYS,RAX(%rsp)
21963+ FIXUP_TOP_OF_STACK %rdi
21964+ movq %rsp,%rdi
21965+ call syscall_trace_enter
21966+ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
21967+ RESTORE_REST
21968+ cmpq $__NR_syscall_max,%rax
21969+ ja 1f
21970+ movq %r10,%rcx /* fixup for C */
21971+ call *sys_call_table(,%rax,8)
21972+1: movq %rax,RAX-ARGOFFSET(%rsp)
21973+ /* Use IRET because user could have changed frame */
21974+ jmp int_ret_from_sys_call
21975+ CFI_ENDPROC
21976+END(system_call)
21977+
21978+/*
21979+ * Syscall return path ending with IRET.
21980+ * Has correct top of stack, but partial stack frame.
21981+ */
21982+ENTRY(int_ret_from_sys_call)
21983+ CFI_STARTPROC simple
21984+ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
21985+ /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
21986+ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
21987+ /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
21988+ /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
21989+ CFI_REL_OFFSET rip,RIP-ARGOFFSET
21990+ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
21991+ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
21992+ CFI_REL_OFFSET rax,RAX-ARGOFFSET
21993+ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
21994+ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
21995+ CFI_REL_OFFSET r8,R8-ARGOFFSET
21996+ CFI_REL_OFFSET r9,R9-ARGOFFSET
21997+ CFI_REL_OFFSET r10,R10-ARGOFFSET
21998+ CFI_REL_OFFSET r11,R11-ARGOFFSET
21999+ XEN_BLOCK_EVENTS(%rsi)
22000+ TRACE_IRQS_OFF
22001+ testb $3,CS-ARGOFFSET(%rsp)
22002+ jnz 1f
22003+ /* Need to set the proper %ss (not NULL) for ring 3 iretq */
22004+ movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
22005+	jmp retint_restore_args	# return from ring3 kernel
22006+1:
22007+ movl $_TIF_ALLWORK_MASK,%edi
22008+ /* edi: mask to check */
22009+int_with_check:
22010+ GET_THREAD_INFO(%rcx)
22011+ movl threadinfo_flags(%rcx),%edx
22012+ andl %edi,%edx
22013+ jnz int_careful
22014+ andl $~TS_COMPAT,threadinfo_status(%rcx)
22015+ jmp retint_restore_args
22016+
22017+ /* Either reschedule or signal or syscall exit tracking needed. */
22018+ /* First do a reschedule test. */
22019+ /* edx: work, edi: workmask */
22020+int_careful:
22021+ bt $TIF_NEED_RESCHED,%edx
22022+ jnc int_very_careful
22023+ TRACE_IRQS_ON
22024+/* sti */
22025+ XEN_UNBLOCK_EVENTS(%rsi)
22026+ pushq %rdi
22027+ CFI_ADJUST_CFA_OFFSET 8
22028+ call schedule
22029+ popq %rdi
22030+ CFI_ADJUST_CFA_OFFSET -8
22031+ XEN_BLOCK_EVENTS(%rsi)
22032+ TRACE_IRQS_OFF
22033+ jmp int_with_check
22034+
22035+ /* handle signals and tracing -- both require a full stack frame */
22036+int_very_careful:
22037+ TRACE_IRQS_ON
22038+/* sti */
22039+ XEN_UNBLOCK_EVENTS(%rsi)
22040+ SAVE_REST
22041+ /* Check for syscall exit trace */
22042+ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
22043+ jz int_signal
22044+ pushq %rdi
22045+ CFI_ADJUST_CFA_OFFSET 8
22046+ leaq 8(%rsp),%rdi # &ptregs -> arg1
22047+ call syscall_trace_leave
22048+ popq %rdi
22049+ CFI_ADJUST_CFA_OFFSET -8
22050+ andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
22051+ XEN_BLOCK_EVENTS(%rsi)
22052+ TRACE_IRQS_OFF
22053+ jmp int_restore_rest
22054+
22055+int_signal:
22056+ testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
22057+ jz 1f
22058+ movq %rsp,%rdi # &ptregs -> arg1
22059+ xorl %esi,%esi # oldset -> arg2
22060+ call do_notify_resume
22061+1: movl $_TIF_NEED_RESCHED,%edi
22062+int_restore_rest:
22063+ RESTORE_REST
22064+ XEN_BLOCK_EVENTS(%rsi)
22065+ TRACE_IRQS_OFF
22066+ jmp int_with_check
22067+ CFI_ENDPROC
22068+END(int_ret_from_sys_call)
22069+
22070+/*
22071+ * Certain special system calls that need to save a complete stack frame.
22072+ */
22073+
22074+ .macro PTREGSCALL label,func,arg
22075+ .globl \label
22076+\label:
22077+ leaq \func(%rip),%rax
22078+ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
22079+ jmp ptregscall_common
22080+END(\label)
22081+ .endm
22082+
22083+ CFI_STARTPROC
22084+
22085+ PTREGSCALL stub_clone, sys_clone, %r8
22086+ PTREGSCALL stub_fork, sys_fork, %rdi
22087+ PTREGSCALL stub_vfork, sys_vfork, %rdi
22088+ PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
22089+ PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
22090+ PTREGSCALL stub_iopl, sys_iopl, %rsi
22091+
22092+ENTRY(ptregscall_common)
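+	# The return address is popped into %r11, parked in callee-saved %r15
+	# across SAVE_REST and the indirect call, and pushed back before ret.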
22093+ popq %r11
22094+ CFI_ADJUST_CFA_OFFSET -8
22095+ CFI_REGISTER rip, r11
22096+ SAVE_REST
22097+ movq %r11, %r15
22098+ CFI_REGISTER rip, r15
22099+ FIXUP_TOP_OF_STACK %r11
22100+ call *%rax
22101+ RESTORE_TOP_OF_STACK %r11
22102+ movq %r15, %r11
22103+ CFI_REGISTER rip, r11
22104+ RESTORE_REST
22105+ pushq %r11
22106+ CFI_ADJUST_CFA_OFFSET 8
22107+ CFI_REL_OFFSET rip, 0
22108+ ret
22109+ CFI_ENDPROC
22110+END(ptregscall_common)
22111+
22112+ENTRY(stub_execve)
22113+ CFI_STARTPROC
22114+ popq %r11
22115+ CFI_ADJUST_CFA_OFFSET -8
22116+ CFI_REGISTER rip, r11
22117+ SAVE_REST
22118+ FIXUP_TOP_OF_STACK %r11
22119+ call sys_execve
22120+ RESTORE_TOP_OF_STACK %r11
22121+ movq %rax,RAX(%rsp)
22122+ RESTORE_REST
22123+ jmp int_ret_from_sys_call
22124+ CFI_ENDPROC
22125+END(stub_execve)
22126+
22127+/*
22128+ * sigreturn is special because it needs to restore all registers on return.
22129+ * This cannot be done with SYSRET, so use the IRET return path instead.
22130+ */
22131+ENTRY(stub_rt_sigreturn)
22132+ CFI_STARTPROC
22133+ addq $8, %rsp
22134+ CFI_ADJUST_CFA_OFFSET -8
22135+ SAVE_REST
22136+ movq %rsp,%rdi
22137+ FIXUP_TOP_OF_STACK %r11
22138+ call sys_rt_sigreturn
22139+ movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
22140+ RESTORE_REST
22141+ jmp int_ret_from_sys_call
22142+ CFI_ENDPROC
22143+END(stub_rt_sigreturn)
22144+
22145+/* initial frame state for interrupts (and exceptions without error code) */
22146+#define INTR_FRAME _frame (RIP-0x10); \
22147+ CFI_REL_OFFSET rcx,0; \
22148+ CFI_REL_OFFSET r11,8
22149+
22150+/* initial frame state for exceptions with error code (and interrupts with
22151+ vector already pushed) */
22152+#define XCPT_FRAME _frame (RIP-0x18); \
22153+ CFI_REL_OFFSET rcx,0; \
22154+ CFI_REL_OFFSET r11,8
22155+
22156+/*
22157+ * Interrupt exit.
22158+ *
22159+ */
22160+
22161+retint_check:
22162+ CFI_DEFAULT_STACK adj=1
22163+ movl threadinfo_flags(%rcx),%edx
22164+ andl %edi,%edx
22165+ CFI_REMEMBER_STATE
22166+ jnz retint_careful
22167+retint_restore_args:
22168+ movl EFLAGS-REST_SKIP(%rsp), %eax
22169+ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
22170+ XEN_GET_VCPU_INFO(%rsi)
22171+ andb evtchn_upcall_mask(%rsi),%al
22172+ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
22173+ jnz restore_all_enable_events # != 0 => enable event delivery
22174+ XEN_PUT_VCPU_INFO(%rsi)
22175+
22176+ RESTORE_ARGS 0,8,0
22177+ HYPERVISOR_IRET 0
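The shr/andb/andb/jnz sequence above decides whether the exit path may return directly or must branch to restore_all_enable_events first. A minimal C sketch of the same condition, using a stand-in vcpu_info type (the field name matches Xen's public interface, the struct itself is illustrative only):

#include <stdint.h>

struct vcpu_info_sketch {                  /* stand-in for Xen's struct vcpu_info */
	uint8_t evtchn_upcall_mask;
};

/* Non-zero when the frame being restored had EFLAGS.IF set (bit 9) while
 * event delivery is still masked, i.e. the exit path must re-enable event
 * delivery instead of doing a plain HYPERVISOR_IRET. */
static int need_enable_events(uint64_t iret_eflags,
			      const struct vcpu_info_sketch *v)
{
	return ((iret_eflags >> 9) & v->evtchn_upcall_mask & 1) != 0;
}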
22178+
22179+ /* edi: workmask, edx: work */
22180+retint_careful:
22181+ CFI_RESTORE_STATE
22182+ bt $TIF_NEED_RESCHED,%edx
22183+ jnc retint_signal
22184+ TRACE_IRQS_ON
22185+ XEN_UNBLOCK_EVENTS(%rsi)
22186+/* sti */
22187+ pushq %rdi
22188+ CFI_ADJUST_CFA_OFFSET 8
22189+ call schedule
22190+ popq %rdi
22191+ CFI_ADJUST_CFA_OFFSET -8
22192+ GET_THREAD_INFO(%rcx)
22193+ XEN_BLOCK_EVENTS(%rsi)
22194+/* cli */
22195+ TRACE_IRQS_OFF
22196+ jmp retint_check
22197+
22198+retint_signal:
22199+ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
22200+ jz retint_restore_args
22201+ TRACE_IRQS_ON
22202+ XEN_UNBLOCK_EVENTS(%rsi)
22203+ SAVE_REST
22204+ movq $-1,ORIG_RAX(%rsp)
22205+ xorl %esi,%esi # oldset
22206+ movq %rsp,%rdi # &pt_regs
22207+ call do_notify_resume
22208+ RESTORE_REST
22209+ XEN_BLOCK_EVENTS(%rsi)
22210+ TRACE_IRQS_OFF
22211+ movl $_TIF_NEED_RESCHED,%edi
22212+ GET_THREAD_INFO(%rcx)
22213+ jmp retint_check
22214+
22215+#ifdef CONFIG_PREEMPT
22216+ /* Returning to kernel space. Check if we need preemption */
22217+ /* rcx: threadinfo. interrupts off. */
22218+ .p2align
22219+retint_kernel:
22220+ cmpl $0,threadinfo_preempt_count(%rcx)
22221+ jnz retint_restore_args
22222+ bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
22223+ jnc retint_restore_args
22224+ bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
22225+ jnc retint_restore_args
22226+ call preempt_schedule_irq
22227+ jmp retint_kernel /* check again */
22228+#endif
22229+
22230+ CFI_ENDPROC
22231+END(retint_check)
22232+
22233+#ifndef CONFIG_XEN
22234+/*
22235+ * APIC interrupts.
22236+ */
22237+ .macro apicinterrupt num,func
22238+ INTR_FRAME
22239+ pushq $~(\num)
22240+ CFI_ADJUST_CFA_OFFSET 8
22241+ interrupt \func
22242+ jmp error_entry
22243+ CFI_ENDPROC
22244+ .endm
22245+
22246+ENTRY(thermal_interrupt)
22247+ apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
22248+END(thermal_interrupt)
22249+
22250+ENTRY(threshold_interrupt)
22251+ apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
22252+END(threshold_interrupt)
22253+
22254+#ifdef CONFIG_SMP
22255+ENTRY(reschedule_interrupt)
22256+ apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
22257+END(reschedule_interrupt)
22258+
22259+ .macro INVALIDATE_ENTRY num
22260+ENTRY(invalidate_interrupt\num)
22261+ apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
22262+END(invalidate_interrupt\num)
22263+ .endm
22264+
22265+ INVALIDATE_ENTRY 0
22266+ INVALIDATE_ENTRY 1
22267+ INVALIDATE_ENTRY 2
22268+ INVALIDATE_ENTRY 3
22269+ INVALIDATE_ENTRY 4
22270+ INVALIDATE_ENTRY 5
22271+ INVALIDATE_ENTRY 6
22272+ INVALIDATE_ENTRY 7
22273+
22274+ENTRY(call_function_interrupt)
22275+ apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
22276+END(call_function_interrupt)
22277+#endif
22278+
22279+#ifdef CONFIG_X86_LOCAL_APIC
22280+ENTRY(apic_timer_interrupt)
22281+ apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
22282+END(apic_timer_interrupt)
22283+
22284+ENTRY(error_interrupt)
22285+ apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
22286+END(error_interrupt)
22287+
22288+ENTRY(spurious_interrupt)
22289+ apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
22290+END(spurious_interrupt)
22291+#endif
22292+#endif /* !CONFIG_XEN */
22293+
22294+/*
22295+ * Exception entry points.
22296+ */
22297+ .macro zeroentry sym
22298+ INTR_FRAME
22299+ movq (%rsp),%rcx
22300+ CFI_RESTORE rcx
22301+ movq 8(%rsp),%r11
22302+ CFI_RESTORE r11
22303+ addq $0x10,%rsp /* skip rcx and r11 */
22304+ CFI_ADJUST_CFA_OFFSET -0x10
22305+ pushq $0 /* push error code/oldrax */
22306+ CFI_ADJUST_CFA_OFFSET 8
22307+ pushq %rax /* push real oldrax to the rdi slot */
22308+ CFI_ADJUST_CFA_OFFSET 8
22309+ CFI_REL_OFFSET rax,0
22310+ leaq \sym(%rip),%rax
22311+ jmp error_entry
22312+ CFI_ENDPROC
22313+ .endm
22314+
22315+ .macro errorentry sym
22316+ XCPT_FRAME
22317+ movq (%rsp),%rcx
22318+ CFI_RESTORE rcx
22319+ movq 8(%rsp),%r11
22320+ CFI_RESTORE r11
22321+ addq $0x10,%rsp /* rsp points to the error code */
22322+ CFI_ADJUST_CFA_OFFSET -0x10
22323+ pushq %rax
22324+ CFI_ADJUST_CFA_OFFSET 8
22325+ CFI_REL_OFFSET rax,0
22326+ leaq \sym(%rip),%rax
22327+ jmp error_entry
22328+ CFI_ENDPROC
22329+ .endm
22330+
22331+#if 0 /* not XEN */
22332+ /* error code is on the stack already */
22333+ /* handle NMI like exceptions that can happen everywhere */
22334+ .macro paranoidentry sym, ist=0, irqtrace=1
22335+ movq (%rsp),%rcx
22336+ movq 8(%rsp),%r11
22337+ addq $0x10,%rsp /* skip rcx and r11 */
22338+ SAVE_ALL
22339+ cld
22340+#if 0 /* not XEN */
22341+ movl $1,%ebx
22342+ movl $MSR_GS_BASE,%ecx
22343+ rdmsr
22344+ testl %edx,%edx
22345+ js 1f
22346+ swapgs
22347+ xorl %ebx,%ebx
22348+1:
22349+#endif
22350+ .if \ist
22351+ movq %gs:pda_data_offset, %rbp
22352+ .endif
22353+ movq %rsp,%rdi
22354+ movq ORIG_RAX(%rsp),%rsi
22355+ movq $-1,ORIG_RAX(%rsp)
22356+ .if \ist
22357+ subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
22358+ .endif
22359+ call \sym
22360+ .if \ist
22361+ addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
22362+ .endif
22363+/* cli */
22364+ XEN_BLOCK_EVENTS(%rsi)
22365+ .if \irqtrace
22366+ TRACE_IRQS_OFF
22367+ .endif
22368+ .endm
22369+
22370+ /*
22371+ * "Paranoid" exit path from exception stack.
22372+ * Paranoid because this is used by NMIs and cannot take
22373+ * any kernel state for granted.
22374+ * We don't do kernel preemption checks here, because only
22375+ * NMI should be common and it does not enable IRQs and
22376+ * cannot get reschedule ticks.
22377+ *
22378+ * "trace" is 0 for the NMI handler only, because irq-tracing
22379+ * is fundamentally NMI-unsafe. (we cannot change the soft and
22380+ * hard flags at once, atomically)
22381+ */
22382+ .macro paranoidexit trace=1
22383+ /* ebx: no swapgs flag */
22384+paranoid_exit\trace:
22385+ testl %ebx,%ebx /* swapgs needed? */
22386+ jnz paranoid_restore\trace
22387+ testl $3,CS(%rsp)
22388+ jnz paranoid_userspace\trace
22389+paranoid_swapgs\trace:
22390+ TRACE_IRQS_IRETQ 0
22391+ swapgs
22392+paranoid_restore\trace:
22393+ RESTORE_ALL 8
22394+ iretq
22395+paranoid_userspace\trace:
22396+ GET_THREAD_INFO(%rcx)
22397+ movl threadinfo_flags(%rcx),%ebx
22398+ andl $_TIF_WORK_MASK,%ebx
22399+ jz paranoid_swapgs\trace
22400+ movq %rsp,%rdi /* &pt_regs */
22401+ call sync_regs
22402+ movq %rax,%rsp /* switch stack for scheduling */
22403+ testl $_TIF_NEED_RESCHED,%ebx
22404+ jnz paranoid_schedule\trace
22405+ movl %ebx,%edx /* arg3: thread flags */
22406+ .if \trace
22407+ TRACE_IRQS_ON
22408+ .endif
22409+ sti
22410+ xorl %esi,%esi /* arg2: oldset */
22411+ movq %rsp,%rdi /* arg1: &pt_regs */
22412+ call do_notify_resume
22413+ cli
22414+ .if \trace
22415+ TRACE_IRQS_OFF
22416+ .endif
22417+ jmp paranoid_userspace\trace
22418+paranoid_schedule\trace:
22419+ .if \trace
22420+ TRACE_IRQS_ON
22421+ .endif
22422+ sti
22423+ call schedule
22424+ cli
22425+ .if \trace
22426+ TRACE_IRQS_OFF
22427+ .endif
22428+ jmp paranoid_userspace\trace
22429+ CFI_ENDPROC
22430+ .endm
22431+#endif
22432+
22433+/*
22434+ * Exception entry point. This expects an error code/orig_rax on the stack
22435+ * and the exception handler in %rax.
22436+ */
22437+ENTRY(error_entry)
22438+ _frame RDI
22439+ CFI_REL_OFFSET rax,0
22440+ /* rdi slot contains rax, oldrax contains error code */
22441+ cld
22442+ subq $14*8,%rsp
22443+ CFI_ADJUST_CFA_OFFSET (14*8)
22444+ movq %rsi,13*8(%rsp)
22445+ CFI_REL_OFFSET rsi,RSI
22446+ movq 14*8(%rsp),%rsi /* load rax from rdi slot */
22447+ CFI_REGISTER rax,rsi
22448+ movq %rdx,12*8(%rsp)
22449+ CFI_REL_OFFSET rdx,RDX
22450+ movq %rcx,11*8(%rsp)
22451+ CFI_REL_OFFSET rcx,RCX
22452+ movq %rsi,10*8(%rsp) /* store rax */
22453+ CFI_REL_OFFSET rax,RAX
22454+ movq %r8, 9*8(%rsp)
22455+ CFI_REL_OFFSET r8,R8
22456+ movq %r9, 8*8(%rsp)
22457+ CFI_REL_OFFSET r9,R9
22458+ movq %r10,7*8(%rsp)
22459+ CFI_REL_OFFSET r10,R10
22460+ movq %r11,6*8(%rsp)
22461+ CFI_REL_OFFSET r11,R11
22462+ movq %rbx,5*8(%rsp)
22463+ CFI_REL_OFFSET rbx,RBX
22464+ movq %rbp,4*8(%rsp)
22465+ CFI_REL_OFFSET rbp,RBP
22466+ movq %r12,3*8(%rsp)
22467+ CFI_REL_OFFSET r12,R12
22468+ movq %r13,2*8(%rsp)
22469+ CFI_REL_OFFSET r13,R13
22470+ movq %r14,1*8(%rsp)
22471+ CFI_REL_OFFSET r14,R14
22472+ movq %r15,(%rsp)
22473+ CFI_REL_OFFSET r15,R15
22474+#if 0
22475+ cmpl $__KERNEL_CS,CS(%rsp)
22476+ CFI_REMEMBER_STATE
22477+ je error_kernelspace
22478+#endif
22479+error_call_handler:
22480+ movq %rdi, RDI(%rsp)
22481+ CFI_REL_OFFSET rdi,RDI
22482+ movq %rsp,%rdi
22483+ movq ORIG_RAX(%rsp),%rsi # get error code
22484+ movq $-1,ORIG_RAX(%rsp)
22485+ call *%rax
22486+error_exit:
22487+ RESTORE_REST
22488+/* cli */
22489+ XEN_BLOCK_EVENTS(%rsi)
22490+ TRACE_IRQS_OFF
22491+ GET_THREAD_INFO(%rcx)
22492+ testb $3,CS-ARGOFFSET(%rsp)
22493+ jz retint_kernel
22494+ movl threadinfo_flags(%rcx),%edx
22495+ movl $_TIF_WORK_MASK,%edi
22496+ andl %edi,%edx
22497+ jnz retint_careful
22498+ /*
22499+ * The iret might restore flags:
22500+ */
22501+ TRACE_IRQS_IRETQ
22502+ jmp retint_restore_args
22503+
22504+#if 0
22505+ /*
22506+ * We need to rewrite the logic here because we don't do iretq
22507+ * to return to user mode. It's still possible that we get a trap/fault
22508+ * in the kernel (when accessing buffers pointed to by system calls,
22509+ * for example).
22510+ *
22511+ */
22512+ CFI_RESTORE_STATE
22513+error_kernelspace:
22514+ incl %ebx
22515+ /* There are two places in the kernel that can potentially fault with
22516+ usergs. Handle them here. The exception handlers after
22517+ iret run with kernel gs again, so don't set the user space flag.
22518+ B stepping K8s sometimes report a truncated RIP for IRET
22519+ exceptions returning to compat mode. Check for these here too. */
22520+ leaq iret_label(%rip),%rbp
22521+ cmpq %rbp,RIP(%rsp)
22522+ je error_swapgs
22523+ movl %ebp,%ebp /* zero extend */
22524+ cmpq %rbp,RIP(%rsp)
22525+ je error_swapgs
22526+ cmpq $gs_change,RIP(%rsp)
22527+ je error_swapgs
22528+ jmp error_sti
22529+#endif
22530+ CFI_ENDPROC
22531+END(error_entry)
22532+
22533+ENTRY(hypervisor_callback)
22534+ zeroentry do_hypervisor_callback
22535+END(hypervisor_callback)
22536+
22537+/*
22538+ * Copied from arch/xen/i386/kernel/entry.S
22539+ */
22540+# A note on the "critical region" in our callback handler.
22541+# We want to avoid stacking callback handlers due to events occurring
22542+# during handling of the last event. To do this, we keep events disabled
22543+# until we've done all processing. HOWEVER, we must enable events before
22544+# popping the stack frame (can't be done atomically) and so it would still
22545+# be possible to get enough handler activations to overflow the stack.
22546+# Although unlikely, bugs of that kind are hard to track down, so we'd
22547+# like to avoid the possibility.
22548+# So, on entry to the handler we detect whether we interrupted an
22549+# existing activation in its critical region -- if so, we pop the current
22550+# activation and restart the handler using the previous one.
22551+ENTRY(do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
22552+ CFI_STARTPROC
22553+# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
22554+# see the correct pointer to the pt_regs
22555+ movq %rdi, %rsp # we don't return, adjust the stack frame
22556+ CFI_ENDPROC
22557+ CFI_DEFAULT_STACK
22558+11: incl %gs:pda_irqcount
22559+ movq %rsp,%rbp
22560+ CFI_DEF_CFA_REGISTER rbp
22561+ cmovzq %gs:pda_irqstackptr,%rsp
22562+ pushq %rbp # backlink for old unwinder
22563+ call evtchn_do_upcall
22564+ popq %rsp
22565+ CFI_DEF_CFA_REGISTER rsp
22566+ decl %gs:pda_irqcount
22567+ jmp error_exit
22568+ CFI_ENDPROC
22569+END(do_hypervisor_callback)
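A rough C rendering of the masking discipline the critical-region comment above describes. The real work is done by evtchn_do_upcall(); the struct here is only a stand-in for Xen's vcpu_info, and the dispatch step is elided:

#include <stdint.h>

struct vcpu_info_sketch {                  /* stand-in for Xen's struct vcpu_info */
	uint8_t evtchn_upcall_pending;
	uint8_t evtchn_upcall_mask;
};

static void upcall_sketch(struct vcpu_info_sketch *v)
{
	v->evtchn_upcall_mask = 1;         /* events stay blocked while we work */
	while (v->evtchn_upcall_pending) {
		v->evtchn_upcall_pending = 0;
		/* ...scan and dispatch pending event channels here... */
	}
	v->evtchn_upcall_mask = 0;         /* the window between this store and the
	                                    * final iret is the critical region
	                                    * marked scrit/ecrit below */
}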
22570+
22571+#ifdef CONFIG_X86_LOCAL_APIC
22572+KPROBE_ENTRY(nmi)
22573+ zeroentry do_nmi_callback
22574+ENTRY(do_nmi_callback)
22575+ CFI_STARTPROC
22576+ addq $8, %rsp
22577+ CFI_ENDPROC
22578+ CFI_DEFAULT_STACK
22579+ call do_nmi
22580+ orl $NMI_MASK,EFLAGS(%rsp)
22581+ RESTORE_REST
22582+ XEN_BLOCK_EVENTS(%rsi)
22583+ TRACE_IRQS_OFF
22584+ GET_THREAD_INFO(%rcx)
22585+ jmp retint_restore_args
22586+ CFI_ENDPROC
22587+ .previous .text
22588+END(nmi)
22589+#endif
22590+
22591+ ALIGN
22592+restore_all_enable_events:
22593+ CFI_DEFAULT_STACK adj=1
22594+ TRACE_IRQS_ON
22595+ XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
22596+
22597+scrit: /**** START OF CRITICAL REGION ****/
22598+ XEN_TEST_PENDING(%rsi)
22599+ CFI_REMEMBER_STATE
22600+ jnz 14f # process more events if necessary...
22601+ XEN_PUT_VCPU_INFO(%rsi)
22602+ RESTORE_ARGS 0,8,0
22603+ HYPERVISOR_IRET 0
22604+
22605+ CFI_RESTORE_STATE
22606+14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
22607+ XEN_PUT_VCPU_INFO(%rsi)
22608+ SAVE_REST
22609+ movq %rsp,%rdi # set the argument again
22610+ jmp 11b
22611+ CFI_ENDPROC
22612+ecrit: /**** END OF CRITICAL REGION ****/
22613+# At this point, unlike on x86-32, we don't do the fixup, to keep the
22614+# code simple; the stack frame is more complex on x86-64.
22615+# When the kernel is interrupted in the critical section, it will simply
22616+# do an IRET, and everything is restored at that point, i.e. execution
22617+# resumes at the interrupted instruction with the same context.
22618+
22619+# Hypervisor uses this for application faults while it executes.
22620+# We get here for two reasons:
22621+# 1. Fault while reloading DS, ES, FS or GS
22622+# 2. Fault while executing IRET
22623+# Category 1 we do not need to fix up as Xen has already reloaded all segment
22624+# registers that could be reloaded and zeroed the others.
22625+# Category 2 we fix up by killing the current process. We cannot use the
22626+# normal Linux return path in this case because if we use the IRET hypercall
22627+# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
22628+# We distinguish between categories by comparing each saved segment register
22629+# with its current contents: any discrepancy means we are in category 1.
22630+ENTRY(failsafe_callback)
22631+ _frame (RIP-0x30)
22632+ CFI_REL_OFFSET rcx, 0
22633+ CFI_REL_OFFSET r11, 8
22634+ movw %ds,%cx
22635+ cmpw %cx,0x10(%rsp)
22636+ CFI_REMEMBER_STATE
22637+ jne 1f
22638+ movw %es,%cx
22639+ cmpw %cx,0x18(%rsp)
22640+ jne 1f
22641+ movw %fs,%cx
22642+ cmpw %cx,0x20(%rsp)
22643+ jne 1f
22644+ movw %gs,%cx
22645+ cmpw %cx,0x28(%rsp)
22646+ jne 1f
22647+ /* All segments match their saved values => Category 2 (Bad IRET). */
22648+ movq (%rsp),%rcx
22649+ CFI_RESTORE rcx
22650+ movq 8(%rsp),%r11
22651+ CFI_RESTORE r11
22652+ addq $0x30,%rsp
22653+ CFI_ADJUST_CFA_OFFSET -0x30
22654+ movq $11,%rdi /* SIGSEGV */
22655+ jmp do_exit
22656+ CFI_RESTORE_STATE
22657+1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
22658+ movq (%rsp),%rcx
22659+ CFI_RESTORE rcx
22660+ movq 8(%rsp),%r11
22661+ CFI_RESTORE r11
22662+ addq $0x30,%rsp
22663+ CFI_ADJUST_CFA_OFFSET -0x30
22664+ pushq $0
22665+ CFI_ADJUST_CFA_OFFSET 8
22666+ SAVE_ALL
22667+ jmp error_exit
22668+ CFI_ENDPROC
22669+#if 0
22670+ .section __ex_table,"a"
22671+ .align 8
22672+ .quad gs_change,bad_gs
22673+ .previous
22674+ .section .fixup,"ax"
22675+ /* running with kernelgs */
22676+bad_gs:
22677+/* swapgs */ /* switch back to user gs */
22678+ xorl %eax,%eax
22679+ movl %eax,%gs
22680+ jmp 2b
22681+ .previous
22682+#endif
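The four segment comparisons in failsafe_callback above reduce to a single predicate. A hedged C sketch follows; the struct layout and helper name are illustrative (the real saved selectors sit at 0x10/0x18/0x20/0x28 above %rsp):

struct failsafe_segs {                     /* selectors Xen saved for us */
	unsigned short ds, es, fs, gs;
};

/* Non-zero for category 2 (the IRET itself faulted): every saved selector
 * still matches the live one, so the only fix is to kill the task with
 * SIGSEGV.  Any mismatch is category 1 (a segment reload faulted) and the
 * IRET is simply retried. */
static int is_bad_iret(const struct failsafe_segs *saved,
		       unsigned short ds, unsigned short es,
		       unsigned short fs, unsigned short gs)
{
	return saved->ds == ds && saved->es == es &&
	       saved->fs == fs && saved->gs == gs;
}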
22683+
22684+/*
22685+ * Create a kernel thread.
22686+ *
22687+ * C extern interface:
22688+ * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
22689+ *
22690+ * asm input arguments:
22691+ * rdi: fn, rsi: arg, rdx: flags
22692+ */
22693+ENTRY(kernel_thread)
22694+ CFI_STARTPROC
22695+ FAKE_STACK_FRAME $child_rip
22696+ SAVE_ALL
22697+
22698+ # rdi: flags, rsi: usp, rdx: will be &pt_regs
22699+ movq %rdx,%rdi
22700+ orq kernel_thread_flags(%rip),%rdi
22701+ movq $-1, %rsi
22702+ movq %rsp, %rdx
22703+
22704+ xorl %r8d,%r8d
22705+ xorl %r9d,%r9d
22706+
22707+ # clone now
22708+ call do_fork
22709+ movq %rax,RAX(%rsp)
22710+ xorl %edi,%edi
22711+
22712+ /*
22713+	 * It isn't worth checking for a reschedule here, so internally to
22714+	 * the x86_64 port you can rely on kernel_thread() not rescheduling
22715+	 * the child before returning; this avoids the need for hacks, for
22716+	 * example to fork off the per-CPU idle tasks.
22717+ * [Hopefully no generic code relies on the reschedule -AK]
22718+ */
22719+ RESTORE_ALL
22720+ UNFAKE_STACK_FRAME
22721+ ret
22722+ CFI_ENDPROC
22723+ENDPROC(kernel_thread)
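A hedged usage sketch for the C interface documented above kernel_thread(). The thread body, its argument and the clone flags are illustrative only, and the declaration is assumed to come from <linux/sched.h> as in kernels of this vintage:

#include <linux/sched.h>        /* kernel_thread(), CLONE_* flags (assumption) */

static int worker_fn(void *arg)            /* hypothetical thread body */
{
	/* do_work(arg); */
	return 0;
}

static long start_worker(void *cookie)
{
	/* Returns the new pid on success or a negative errno on failure. */
	return kernel_thread(worker_fn, cookie, CLONE_FS | CLONE_FILES);
}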
22724+
22725+child_rip:
22726+ pushq $0 # fake return address
22727+ CFI_STARTPROC
22728+ /*
22729+ * Here we are in the child and the registers are set as they were
22730+ * at kernel_thread() invocation in the parent.
22731+ */
22732+ movq %rdi, %rax
22733+ movq %rsi, %rdi
22734+ call *%rax
22735+ # exit
22736+ xorl %edi, %edi
22737+ call do_exit
22738+ CFI_ENDPROC
22739+ENDPROC(child_rip)
22740+
22741+/*
22742+ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
22743+ *
22744+ * C extern interface:
22745+ * extern long execve(char *name, char **argv, char **envp)
22746+ *
22747+ * asm input arguments:
22748+ * rdi: name, rsi: argv, rdx: envp
22749+ *
22750+ * We want to fall back into:
22751+ * extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
22752+ *
22753+ * do_sys_execve asm fallback arguments:
22754+ * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
22755+ */
22756+ENTRY(execve)
22757+ CFI_STARTPROC
22758+ FAKE_STACK_FRAME $0
22759+ SAVE_ALL
22760+ call sys_execve
22761+ movq %rax, RAX(%rsp)
22762+ RESTORE_REST
22763+ testq %rax,%rax
22764+ jne 1f
22765+ jmp int_ret_from_sys_call
22766+1: RESTORE_ARGS
22767+ UNFAKE_STACK_FRAME
22768+ ret
22769+ CFI_ENDPROC
22770+ENDPROC(execve)
22771+
22772+KPROBE_ENTRY(page_fault)
22773+ errorentry do_page_fault
22774+END(page_fault)
22775+ .previous .text
22776+
22777+ENTRY(coprocessor_error)
22778+ zeroentry do_coprocessor_error
22779+END(coprocessor_error)
22780+
22781+ENTRY(simd_coprocessor_error)
22782+ zeroentry do_simd_coprocessor_error
22783+END(simd_coprocessor_error)
22784+
22785+ENTRY(device_not_available)
22786+ zeroentry math_state_restore
22787+END(device_not_available)
22788+
22789+ /* runs on exception stack */
22790+KPROBE_ENTRY(debug)
22791+/* INTR_FRAME
22792+ pushq $0
22793+ CFI_ADJUST_CFA_OFFSET 8 */
22794+ zeroentry do_debug
22795+/* paranoidexit
22796+ CFI_ENDPROC */
22797+END(debug)
22798+ .previous .text
22799+
22800+#if 0
22801+ /* runs on exception stack */
22802+KPROBE_ENTRY(nmi)
22803+ INTR_FRAME
22804+ pushq $-1
22805+ CFI_ADJUST_CFA_OFFSET 8
22806+ paranoidentry do_nmi, 0, 0
22807+#ifdef CONFIG_TRACE_IRQFLAGS
22808+ paranoidexit 0
22809+#else
22810+ jmp paranoid_exit1
22811+ CFI_ENDPROC
22812+#endif
22813+END(nmi)
22814+ .previous .text
22815+#endif
22816+
22817+KPROBE_ENTRY(int3)
22818+/* INTR_FRAME
22819+ pushq $0
22820+ CFI_ADJUST_CFA_OFFSET 8 */
22821+ zeroentry do_int3
22822+/* jmp paranoid_exit1
22823+ CFI_ENDPROC */
22824+END(int3)
22825+ .previous .text
22826+
22827+ENTRY(overflow)
22828+ zeroentry do_overflow
22829+END(overflow)
22830+
22831+ENTRY(bounds)
22832+ zeroentry do_bounds
22833+END(bounds)
22834+
22835+ENTRY(invalid_op)
22836+ zeroentry do_invalid_op
22837+END(invalid_op)
22838+
22839+ENTRY(coprocessor_segment_overrun)
22840+ zeroentry do_coprocessor_segment_overrun
22841+END(coprocessor_segment_overrun)
22842+
22843+ENTRY(reserved)
22844+ zeroentry do_reserved
22845+END(reserved)
22846+
22847+#if 0
22848+ /* runs on exception stack */
22849+ENTRY(double_fault)
22850+ XCPT_FRAME
22851+ paranoidentry do_double_fault
22852+ jmp paranoid_exit1
22853+ CFI_ENDPROC
22854+END(double_fault)
22855+#endif
22856+
22857+ENTRY(invalid_TSS)
22858+ errorentry do_invalid_TSS
22859+END(invalid_TSS)
22860+
22861+ENTRY(segment_not_present)
22862+ errorentry do_segment_not_present
22863+END(segment_not_present)
22864+
22865+ /* runs on exception stack */
22866+ENTRY(stack_segment)
22867+/* XCPT_FRAME
22868+ paranoidentry do_stack_segment */
22869+ errorentry do_stack_segment
22870+/* jmp paranoid_exit1
22871+ CFI_ENDPROC */
22872+END(stack_segment)
22873+
22874+KPROBE_ENTRY(general_protection)
22875+ errorentry do_general_protection
22876+END(general_protection)
22877+ .previous .text
22878+
22879+ENTRY(alignment_check)
22880+ errorentry do_alignment_check
22881+END(alignment_check)
22882+
22883+ENTRY(divide_error)
22884+ zeroentry do_divide_error
22885+END(divide_error)
22886+
22887+ENTRY(spurious_interrupt_bug)
22888+ zeroentry do_spurious_interrupt_bug
22889+END(spurious_interrupt_bug)
22890+
22891+#ifdef CONFIG_X86_MCE
22892+ /* runs on exception stack */
22893+ENTRY(machine_check)
22894+ INTR_FRAME
22895+ pushq $0
22896+ CFI_ADJUST_CFA_OFFSET 8
22897+ paranoidentry do_machine_check
22898+ jmp paranoid_exit1
22899+ CFI_ENDPROC
22900+END(machine_check)
22901+#endif
22902+
22903+/* Call softirq on interrupt stack. Interrupts are off. */
22904+ENTRY(call_softirq)
22905+ CFI_STARTPROC
22906+ push %rbp
22907+ CFI_ADJUST_CFA_OFFSET 8
22908+ CFI_REL_OFFSET rbp,0
22909+ mov %rsp,%rbp
22910+ CFI_DEF_CFA_REGISTER rbp
22911+ incl %gs:pda_irqcount
22912+ cmove %gs:pda_irqstackptr,%rsp
22913+ push %rbp # backlink for old unwinder
22914+ call __do_softirq
22915+ leaveq
22916+ CFI_DEF_CFA_REGISTER rsp
22917+ CFI_ADJUST_CFA_OFFSET -8
22918+ decl %gs:pda_irqcount
22919+ ret
22920+ CFI_ENDPROC
22921+ENDPROC(call_softirq)
22922+
22923+#ifdef CONFIG_STACK_UNWIND
22924+ENTRY(arch_unwind_init_running)
22925+ CFI_STARTPROC
22926+ movq %r15, R15(%rdi)
22927+ movq %r14, R14(%rdi)
22928+ xchgq %rsi, %rdx
22929+ movq %r13, R13(%rdi)
22930+ movq %r12, R12(%rdi)
22931+ xorl %eax, %eax
22932+ movq %rbp, RBP(%rdi)
22933+ movq %rbx, RBX(%rdi)
22934+ movq (%rsp), %rcx
22935+ movq %rax, R11(%rdi)
22936+ movq %rax, R10(%rdi)
22937+ movq %rax, R9(%rdi)
22938+ movq %rax, R8(%rdi)
22939+ movq %rax, RAX(%rdi)
22940+ movq %rax, RCX(%rdi)
22941+ movq %rax, RDX(%rdi)
22942+ movq %rax, RSI(%rdi)
22943+ movq %rax, RDI(%rdi)
22944+ movq %rax, ORIG_RAX(%rdi)
22945+ movq %rcx, RIP(%rdi)
22946+ leaq 8(%rsp), %rcx
22947+ movq $__KERNEL_CS, CS(%rdi)
22948+ movq %rax, EFLAGS(%rdi)
22949+ movq %rcx, RSP(%rdi)
22950+ movq $__KERNEL_DS, SS(%rdi)
22951+ jmpq *%rdx
22952+ CFI_ENDPROC
22953+ENDPROC(arch_unwind_init_running)
22954+#endif
22955Index: head-2008-11-25/arch/x86/kernel/genapic_64-xen.c
22956===================================================================
22957--- /dev/null 1970-01-01 00:00:00.000000000 +0000
22958+++ head-2008-11-25/arch/x86/kernel/genapic_64-xen.c 2007-06-12 13:13:01.000000000 +0200
22959@@ -0,0 +1,143 @@
22960+/*
22961+ * Copyright 2004 James Cleverdon, IBM.
22962+ * Subject to the GNU Public License, v.2
22963+ *
22964+ * Generic APIC sub-arch probe layer.
22965+ *
22966+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
22967+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
22968+ * James Cleverdon.
22969+ */
22970+#include <linux/threads.h>
22971+#include <linux/cpumask.h>
22972+#include <linux/string.h>
22973+#include <linux/kernel.h>
22974+#include <linux/ctype.h>
22975+#include <linux/init.h>
22976+#include <linux/module.h>
22977+
22978+#include <asm/smp.h>
22979+#include <asm/ipi.h>
22980+
22981+#if defined(CONFIG_ACPI)
22982+#include <acpi/acpi_bus.h>
22983+#endif
22984+
22985+/* which logical CPU number maps to which CPU (physical APIC ID) */
22986+u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
22987+EXPORT_SYMBOL(x86_cpu_to_apicid);
22988+u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
22989+
22990+extern struct genapic apic_cluster;
22991+extern struct genapic apic_flat;
22992+extern struct genapic apic_physflat;
22993+
22994+#ifndef CONFIG_XEN
22995+struct genapic *genapic = &apic_flat;
22996+#else
22997+extern struct genapic apic_xen;
22998+struct genapic *genapic = &apic_xen;
22999+#endif
23000+
23001+
23002+/*
23003+ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
23004+ */
23005+void __init clustered_apic_check(void)
23006+{
23007+#ifndef CONFIG_XEN
23008+ long i;
23009+ u8 clusters, max_cluster;
23010+ u8 id;
23011+ u8 cluster_cnt[NUM_APIC_CLUSTERS];
23012+ int max_apic = 0;
23013+
23014+#if defined(CONFIG_ACPI)
23015+ /*
23016+ * Some x86_64 machines use physical APIC mode regardless of how many
23017+ * procs/clusters are present (x86_64 ES7000 is an example).
23018+ */
23019+ if (acpi_fadt.revision > FADT2_REVISION_ID)
23020+ if (acpi_fadt.force_apic_physical_destination_mode) {
23021+ genapic = &apic_cluster;
23022+ goto print;
23023+ }
23024+#endif
23025+
23026+ memset(cluster_cnt, 0, sizeof(cluster_cnt));
23027+ for (i = 0; i < NR_CPUS; i++) {
23028+ id = bios_cpu_apicid[i];
23029+ if (id == BAD_APICID)
23030+ continue;
23031+ if (id > max_apic)
23032+ max_apic = id;
23033+ cluster_cnt[APIC_CLUSTERID(id)]++;
23034+ }
23035+
23036+ /* Don't use clustered mode on AMD platforms. */
23037+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
23038+ genapic = &apic_physflat;
23039+#ifndef CONFIG_HOTPLUG_CPU
23040+ /* In the CPU hotplug case we cannot use broadcast mode
23041+ because that opens a race when a CPU is removed.
23042+ Stay at physflat mode in this case.
23043+ It is bad to do this unconditionally though. Once
23044+ we have ACPI platform support for CPU hotplug
23045+	   we should detect hotplug capability from ACPI tables and
23046+ only do this when really needed. -AK */
23047+ if (max_apic <= 8)
23048+ genapic = &apic_flat;
23049+#endif
23050+ goto print;
23051+ }
23052+
23053+ clusters = 0;
23054+ max_cluster = 0;
23055+
23056+ for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
23057+ if (cluster_cnt[i] > 0) {
23058+ ++clusters;
23059+ if (cluster_cnt[i] > max_cluster)
23060+ max_cluster = cluster_cnt[i];
23061+ }
23062+ }
23063+
23064+ /*
23065+ * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
23066+	 * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical mode,
23067+	 * else physical mode.
23068+ * (We don't use lowest priority delivery + HW APIC IRQ steering, so
23069+ * can ignore the clustered logical case and go straight to physical.)
23070+ */
23071+ if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster) {
23072+#ifdef CONFIG_HOTPLUG_CPU
23073+ /* Don't use APIC shortcuts in CPU hotplug to avoid races */
23074+ genapic = &apic_physflat;
23075+#else
23076+ genapic = &apic_flat;
23077+#endif
23078+ } else
23079+ genapic = &apic_cluster;
23080+
23081+print:
23082+#else
23083+ /* hardcode to xen apic functions */
23084+ genapic = &apic_xen;
23085+#endif
23086+ printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
23087+}
23088+
23089+/* Same for both flat and clustered. */
23090+
23091+#ifdef CONFIG_XEN
23092+extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
23093+#endif
23094+
23095+void send_IPI_self(int vector)
23096+{
23097+#ifndef CONFIG_XEN
23098+ __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
23099+#else
23100+ xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
23101+#endif
23102+}
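Callers of the APIC layer never test CONFIG_XEN themselves; they dispatch through the genapic function pointers selected above. A hedged kernel-context sketch of that dispatch (the helper name is made up, cpumask_of_cpu() and the genapic pointer come from the surrounding headers):

static inline void send_ipi_to_cpu_sketch(int cpu, int vector)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	/* Ends up in apic_xen, apic_flat, apic_physflat or apic_cluster. */
	genapic->send_IPI_mask(mask, vector);
}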
23103Index: head-2008-11-25/arch/x86/kernel/genapic_xen_64.c
23104===================================================================
23105--- /dev/null 1970-01-01 00:00:00.000000000 +0000
23106+++ head-2008-11-25/arch/x86/kernel/genapic_xen_64.c 2007-06-12 13:13:01.000000000 +0200
23107@@ -0,0 +1,161 @@
23108+/*
23109+ * Copyright 2004 James Cleverdon, IBM.
23110+ * Subject to the GNU Public License, v.2
23111+ *
23112+ * Xen APIC subarch code. Maximum 8 CPUs, logical delivery.
23113+ *
23114+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
23115+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
23116+ * James Cleverdon.
23117+ *
23118+ * Hacked to pieces for Xen by Chris Wright.
23119+ */
23120+#include <linux/threads.h>
23121+#include <linux/cpumask.h>
23122+#include <linux/string.h>
23123+#include <linux/kernel.h>
23124+#include <linux/ctype.h>
23125+#include <linux/init.h>
23126+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
23127+#include <asm/smp.h>
23128+#include <asm/ipi.h>
23129+#else
23130+#include <asm/apic.h>
23131+#include <asm/apicdef.h>
23132+#include <asm/genapic.h>
23133+#endif
23134+#include <xen/evtchn.h>
23135+
23136+DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
23137+
23138+static inline void __send_IPI_one(unsigned int cpu, int vector)
23139+{
23140+ int irq = per_cpu(ipi_to_irq, cpu)[vector];
23141+ BUG_ON(irq < 0);
23142+ notify_remote_via_irq(irq);
23143+}
23144+
23145+void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
23146+{
23147+ int cpu;
23148+
23149+ switch (shortcut) {
23150+ case APIC_DEST_SELF:
23151+ __send_IPI_one(smp_processor_id(), vector);
23152+ break;
23153+ case APIC_DEST_ALLBUT:
23154+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23155+ if (cpu == smp_processor_id())
23156+ continue;
23157+ if (cpu_isset(cpu, cpu_online_map)) {
23158+ __send_IPI_one(cpu, vector);
23159+ }
23160+ }
23161+ break;
23162+ case APIC_DEST_ALLINC:
23163+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23164+ if (cpu_isset(cpu, cpu_online_map)) {
23165+ __send_IPI_one(cpu, vector);
23166+ }
23167+ }
23168+ break;
23169+ default:
23170+ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
23171+ vector);
23172+ break;
23173+ }
23174+}
23175+
23176+static cpumask_t xen_target_cpus(void)
23177+{
23178+ return cpu_online_map;
23179+}
23180+
23181+/*
23182+ * Set up the logical destination ID.
23183+ * Do nothing, not called now.
23184+ */
23185+static void xen_init_apic_ldr(void)
23186+{
23187+ Dprintk("%s\n", __FUNCTION__);
23188+ return;
23189+}
23190+
23191+static void xen_send_IPI_allbutself(int vector)
23192+{
23193+ /*
23194+	 * If there are no other CPUs in the system then we get an APIC send
23195+	 * error if we try to broadcast, thus we have to avoid sending IPIs
23196+	 * in this case.
23197+ */
23198+ Dprintk("%s\n", __FUNCTION__);
23199+ if (num_online_cpus() > 1)
23200+ xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
23201+}
23202+
23203+static void xen_send_IPI_all(int vector)
23204+{
23205+ Dprintk("%s\n", __FUNCTION__);
23206+ xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
23207+}
23208+
23209+static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
23210+{
23211+ unsigned long mask = cpus_addr(cpumask)[0];
23212+ unsigned int cpu;
23213+ unsigned long flags;
23214+
23215+ Dprintk("%s\n", __FUNCTION__);
23216+ local_irq_save(flags);
23217+ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
23218+
23219+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23220+ if (cpu_isset(cpu, cpumask)) {
23221+ __send_IPI_one(cpu, vector);
23222+ }
23223+ }
23224+ local_irq_restore(flags);
23225+}
23226+
23227+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
23228+static int xen_apic_id_registered(void)
23229+{
23230+ /* better be set */
23231+ Dprintk("%s\n", __FUNCTION__);
23232+ return physid_isset(smp_processor_id(), phys_cpu_present_map);
23233+}
23234+#endif
23235+
23236+static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask)
23237+{
23238+ Dprintk("%s\n", __FUNCTION__);
23239+ return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
23240+}
23241+
23242+static unsigned int phys_pkg_id(int index_msb)
23243+{
23244+ u32 ebx;
23245+
23246+ Dprintk("%s\n", __FUNCTION__);
23247+ ebx = cpuid_ebx(1);
23248+ return ((ebx >> 24) & 0xFF) >> index_msb;
23249+}
23250+
23251+struct genapic apic_xen = {
23252+ .name = "xen",
23253+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
23254+ .int_delivery_mode = dest_LowestPrio,
23255+#endif
23256+ .int_dest_mode = (APIC_DEST_LOGICAL != 0),
23257+ .int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
23258+ .target_cpus = xen_target_cpus,
23259+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
23260+ .apic_id_registered = xen_apic_id_registered,
23261+#endif
23262+ .init_apic_ldr = xen_init_apic_ldr,
23263+ .send_IPI_all = xen_send_IPI_all,
23264+ .send_IPI_allbutself = xen_send_IPI_allbutself,
23265+ .send_IPI_mask = xen_send_IPI_mask,
23266+ .cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
23267+ .phys_pkg_id = phys_pkg_id,
23268+};
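A user-space worked example of the phys_pkg_id() computation above: CPUID leaf 1 returns the initial APIC ID in EBX[31:24], and shifting it right by index_msb strips the SMT/core bits. The input values below are purely illustrative:

#include <stdio.h>

static unsigned int pkg_id(unsigned int ebx, int index_msb)
{
	return ((ebx >> 24) & 0xFF) >> index_msb;   /* same expression as above */
}

int main(void)
{
	/* APIC ID 6 with one SMT bit (index_msb == 1) -> physical package 3 */
	printf("%u\n", pkg_id(6u << 24, 1));
	return 0;
}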
23269Index: head-2008-11-25/arch/x86/kernel/head_64-xen.S
23270===================================================================
23271--- /dev/null 1970-01-01 00:00:00.000000000 +0000
23272+++ head-2008-11-25/arch/x86/kernel/head_64-xen.S 2007-08-06 15:10:49.000000000 +0200
23273@@ -0,0 +1,214 @@
23274+/*
23275+ * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
23276+ *
23277+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
23278+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
23279+ * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
23280+ * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
23281+ *
23282+ * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
23283+ *
23284+ * Jun Nakajima <jun.nakajima@intel.com>
23285+ * Modified for Xen
23286+ */
23287+
23288+
23289+#include <linux/linkage.h>
23290+#include <linux/threads.h>
23291+#include <linux/init.h>
23292+#include <linux/elfnote.h>
23293+#include <asm/desc.h>
23294+#include <asm/segment.h>
23295+#include <asm/page.h>
23296+#include <asm/msr.h>
23297+#include <asm/cache.h>
23298+#include <asm/dwarf2.h>
23299+#include <xen/interface/elfnote.h>
23300+
23301+ .section .bootstrap.text, "ax", @progbits
23302+ .code64
23303+ .globl startup_64
23304+startup_64:
23305+ movq $(init_thread_union+THREAD_SIZE-8),%rsp
23306+
23307+	/* rsi is a pointer to the startup info structure;
23308+	   pass it to C */
23309+ movq %rsi,%rdi
23310+ pushq $0 # fake return address
23311+ jmp x86_64_start_kernel
23312+
23313+#ifdef CONFIG_ACPI_SLEEP
23314+.org 0xf00
23315+ .globl pGDT32
23316+pGDT32:
23317+ .word gdt_end-cpu_gdt_table-1
23318+ .long cpu_gdt_table-__START_KERNEL_map
23319+#endif
23320+ENTRY(stext)
23321+ENTRY(_stext)
23322+
23323+ $page = 0
23324+#define NEXT_PAGE(name) \
23325+ $page = $page + 1; \
23326+ .org $page * 0x1000; \
23327+ phys_##name = $page * 0x1000 + __PHYSICAL_START; \
23328+ENTRY(name)
23329+
23330+NEXT_PAGE(init_level4_pgt)
23331+ /* This gets initialized in x86_64_start_kernel */
23332+ .fill 512,8,0
23333+NEXT_PAGE(init_level4_user_pgt)
23334+ /*
23335+ * We update two pgd entries to make kernel and user pgd consistent
23336+ * at pgd_populate(). It can be used for kernel modules. So we place
23337+ * this page here for those cases to avoid memory corruption.
23338+ * We also use this page to establish the initial mapping for the
23339+ * vsyscall area.
23340+ */
23341+ .fill 512,8,0
23342+
23343+NEXT_PAGE(level3_kernel_pgt)
23344+ .fill 512,8,0
23345+
23346+ /*
23347+	 * This is used for the vsyscall area mapping, as we have a different
23348+	 * level4 page table for user space.
23349+ */
23350+NEXT_PAGE(level3_user_pgt)
23351+ .fill 512,8,0
23352+
23353+NEXT_PAGE(level2_kernel_pgt)
23354+ .fill 512,8,0
23355+
23356+NEXT_PAGE(hypercall_page)
23357+ CFI_STARTPROC
23358+ .rept 0x1000 / 0x20
23359+ .skip 1 /* push %rcx */
23360+ CFI_ADJUST_CFA_OFFSET 8
23361+ CFI_REL_OFFSET rcx,0
23362+ .skip 2 /* push %r11 */
23363+ CFI_ADJUST_CFA_OFFSET 8
23364+ CFI_REL_OFFSET rcx,0
23365+ .skip 5 /* mov $#,%eax */
23366+ .skip 2 /* syscall */
23367+ .skip 2 /* pop %r11 */
23368+ CFI_ADJUST_CFA_OFFSET -8
23369+ CFI_RESTORE r11
23370+ .skip 1 /* pop %rcx */
23371+ CFI_ADJUST_CFA_OFFSET -8
23372+ CFI_RESTORE rcx
23373+ .align 0x20,0 /* ret */
23374+ .endr
23375+ CFI_ENDPROC
23376+
23377+#undef NEXT_PAGE
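The .rept block above only reserves the page: 0x1000 / 0x20 = 128 slots of 32 bytes, one per hypercall, and Xen writes the actual stubs at boot (see the HYPERCALL_PAGE ELF note further down). A hedged sketch of how a caller locates the stub for hypercall number nr:

extern char hypercall_page[];           /* the page reserved above */

static inline void *hypercall_stub(unsigned int nr)
{
	return hypercall_page + nr * 32;        /* each stub occupies one 0x20 slot */
}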
23378+
23379+ .data
23380+/* Just a dummy symbol to allow compilation. Not used in the sleep path */
23381+#ifdef CONFIG_ACPI_SLEEP
23382+ .align PAGE_SIZE
23383+ENTRY(wakeup_level4_pgt)
23384+ .fill 512,8,0
23385+#endif
23386+
23387+ .data
23388+
23389+ .align 16
23390+ .globl cpu_gdt_descr
23391+cpu_gdt_descr:
23392+ .word gdt_end-cpu_gdt_table-1
23393+gdt:
23394+ .quad cpu_gdt_table
23395+#ifdef CONFIG_SMP
23396+ .rept NR_CPUS-1
23397+ .word 0
23398+ .quad 0
23399+ .endr
23400+#endif
23401+
23402+/* We need valid kernel segments for data and code in long mode too;
23403+ * IRET will check the segment types. kkeil 2000/10/28
23404+ * Also, sysret mandates a special GDT layout.
23405+ */
23406+
23407+ .section .data.page_aligned, "aw"
23408+ .align PAGE_SIZE
23409+
23410+/* The TLS descriptors are currently at a different place compared to i386.
23411+ Hopefully nobody expects them at a fixed place (Wine?) */
23412+
23413+ENTRY(cpu_gdt_table)
23414+ .quad 0x0000000000000000 /* NULL descriptor */
23415+ .quad 0x0 /* unused */
23416+ .quad 0x00af9a000000ffff /* __KERNEL_CS */
23417+ .quad 0x00cf92000000ffff /* __KERNEL_DS */
23418+ .quad 0x00cffa000000ffff /* __USER32_CS */
23419+ .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
23420+ .quad 0x00affa000000ffff /* __USER_CS */
23421+ .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
23422+ .quad 0,0 /* TSS */
23423+ .quad 0,0 /* LDT */
23424+ .quad 0,0,0 /* three TLS descriptors */
23425+ .quad 0 /* unused */
23426+gdt_end:
23427+ /* asm/segment.h:GDT_ENTRIES must match this */
23428+ /* This should be a multiple of the cache line size */
23429+ /* GDTs of other CPUs are now dynamically allocated */
23430+
23431+ /* zero the remaining page */
23432+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
23433+
23434+ .section .bss.page_aligned, "aw", @nobits
23435+ .align PAGE_SIZE
23436+ENTRY(empty_zero_page)
23437+ .skip PAGE_SIZE
23438+
23439+#if CONFIG_XEN_COMPAT <= 0x030002
23440+/*
23441+ * __xen_guest information
23442+ */
23443+.macro utoh value
23444+ .if (\value) < 0 || (\value) >= 0x10
23445+ utoh (((\value)>>4)&0x0fffffffffffffff)
23446+ .endif
23447+ .if ((\value) & 0xf) < 10
23448+ .byte '0' + ((\value) & 0xf)
23449+ .else
23450+ .byte 'A' + ((\value) & 0xf) - 10
23451+ .endif
23452+.endm
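A C rendering of the utoh assembler macro above, for reference: it recurses on the high nibbles first and then emits the low nibble, so the value comes out as upper-case hex with no leading zeros. User-space sketch only:

#include <stdio.h>

static void utoh_sketch(unsigned long long value)
{
	if (value >= 0x10)
		utoh_sketch(value >> 4);   /* high nibbles first, like the .if recursion */
	putchar("0123456789ABCDEF"[value & 0xf]);
}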
23453+
23454+.section __xen_guest
23455+ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
23456+ .ascii ",XEN_VER=xen-3.0"
23457+ .ascii ",VIRT_BASE=0x"
23458+ utoh __START_KERNEL_map
23459+ .ascii ",ELF_PADDR_OFFSET=0x"
23460+ utoh __START_KERNEL_map
23461+ .ascii ",VIRT_ENTRY=0x"
23462+ utoh (__START_KERNEL_map + __PHYSICAL_START)
23463+ .ascii ",HYPERCALL_PAGE=0x"
23464+ utoh (phys_hypercall_page >> PAGE_SHIFT)
23465+ .ascii ",FEATURES=writable_page_tables"
23466+ .ascii "|writable_descriptor_tables"
23467+ .ascii "|auto_translated_physmap"
23468+ .ascii "|supervisor_mode_kernel"
23469+ .ascii ",LOADER=generic"
23470+ .byte 0
23471+#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
23472+
23473+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
23474+ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
23475+ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
23476+ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .quad, __START_KERNEL_map)
23477+#if CONFIG_XEN_COMPAT <= 0x030002
23478+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, __START_KERNEL_map)
23479+#else
23480+ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, 0)
23481+#endif
23482+ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad, startup_64)
23483+ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad, hypercall_page)
23484+ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
23485+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
23486+ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
23487+ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
23488Index: head-2008-11-25/arch/x86/kernel/head64-xen.c
23489===================================================================
23490--- /dev/null 1970-01-01 00:00:00.000000000 +0000
23491+++ head-2008-11-25/arch/x86/kernel/head64-xen.c 2007-06-12 13:13:01.000000000 +0200
23492@@ -0,0 +1,162 @@
23493+/*
23494+ * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
23495+ *
23496+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
23497+ *
23498+ * Jun Nakajima <jun.nakajima@intel.com>
23499+ * Modified for Xen.
23500+ */
23501+
23502+#include <linux/init.h>
23503+#include <linux/linkage.h>
23504+#include <linux/types.h>
23505+#include <linux/kernel.h>
23506+#include <linux/string.h>
23507+#include <linux/percpu.h>
23508+#include <linux/module.h>
23509+
23510+#include <asm/processor.h>
23511+#include <asm/proto.h>
23512+#include <asm/smp.h>
23513+#include <asm/bootsetup.h>
23514+#include <asm/setup.h>
23515+#include <asm/desc.h>
23516+#include <asm/pgtable.h>
23517+#include <asm/sections.h>
23518+
23519+unsigned long start_pfn;
23520+
23521+/* Don't add a printk in there. printk relies on the PDA which is not initialized
23522+ yet. */
23523+#if 0
23524+static void __init clear_bss(void)
23525+{
23526+ memset(__bss_start, 0,
23527+ (unsigned long) __bss_stop - (unsigned long) __bss_start);
23528+}
23529+#endif
23530+
23531+#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
23532+#define OLD_CL_MAGIC_ADDR 0x90020
23533+#define OLD_CL_MAGIC 0xA33F
23534+#define OLD_CL_BASE_ADDR 0x90000
23535+#define OLD_CL_OFFSET 0x90022
23536+
23537+extern char saved_command_line[];
23538+
23539+static void __init copy_bootdata(char *real_mode_data)
23540+{
23541+#ifndef CONFIG_XEN
23542+ int new_data;
23543+ char * command_line;
23544+
23545+ memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
23546+ new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
23547+ if (!new_data) {
23548+ if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
23549+ printk("so old bootloader that it does not support commandline?!\n");
23550+ return;
23551+ }
23552+ new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
23553+ printk("old bootloader convention, maybe loadlin?\n");
23554+ }
23555+ command_line = (char *) ((u64)(new_data));
23556+ memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
23557+#else
23558+ int max_cmdline;
23559+
23560+ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
23561+ max_cmdline = COMMAND_LINE_SIZE;
23562+ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
23563+ saved_command_line[max_cmdline-1] = '\0';
23564+#endif
23565+ printk("Bootdata ok (command line is %s)\n", saved_command_line);
23566+}
23567+
23568+static void __init setup_boot_cpu_data(void)
23569+{
23570+ unsigned int dummy, eax;
23571+
23572+ /* get vendor info */
23573+ cpuid(0, (unsigned int *)&boot_cpu_data.cpuid_level,
23574+ (unsigned int *)&boot_cpu_data.x86_vendor_id[0],
23575+ (unsigned int *)&boot_cpu_data.x86_vendor_id[8],
23576+ (unsigned int *)&boot_cpu_data.x86_vendor_id[4]);
23577+
23578+ /* get cpu type */
23579+ cpuid(1, &eax, &dummy, &dummy,
23580+ (unsigned int *) &boot_cpu_data.x86_capability);
23581+ boot_cpu_data.x86 = (eax >> 8) & 0xf;
23582+ boot_cpu_data.x86_model = (eax >> 4) & 0xf;
23583+ boot_cpu_data.x86_mask = eax & 0xf;
23584+}
23585+
23586+#include <xen/interface/memory.h>
23587+unsigned long *machine_to_phys_mapping;
23588+EXPORT_SYMBOL(machine_to_phys_mapping);
23589+unsigned int machine_to_phys_order;
23590+EXPORT_SYMBOL(machine_to_phys_order);
23591+
23592+void __init x86_64_start_kernel(char * real_mode_data)
23593+{
23594+ struct xen_machphys_mapping mapping;
23595+ unsigned long machine_to_phys_nr_ents;
23596+ char *s;
23597+ int i;
23598+
23599+ setup_xen_features();
23600+
23601+ xen_start_info = (struct start_info *)real_mode_data;
23602+ if (!xen_feature(XENFEAT_auto_translated_physmap))
23603+ phys_to_machine_mapping =
23604+ (unsigned long *)xen_start_info->mfn_list;
23605+ start_pfn = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) +
23606+ xen_start_info->nr_pt_frames;
23607+
23608+ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
23609+ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
23610+ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
23611+ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
23612+ machine_to_phys_nr_ents = mapping.max_mfn + 1;
23613+ }
23614+ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
23615+ machine_to_phys_order++;
23616+
23617+#if 0
23618+ for (i = 0; i < 256; i++)
23619+ set_intr_gate(i, early_idt_handler);
23620+ asm volatile("lidt %0" :: "m" (idt_descr));
23621+#endif
23622+
23623+ /*
23624+ * This must be called really, really early:
23625+ */
23626+ lockdep_init();
23627+
23628+ for (i = 0; i < NR_CPUS; i++)
23629+ cpu_pda(i) = &boot_cpu_pda[i];
23630+
23631+ pda_init(0);
23632+ copy_bootdata(real_mode_data);
23633+#ifdef CONFIG_SMP
23634+ cpu_set(0, cpu_online_map);
23635+#endif
23636+ s = strstr(saved_command_line, "earlyprintk=");
23637+ if (s != NULL)
23638+ setup_early_printk(strchr(s, '=') + 1);
23639+#ifdef CONFIG_NUMA
23640+ s = strstr(saved_command_line, "numa=");
23641+ if (s != NULL)
23642+ numa_setup(s+5);
23643+#endif
23644+#ifdef CONFIG_X86_IO_APIC
23645+ if (strstr(saved_command_line, "disableapic"))
23646+ disable_apic = 1;
23647+#endif
23648+ /* You need early console to see that */
23649+ if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
23650+ panic("Kernel too big for kernel mapping\n");
23651+
23652+ setup_boot_cpu_data();
23653+ start_kernel();
23654+}
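The while loop near the top of x86_64_start_kernel() computes machine_to_phys_order as the smallest order with (1UL << order) >= machine_to_phys_nr_ents, i.e. ceil(log2(nr_ents)). The same computation as a stand-alone sketch:

static unsigned int m2p_order(unsigned long nr_ents)
{
	unsigned int order = 0;

	while ((1UL << order) < nr_ents)
		order++;
	return order;           /* e.g. nr_ents == 1048576 gives order 20 */
}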
23655Index: head-2008-11-25/arch/x86/kernel/io_apic_64-xen.c
23656===================================================================
23657--- /dev/null 1970-01-01 00:00:00.000000000 +0000
23658+++ head-2008-11-25/arch/x86/kernel/io_apic_64-xen.c 2008-11-25 12:22:34.000000000 +0100
23659@@ -0,0 +1,2268 @@
23660+/*
23661+ * Intel IO-APIC support for multi-Pentium hosts.
23662+ *
23663+ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
23664+ *
23665+ * Many thanks to Stig Venaas for trying out countless experimental
23666+ * patches and reporting/debugging problems patiently!
23667+ *
23668+ * (c) 1999, Multiple IO-APIC support, developed by
23669+ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
23670+ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
23671+ * further tested and cleaned up by Zach Brown <zab@redhat.com>
23672+ * and Ingo Molnar <mingo@redhat.com>
23673+ *
23674+ * Fixes
23675+ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
23676+ * thanks to Eric Gilmore
23677+ * and Rolf G. Tews
23678+ * for testing these extensively
23679+ * Paul Diefenbaugh : Added full ACPI support
23680+ */
23681+
23682+#include <linux/mm.h>
23683+#include <linux/interrupt.h>
23684+#include <linux/init.h>
23685+#include <linux/delay.h>
23686+#include <linux/sched.h>
23687+#include <linux/smp_lock.h>
23688+#include <linux/mc146818rtc.h>
23689+#include <linux/acpi.h>
23690+#include <linux/sysdev.h>
23691+#ifdef CONFIG_ACPI
23692+#include <acpi/acpi_bus.h>
23693+#endif
23694+
23695+#include <asm/io.h>
23696+#include <asm/smp.h>
23697+#include <asm/desc.h>
23698+#include <asm/proto.h>
23699+#include <asm/mach_apic.h>
23700+#include <asm/acpi.h>
23701+#include <asm/dma.h>
23702+#include <asm/nmi.h>
23703+
23704+#define __apicdebuginit __init
23705+
23706+int sis_apic_bug; /* not actually supported, dummy for compile */
23707+
23708+static int no_timer_check;
23709+
23710+int disable_timer_pin_1 __initdata;
23711+
23712+#ifndef CONFIG_XEN
23713+int timer_over_8254 __initdata = 0;
23714+
23715+/* Where, if anywhere, the i8259 is connected in external int mode */
23716+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
23717+#endif
23718+
23719+static DEFINE_SPINLOCK(ioapic_lock);
23720+static DEFINE_SPINLOCK(vector_lock);
23721+
23722+/*
23723+ * # of IRQ routing registers
23724+ */
23725+int nr_ioapic_registers[MAX_IO_APICS];
23726+
23727+/*
23728+ * Rough estimate of how many shared IRQs there are; can
23729+ * be changed anytime.
23730+ */
23731+#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
23732+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
23733+
23734+/*
23735+ * This is performance-critical; we want to do it in O(1).
23736+ *
23737+ * The indexing order of this array favors 1:1 mappings
23738+ * between pins and IRQs.
23739+ */
23740+
23741+static struct irq_pin_list {
23742+ short apic, pin, next;
23743+} irq_2_pin[PIN_MAP_SIZE];
23744+
23745+int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
23746+#ifdef CONFIG_PCI_MSI
23747+#define vector_to_irq(vector) \
23748+ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
23749+#else
23750+#define vector_to_irq(vector) (vector)
23751+#endif
23752+
23753+#ifdef CONFIG_XEN
23754+
23755+#include <xen/interface/xen.h>
23756+#include <xen/interface/physdev.h>
23757+#include <xen/evtchn.h>
23758+
23759+/* Fake i8259 */
23760+#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
23761+#define disable_8259A_irq(_irq) ((void)0)
23762+#define i8259A_irq_pending(_irq) (0)
23763+
23764+unsigned long io_apic_irqs;
23765+
23766+static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
23767+{
23768+ struct physdev_apic apic_op;
23769+ int ret;
23770+
23771+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
23772+ apic_op.reg = reg;
23773+ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
23774+ if (ret)
23775+ return ret;
23776+ return apic_op.value;
23777+}
23778+
23779+static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
23780+{
23781+ struct physdev_apic apic_op;
23782+
23783+ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
23784+ apic_op.reg = reg;
23785+ apic_op.value = value;
23786+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op));
23787+}
23788+
23789+#define io_apic_read(a,r) xen_io_apic_read(a,r)
23790+#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
23791+
23792+#define clear_IO_APIC() ((void)0)
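With the wrappers above, every IO-APIC register access from a Xen guest becomes a PHYSDEVOP_apic_read/write hypercall instead of an MMIO access. A hedged usage sketch reading the version register (standard IO-APIC register 0x01) of IO-APIC 0:

static unsigned int ioapic0_version_sketch(void)
{
	/* io_apic_read() expands to xen_io_apic_read() in the Xen build. */
	return io_apic_read(0, 0x01) & 0xff;
}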
23793+
23794+#else
23795+
23796+#ifdef CONFIG_SMP
23797+static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
23798+{
23799+ unsigned long flags;
23800+ unsigned int dest;
23801+ cpumask_t tmp;
23802+
23803+ cpus_and(tmp, mask, cpu_online_map);
23804+ if (cpus_empty(tmp))
23805+ tmp = TARGET_CPUS;
23806+
23807+ cpus_and(mask, tmp, CPU_MASK_ALL);
23808+
23809+ dest = cpu_mask_to_apicid(mask);
23810+
23811+ /*
23812+ * Only the high 8 bits are valid.
23813+ */
23814+ dest = SET_APIC_LOGICAL_ID(dest);
23815+
23816+ spin_lock_irqsave(&ioapic_lock, flags);
23817+ __DO_ACTION(1, = dest, )
23818+ set_irq_info(irq, mask);
23819+ spin_unlock_irqrestore(&ioapic_lock, flags);
23820+}
23821+#endif
23822+
23823+#endif /* !CONFIG_XEN */
23824+
23825+/*
23826+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
23827+ * shared ISA-space IRQs, so we have to support them. We are super
23828+ * fast in the common case, and fast for shared ISA-space IRQs.
23829+ */
23830+static void add_pin_to_irq(unsigned int irq, int apic, int pin)
23831+{
23832+ static int first_free_entry = NR_IRQS;
23833+ struct irq_pin_list *entry = irq_2_pin + irq;
23834+
23835+ BUG_ON(irq >= NR_IRQS);
23836+ while (entry->next)
23837+ entry = irq_2_pin + entry->next;
23838+
23839+ if (entry->pin != -1) {
23840+ entry->next = first_free_entry;
23841+ entry = irq_2_pin + entry->next;
23842+ if (++first_free_entry >= PIN_MAP_SIZE)
23843+ panic("io_apic.c: ran out of irq_2_pin entries!");
23844+ }
23845+ entry->apic = apic;
23846+ entry->pin = pin;
23847+}
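irq_2_pin[] is a small chained array: slot irq is the head, and ->next indexes the overflow entries that add_pin_to_irq() above appends for shared ISA IRQs. A hedged sketch of walking one chain, the same traversal the __DO_ACTION macro below performs (helper name is illustrative):

static int count_pins_sketch(unsigned int irq)
{
	struct irq_pin_list *entry = irq_2_pin + irq;
	int pins = 0;

	for (;;) {
		if (entry->pin != -1)
			pins++;
		if (!entry->next)
			break;
		entry = irq_2_pin + entry->next;
	}
	return pins;
}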
23848+
23849+#ifndef CONFIG_XEN
23850+#define __DO_ACTION(R, ACTION, FINAL) \
23851+ \
23852+{ \
23853+ int pin; \
23854+ struct irq_pin_list *entry = irq_2_pin + irq; \
23855+ \
23856+ BUG_ON(irq >= NR_IRQS); \
23857+ for (;;) { \
23858+ unsigned int reg; \
23859+ pin = entry->pin; \
23860+ if (pin == -1) \
23861+ break; \
23862+ reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
23863+ reg ACTION; \
23864+ io_apic_modify(entry->apic, reg); \
23865+ if (!entry->next) \
23866+ break; \
23867+ entry = irq_2_pin + entry->next; \
23868+ } \
23869+ FINAL; \
23870+}
23871+
23872+#define DO_ACTION(name,R,ACTION, FINAL) \
23873+ \
23874+ static void name##_IO_APIC_irq (unsigned int irq) \
23875+ __DO_ACTION(R, ACTION, FINAL)
23876+
23877+DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
23878+ /* mask = 1 */
23879+DO_ACTION( __unmask, 0, &= 0xfffeffff, )
23880+ /* mask = 0 */
23881+
23882+static void mask_IO_APIC_irq (unsigned int irq)
23883+{
23884+ unsigned long flags;
23885+
23886+ spin_lock_irqsave(&ioapic_lock, flags);
23887+ __mask_IO_APIC_irq(irq);
23888+ spin_unlock_irqrestore(&ioapic_lock, flags);
23889+}
23890+
23891+static void unmask_IO_APIC_irq (unsigned int irq)
23892+{
23893+ unsigned long flags;
23894+
23895+ spin_lock_irqsave(&ioapic_lock, flags);
23896+ __unmask_IO_APIC_irq(irq);
23897+ spin_unlock_irqrestore(&ioapic_lock, flags);
23898+}
23899+
23900+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
23901+{
23902+ struct IO_APIC_route_entry entry;
23903+ unsigned long flags;
23904+
23905+ /* Check delivery_mode to be sure we're not clearing an SMI pin */
23906+ spin_lock_irqsave(&ioapic_lock, flags);
23907+ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
23908+ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
23909+ spin_unlock_irqrestore(&ioapic_lock, flags);
23910+ if (entry.delivery_mode == dest_SMI)
23911+ return;
23912+ /*
23913+ * Disable it in the IO-APIC irq-routing table:
23914+ */
23915+ memset(&entry, 0, sizeof(entry));
23916+ entry.mask = 1;
23917+ spin_lock_irqsave(&ioapic_lock, flags);
23918+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
23919+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
23920+ spin_unlock_irqrestore(&ioapic_lock, flags);
23921+}
23922+
23923+static void clear_IO_APIC (void)
23924+{
23925+ int apic, pin;
23926+
23927+ for (apic = 0; apic < nr_ioapics; apic++)
23928+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
23929+ clear_IO_APIC_pin(apic, pin);
23930+}
23931+
23932+#endif /* !CONFIG_XEN */
23933+
23934+static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
23935+
23936+/*
23937+ * Support for broken MP BIOSes: enables hand-redirection of PIRQ0-7 to
23938+ * specific CPU-side IRQs.
23939+ */
23940+
23941+#define MAX_PIRQS 8
23942+static int pirq_entries [MAX_PIRQS];
23943+static int pirqs_enabled;
23944+int skip_ioapic_setup;
23945+int ioapic_force;
23946+
23947+/* dummy parsing: see setup.c */
23948+
23949+static int __init disable_ioapic_setup(char *str)
23950+{
23951+ skip_ioapic_setup = 1;
23952+ return 1;
23953+}
23954+
23955+static int __init enable_ioapic_setup(char *str)
23956+{
23957+ ioapic_force = 1;
23958+ skip_ioapic_setup = 0;
23959+ return 1;
23960+}
23961+
23962+__setup("noapic", disable_ioapic_setup);
23963+__setup("apic", enable_ioapic_setup);
23964+
23965+#ifndef CONFIG_XEN
23966+static int __init setup_disable_8254_timer(char *s)
23967+{
23968+ timer_over_8254 = -1;
23969+ return 1;
23970+}
23971+static int __init setup_enable_8254_timer(char *s)
23972+{
23973+ timer_over_8254 = 2;
23974+ return 1;
23975+}
23976+
23977+__setup("disable_8254_timer", setup_disable_8254_timer);
23978+__setup("enable_8254_timer", setup_enable_8254_timer);
23979+#endif /* !CONFIG_XEN */
23980+
23981+#include <asm/pci-direct.h>
23982+#include <linux/pci_ids.h>
23983+#include <linux/pci.h>
23984+
23985+
23986+#ifdef CONFIG_ACPI
23987+
23988+static int nvidia_hpet_detected __initdata;
23989+
23990+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
23991+{
23992+ nvidia_hpet_detected = 1;
23993+ return 0;
23994+}
23995+#endif
23996+
23997+/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
23998+ off. Check for an Nvidia or VIA PCI bridge and turn it off.
23999+ Use pci direct infrastructure because this runs before the PCI subsystem.
24000+
24001+ Can be overridden with "apic"
24002+
24003+ And another hack to disable the IOMMU on VIA chipsets.
24004+
24005+ ... and others. Really should move this somewhere else.
24006+
24007+ Kludge-O-Rama. */
24008+void __init check_ioapic(void)
24009+{
24010+ int num,slot,func;
24011+ /* Poor man's PCI discovery */
24012+ for (num = 0; num < 32; num++) {
24013+ for (slot = 0; slot < 32; slot++) {
24014+ for (func = 0; func < 8; func++) {
24015+ u32 class;
24016+ u32 vendor;
24017+ u8 type;
24018+ class = read_pci_config(num,slot,func,
24019+ PCI_CLASS_REVISION);
24020+ if (class == 0xffffffff)
24021+ break;
24022+
24023+ if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
24024+ continue;
24025+
24026+ vendor = read_pci_config(num, slot, func,
24027+ PCI_VENDOR_ID);
24028+ vendor &= 0xffff;
24029+ switch (vendor) {
24030+ case PCI_VENDOR_ID_VIA:
24031+#ifdef CONFIG_IOMMU
24032+ if ((end_pfn > MAX_DMA32_PFN ||
24033+ force_iommu) &&
24034+ !iommu_aperture_allowed) {
24035+ printk(KERN_INFO
24036+ "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
24037+ iommu_aperture_disabled = 1;
24038+ }
24039+#endif
24040+ return;
24041+ case PCI_VENDOR_ID_NVIDIA:
24042+#ifdef CONFIG_ACPI
24043+ /*
24044+ * All timer overrides on Nvidia are
24045+ * wrong unless HPET is enabled.
24046+ */
24047+ nvidia_hpet_detected = 0;
24048+ acpi_table_parse(ACPI_HPET,
24049+ nvidia_hpet_check);
24050+ if (nvidia_hpet_detected == 0) {
24051+ acpi_skip_timer_override = 1;
24052+ printk(KERN_INFO "Nvidia board "
24053+ "detected. Ignoring ACPI "
24054+ "timer override.\n");
24055+ }
24056+#endif
24057+ /* RED-PEN skip them on mptables too? */
24058+ return;
24059+ case PCI_VENDOR_ID_ATI:
24060+
24061+ /* This should actually be the default, but
24062+ for 2.6.16 let's do it for ATI only, where
24063+ it's really needed. */
24064+#ifndef CONFIG_XEN
24065+ if (timer_over_8254 == 1) {
24066+ timer_over_8254 = 0;
24067+ printk(KERN_INFO
24068+ "ATI board detected. Disabling timer routing over 8254.\n");
24069+ }
24070+#endif
24071+ return;
24072+ }
24073+
24074+
24075+ /* No multi-function device? */
24076+ type = read_pci_config_byte(num,slot,func,
24077+ PCI_HEADER_TYPE);
24078+ if (!(type & 0x80))
24079+ break;
24080+ }
24081+ }
24082+ }
24083+}
24084+
24085+static int __init ioapic_pirq_setup(char *str)
24086+{
24087+ int i, max;
24088+ int ints[MAX_PIRQS+1];
24089+
24090+ get_options(str, ARRAY_SIZE(ints), ints);
24091+
24092+ for (i = 0; i < MAX_PIRQS; i++)
24093+ pirq_entries[i] = -1;
24094+
24095+ pirqs_enabled = 1;
24096+ apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
24097+ max = MAX_PIRQS;
24098+ if (ints[0] < MAX_PIRQS)
24099+ max = ints[0];
24100+
24101+ for (i = 0; i < max; i++) {
24102+ apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
24103+ /*
24104+ * PIRQs are mapped upside down, usually.
24105+ */
24106+ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
24107+ }
24108+ return 1;
24109+}
24110+
24111+__setup("pirq=", ioapic_pirq_setup);
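/*
 * Standalone sketch (illustrative only) of the "pirq=" mapping performed by
 * ioapic_pirq_setup() above: the first value on the command line lands in the
 * highest PIRQ slot, so "pirq=9,11" yields PIRQ7 -> IRQ 9 and PIRQ6 -> IRQ 11.
 * A trivial strtok() parser stands in for the kernel's get_options(), and the
 * command-line string is a made-up example.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_PIRQS 8

int main(void)
{
	int pirq_entries[MAX_PIRQS];
	int vals[MAX_PIRQS], n = 0, i;
	char arg[] = "9,11";		/* would come from the kernel command line */

	for (char *tok = strtok(arg, ","); tok && n < MAX_PIRQS; tok = strtok(NULL, ","))
		vals[n++] = atoi(tok);

	for (i = 0; i < MAX_PIRQS; i++)
		pirq_entries[i] = -1;
	for (i = 0; i < n; i++)		/* mapped upside down, as in the patch */
		pirq_entries[MAX_PIRQS - i - 1] = vals[i];

	for (i = 0; i < MAX_PIRQS; i++)
		if (pirq_entries[i] != -1)
			printf("PIRQ%d -> IRQ %d\n", i, pirq_entries[i]);
	return 0;
}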
24112+
24113+/*
24114+ * Find the IRQ entry number of a certain pin.
24115+ */
24116+static int find_irq_entry(int apic, int pin, int type)
24117+{
24118+ int i;
24119+
24120+ for (i = 0; i < mp_irq_entries; i++)
24121+ if (mp_irqs[i].mpc_irqtype == type &&
24122+ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
24123+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
24124+ mp_irqs[i].mpc_dstirq == pin)
24125+ return i;
24126+
24127+ return -1;
24128+}
24129+
24130+#ifndef CONFIG_XEN
24131+/*
24132+ * Find the pin to which IRQ[irq] (ISA) is connected
24133+ */
24134+static int __init find_isa_irq_pin(int irq, int type)
24135+{
24136+ int i;
24137+
24138+ for (i = 0; i < mp_irq_entries; i++) {
24139+ int lbus = mp_irqs[i].mpc_srcbus;
24140+
24141+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
24142+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
24143+ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
24144+ (mp_irqs[i].mpc_irqtype == type) &&
24145+ (mp_irqs[i].mpc_srcbusirq == irq))
24146+
24147+ return mp_irqs[i].mpc_dstirq;
24148+ }
24149+ return -1;
24150+}
24151+
24152+static int __init find_isa_irq_apic(int irq, int type)
24153+{
24154+ int i;
24155+
24156+ for (i = 0; i < mp_irq_entries; i++) {
24157+ int lbus = mp_irqs[i].mpc_srcbus;
24158+
24159+ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
24160+ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
24161+ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
24162+ (mp_irqs[i].mpc_irqtype == type) &&
24163+ (mp_irqs[i].mpc_srcbusirq == irq))
24164+ break;
24165+ }
24166+ if (i < mp_irq_entries) {
24167+ int apic;
24168+ for(apic = 0; apic < nr_ioapics; apic++) {
24169+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
24170+ return apic;
24171+ }
24172+ }
24173+
24174+ return -1;
24175+}
24176+#endif
24177+
24178+/*
24179+ * Find a specific PCI IRQ entry.
24180+ * Not an __init, possibly needed by modules
24181+ */
24182+static int pin_2_irq(int idx, int apic, int pin);
24183+
24184+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
24185+{
24186+ int apic, i, best_guess = -1;
24187+
24188+ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
24189+ bus, slot, pin);
24190+ if (mp_bus_id_to_pci_bus[bus] == -1) {
24191+ apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
24192+ return -1;
24193+ }
24194+ for (i = 0; i < mp_irq_entries; i++) {
24195+ int lbus = mp_irqs[i].mpc_srcbus;
24196+
24197+ for (apic = 0; apic < nr_ioapics; apic++)
24198+ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
24199+ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
24200+ break;
24201+
24202+ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
24203+ !mp_irqs[i].mpc_irqtype &&
24204+ (bus == lbus) &&
24205+ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
24206+ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
24207+
24208+ if (!(apic || IO_APIC_IRQ(irq)))
24209+ continue;
24210+
24211+ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
24212+ return irq;
24213+ /*
24214+ * Use the first all-but-pin matching entry as a
24215+ * best-guess fuzzy result for broken mptables.
24216+ */
24217+ if (best_guess < 0)
24218+ best_guess = irq;
24219+ }
24220+ }
24221+ BUG_ON(best_guess >= NR_IRQS);
24222+ return best_guess;
24223+}
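/*
 * Sketch (not from the patch) of the MP-table encoding that
 * IO_APIC_get_PCI_irq_vector() relies on: for a PCI source bus entry the low
 * two bits of mpc_srcbusirq select the INTx pin (0 = INTA# .. 3 = INTD#) and
 * the next five bits hold the device/slot number.  The value used here is
 * hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int srcbusirq = 0x4a;			/* hypothetical MP-table value */
	unsigned int slot = (srcbusirq >> 2) & 0x1f;
	unsigned int pin  = srcbusirq & 3;

	printf("srcbusirq 0x%02x -> slot %u, INT%c#\n", srcbusirq, slot, 'A' + pin);
	return 0;
}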
24224+
24225+/*
24226+ * EISA Edge/Level control register, ELCR
24227+ */
24228+static int EISA_ELCR(unsigned int irq)
24229+{
24230+ if (irq < 16) {
24231+ unsigned int port = 0x4d0 + (irq >> 3);
24232+ return (inb(port) >> (irq & 7)) & 1;
24233+ }
24234+ apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
24235+ return 0;
24236+}
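/*
 * Standalone sketch of the ELCR addressing used by EISA_ELCR() above: the two
 * Edge/Level Control Registers sit at I/O ports 0x4d0 (IRQs 0-7) and 0x4d1
 * (IRQs 8-15), one bit per IRQ, and a set bit means level triggered.  The
 * inb() is replaced by a dummy value here; the IRQ number is an example.
 */
#include <stdio.h>

int main(void)
{
	unsigned int irq = 10;				/* example EISA IRQ */
	unsigned int port = 0x4d0 + (irq >> 3);		/* 0x4d1 for IRQs 8-15 */
	unsigned int bit = irq & 7;
	unsigned int elcr = 0x0c;			/* pretend inb(port) returned this */

	printf("IRQ %u: port 0x%x, bit %u, %s triggered\n", irq, port, bit,
	       (elcr >> bit) & 1 ? "level" : "edge");
	return 0;
}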
24237+
24238+/* EISA interrupts are always polarity zero and can be edge or level
24239+ * trigger depending on the ELCR value. If an interrupt is listed as
24240+ * EISA conforming in the MP table, that means its trigger type must
24241+ * be read in from the ELCR */
24242+
24243+#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
24244+#define default_EISA_polarity(idx) (0)
24245+
24246+/* ISA interrupts are always polarity zero edge triggered,
24247+ * when listed as conforming in the MP table. */
24248+
24249+#define default_ISA_trigger(idx) (0)
24250+#define default_ISA_polarity(idx) (0)
24251+
24252+/* PCI interrupts are always polarity one level triggered,
24253+ * when listed as conforming in the MP table. */
24254+
24255+#define default_PCI_trigger(idx) (1)
24256+#define default_PCI_polarity(idx) (1)
24257+
24258+/* MCA interrupts are always polarity zero level triggered,
24259+ * when listed as conforming in the MP table. */
24260+
24261+#define default_MCA_trigger(idx) (1)
24262+#define default_MCA_polarity(idx) (0)
24263+
24264+static int __init MPBIOS_polarity(int idx)
24265+{
24266+ int bus = mp_irqs[idx].mpc_srcbus;
24267+ int polarity;
24268+
24269+ /*
24270+ * Determine IRQ line polarity (high active or low active):
24271+ */
24272+ switch (mp_irqs[idx].mpc_irqflag & 3)
24273+ {
24274+ case 0: /* conforms, ie. bus-type dependent polarity */
24275+ {
24276+ switch (mp_bus_id_to_type[bus])
24277+ {
24278+ case MP_BUS_ISA: /* ISA pin */
24279+ {
24280+ polarity = default_ISA_polarity(idx);
24281+ break;
24282+ }
24283+ case MP_BUS_EISA: /* EISA pin */
24284+ {
24285+ polarity = default_EISA_polarity(idx);
24286+ break;
24287+ }
24288+ case MP_BUS_PCI: /* PCI pin */
24289+ {
24290+ polarity = default_PCI_polarity(idx);
24291+ break;
24292+ }
24293+ case MP_BUS_MCA: /* MCA pin */
24294+ {
24295+ polarity = default_MCA_polarity(idx);
24296+ break;
24297+ }
24298+ default:
24299+ {
24300+ printk(KERN_WARNING "broken BIOS!!\n");
24301+ polarity = 1;
24302+ break;
24303+ }
24304+ }
24305+ break;
24306+ }
24307+ case 1: /* high active */
24308+ {
24309+ polarity = 0;
24310+ break;
24311+ }
24312+ case 2: /* reserved */
24313+ {
24314+ printk(KERN_WARNING "broken BIOS!!\n");
24315+ polarity = 1;
24316+ break;
24317+ }
24318+ case 3: /* low active */
24319+ {
24320+ polarity = 1;
24321+ break;
24322+ }
24323+ default: /* invalid */
24324+ {
24325+ printk(KERN_WARNING "broken BIOS!!\n");
24326+ polarity = 1;
24327+ break;
24328+ }
24329+ }
24330+ return polarity;
24331+}
24332+
24333+static int MPBIOS_trigger(int idx)
24334+{
24335+ int bus = mp_irqs[idx].mpc_srcbus;
24336+ int trigger;
24337+
24338+ /*
24339+ * Determine IRQ trigger mode (edge or level sensitive):
24340+ */
24341+ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
24342+ {
24343+ case 0: /* conforms, ie. bus-type dependent */
24344+ {
24345+ switch (mp_bus_id_to_type[bus])
24346+ {
24347+ case MP_BUS_ISA: /* ISA pin */
24348+ {
24349+ trigger = default_ISA_trigger(idx);
24350+ break;
24351+ }
24352+ case MP_BUS_EISA: /* EISA pin */
24353+ {
24354+ trigger = default_EISA_trigger(idx);
24355+ break;
24356+ }
24357+ case MP_BUS_PCI: /* PCI pin */
24358+ {
24359+ trigger = default_PCI_trigger(idx);
24360+ break;
24361+ }
24362+ case MP_BUS_MCA: /* MCA pin */
24363+ {
24364+ trigger = default_MCA_trigger(idx);
24365+ break;
24366+ }
24367+ default:
24368+ {
24369+ printk(KERN_WARNING "broken BIOS!!\n");
24370+ trigger = 1;
24371+ break;
24372+ }
24373+ }
24374+ break;
24375+ }
24376+ case 1: /* edge */
24377+ {
24378+ trigger = 0;
24379+ break;
24380+ }
24381+ case 2: /* reserved */
24382+ {
24383+ printk(KERN_WARNING "broken BIOS!!\n");
24384+ trigger = 1;
24385+ break;
24386+ }
24387+ case 3: /* level */
24388+ {
24389+ trigger = 1;
24390+ break;
24391+ }
24392+ default: /* invalid */
24393+ {
24394+ printk(KERN_WARNING "broken BIOS!!\n");
24395+ trigger = 0;
24396+ break;
24397+ }
24398+ }
24399+ return trigger;
24400+}
24401+
24402+static inline int irq_polarity(int idx)
24403+{
24404+ return MPBIOS_polarity(idx);
24405+}
24406+
24407+static inline int irq_trigger(int idx)
24408+{
24409+ return MPBIOS_trigger(idx);
24410+}
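/*
 * Illustrative sketch of the mpc_irqflag decoding done by MPBIOS_polarity()
 * and MPBIOS_trigger() above: bits 0-1 carry the polarity and bits 2-3 the
 * trigger mode, with 0 meaning "conforms to the bus type".  The flag value
 * below is a made-up example.
 */
#include <stdio.h>

static const char *pol_name[4]  = { "bus default", "active high", "reserved", "active low" };
static const char *trig_name[4] = { "bus default", "edge", "reserved", "level" };

int main(void)
{
	unsigned int irqflag = 0x0f;	/* example: active low, level triggered */

	printf("polarity: %s, trigger: %s\n",
	       pol_name[irqflag & 3], trig_name[(irqflag >> 2) & 3]);
	return 0;
}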
24411+
24412+static int next_irq = 16;
24413+
24414+/*
24415+ * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
24416+ * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
24417+ * from ACPI, which can reach 800 in large boxen.
24418+ *
24419+ * Compact the sparse GSI space into a sequential IRQ series and reuse
24420+ * vectors if possible.
24421+ */
24422+int gsi_irq_sharing(int gsi)
24423+{
24424+ int i, tries, vector;
24425+
24426+ BUG_ON(gsi >= NR_IRQ_VECTORS);
24427+
24428+ if (platform_legacy_irq(gsi))
24429+ return gsi;
24430+
24431+ if (gsi_2_irq[gsi] != 0xFF)
24432+ return (int)gsi_2_irq[gsi];
24433+
24434+ tries = NR_IRQS;
24435+ try_again:
24436+ vector = assign_irq_vector(gsi);
24437+
24438+ /*
24439+ * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
24440+ * use of vector and if found, return that IRQ. However, we never want
24441+ * to share legacy IRQs, which usually have a different trigger mode
24442+ * than PCI.
24443+ */
24444+ for (i = 0; i < NR_IRQS; i++)
24445+ if (IO_APIC_VECTOR(i) == vector)
24446+ break;
24447+ if (platform_legacy_irq(i)) {
24448+ if (--tries >= 0) {
24449+ IO_APIC_VECTOR(i) = 0;
24450+ goto try_again;
24451+ }
24452+ panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
24453+ }
24454+ if (i < NR_IRQS) {
24455+ gsi_2_irq[gsi] = i;
24456+ printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
24457+ gsi, vector, i);
24458+ return i;
24459+ }
24460+
24461+ i = next_irq++;
24462+ BUG_ON(i >= NR_IRQS);
24463+ gsi_2_irq[gsi] = i;
24464+ IO_APIC_VECTOR(i) = vector;
24465+ printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
24466+ gsi, vector, i);
24467+ return i;
24468+}
24469+
24470+static int pin_2_irq(int idx, int apic, int pin)
24471+{
24472+ int irq, i;
24473+ int bus = mp_irqs[idx].mpc_srcbus;
24474+
24475+ /*
24476+ * Debugging check, we are in big trouble if this message pops up!
24477+ */
24478+ if (mp_irqs[idx].mpc_dstirq != pin)
24479+ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
24480+
24481+ switch (mp_bus_id_to_type[bus])
24482+ {
24483+ case MP_BUS_ISA: /* ISA pin */
24484+ case MP_BUS_EISA:
24485+ case MP_BUS_MCA:
24486+ {
24487+ irq = mp_irqs[idx].mpc_srcbusirq;
24488+ break;
24489+ }
24490+ case MP_BUS_PCI: /* PCI pin */
24491+ {
24492+ /*
24493+ * PCI IRQs are mapped in order
24494+ */
24495+ i = irq = 0;
24496+ while (i < apic)
24497+ irq += nr_ioapic_registers[i++];
24498+ irq += pin;
24499+ irq = gsi_irq_sharing(irq);
24500+ break;
24501+ }
24502+ default:
24503+ {
24504+ printk(KERN_ERR "unknown bus type %d.\n",bus);
24505+ irq = 0;
24506+ break;
24507+ }
24508+ }
24509+ BUG_ON(irq >= NR_IRQS);
24510+
24511+ /*
24512+ * PCI IRQ command line redirection. Yes, limits are hardcoded.
24513+ */
24514+ if ((pin >= 16) && (pin <= 23)) {
24515+ if (pirq_entries[pin-16] != -1) {
24516+ if (!pirq_entries[pin-16]) {
24517+ apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
24518+ } else {
24519+ irq = pirq_entries[pin-16];
24520+ apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
24521+ pin-16, irq);
24522+ }
24523+ }
24524+ }
24525+ BUG_ON(irq >= NR_IRQS);
24526+ return irq;
24527+}
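/*
 * Standalone sketch of the PCI branch of pin_2_irq() above: the GSI for
 * (apic, pin) is the pin number plus the total number of redirection entries
 * of all lower-numbered IO-APICs, before gsi_irq_sharing() compacts it.  The
 * per-IO-APIC register counts below are made-up example values.
 */
#include <stdio.h>

int main(void)
{
	int nr_ioapic_registers[] = { 24, 24, 16 };	/* hypothetical pin counts */
	int apic = 2, pin = 5, i, gsi = 0;

	for (i = 0; i < apic; i++)
		gsi += nr_ioapic_registers[i];
	gsi += pin;

	printf("IO-APIC %d pin %d -> GSI %d\n", apic, pin, gsi);	/* 24 + 24 + 5 = 53 */
	return 0;
}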
24528+
24529+static inline int IO_APIC_irq_trigger(int irq)
24530+{
24531+ int apic, idx, pin;
24532+
24533+ for (apic = 0; apic < nr_ioapics; apic++) {
24534+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
24535+ idx = find_irq_entry(apic,pin,mp_INT);
24536+ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
24537+ return irq_trigger(idx);
24538+ }
24539+ }
24540+ /*
24541+ * nonexistent IRQs are edge default
24542+ */
24543+ return 0;
24544+}
24545+
24546+/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
24547+u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
24548+
24549+int assign_irq_vector(int irq)
24550+{
24551+ unsigned long flags;
24552+ int vector;
24553+ struct physdev_irq irq_op;
24554+
24555+ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
24556+
24557+ if (irq < PIRQ_BASE || irq - PIRQ_BASE > NR_PIRQS)
24558+ return -EINVAL;
24559+
24560+ spin_lock_irqsave(&vector_lock, flags);
24561+
24562+ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
24563+ spin_unlock_irqrestore(&vector_lock, flags);
24564+ return IO_APIC_VECTOR(irq);
24565+ }
24566+
24567+ irq_op.irq = irq;
24568+ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
24569+ spin_unlock_irqrestore(&vector_lock, flags);
24570+ return -ENOSPC;
24571+ }
24572+
24573+ vector = irq_op.vector;
24574+ vector_irq[vector] = irq;
24575+ if (irq != AUTO_ASSIGN)
24576+ IO_APIC_VECTOR(irq) = vector;
24577+
24578+ spin_unlock_irqrestore(&vector_lock, flags);
24579+
24580+ return vector;
24581+}
24582+
24583+extern void (*interrupt[NR_IRQS])(void);
24584+#ifndef CONFIG_XEN
24585+static struct hw_interrupt_type ioapic_level_type;
24586+static struct hw_interrupt_type ioapic_edge_type;
24587+
24588+#define IOAPIC_AUTO -1
24589+#define IOAPIC_EDGE 0
24590+#define IOAPIC_LEVEL 1
24591+
24592+static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
24593+{
24594+ unsigned idx;
24595+
24596+ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
24597+
24598+ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
24599+ trigger == IOAPIC_LEVEL)
24600+ irq_desc[idx].chip = &ioapic_level_type;
24601+ else
24602+ irq_desc[idx].chip = &ioapic_edge_type;
24603+ set_intr_gate(vector, interrupt[idx]);
24604+}
24605+#else
24606+#define ioapic_register_intr(irq, vector, trigger) evtchn_register_pirq(irq)
24607+#endif /* !CONFIG_XEN */
24608+
24609+static void __init setup_IO_APIC_irqs(void)
24610+{
24611+ struct IO_APIC_route_entry entry;
24612+ int apic, pin, idx, irq, first_notcon = 1, vector;
24613+ unsigned long flags;
24614+
24615+ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
24616+
24617+ for (apic = 0; apic < nr_ioapics; apic++) {
24618+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
24619+
24620+ /*
24621+ * add it to the IO-APIC irq-routing table:
24622+ */
24623+ memset(&entry,0,sizeof(entry));
24624+
24625+ entry.delivery_mode = INT_DELIVERY_MODE;
24626+ entry.dest_mode = INT_DEST_MODE;
24627+ entry.mask = 0; /* enable IRQ */
24628+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
24629+
24630+ idx = find_irq_entry(apic,pin,mp_INT);
24631+ if (idx == -1) {
24632+ if (first_notcon) {
24633+ apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
24634+ first_notcon = 0;
24635+ } else
24636+ apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
24637+ continue;
24638+ }
24639+
24640+ entry.trigger = irq_trigger(idx);
24641+ entry.polarity = irq_polarity(idx);
24642+
24643+ if (irq_trigger(idx)) {
24644+ entry.trigger = 1;
24645+ entry.mask = 1;
24646+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
24647+ }
24648+
24649+ irq = pin_2_irq(idx, apic, pin);
24650+ add_pin_to_irq(irq, apic, pin);
24651+
24652+ if (/* !apic && */ !IO_APIC_IRQ(irq))
24653+ continue;
24654+
24655+ if (IO_APIC_IRQ(irq)) {
24656+ vector = assign_irq_vector(irq);
24657+ entry.vector = vector;
24658+
24659+ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
24660+ if (!apic && (irq < 16))
24661+ disable_8259A_irq(irq);
24662+ }
24663+ spin_lock_irqsave(&ioapic_lock, flags);
24664+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
24665+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
24666+ set_native_irq_info(irq, TARGET_CPUS);
24667+ spin_unlock_irqrestore(&ioapic_lock, flags);
24668+ }
24669+ }
24670+
24671+ if (!first_notcon)
24672+ apic_printk(APIC_VERBOSE," not connected.\n");
24673+}
24674+
24675+#ifndef CONFIG_XEN
24676+/*
24677+ * Set up the 8259A-master output pin as broadcast to all
24678+ * CPUs.
24679+ */
24680+static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
24681+{
24682+ struct IO_APIC_route_entry entry;
24683+ unsigned long flags;
24684+
24685+ memset(&entry,0,sizeof(entry));
24686+
24687+ disable_8259A_irq(0);
24688+
24689+ /* mask LVT0 */
24690+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
24691+
24692+ /*
24693+ * We use logical delivery to get the timer IRQ
24694+ * to the first CPU.
24695+ */
24696+ entry.dest_mode = INT_DEST_MODE;
24697+ entry.mask = 0; /* unmask IRQ now */
24698+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
24699+ entry.delivery_mode = INT_DELIVERY_MODE;
24700+ entry.polarity = 0;
24701+ entry.trigger = 0;
24702+ entry.vector = vector;
24703+
24704+ /*
24705+ * The timer IRQ doesn't have to know that behind the
24706+ * scene we have a 8259A-master in AEOI mode ...
24707+ */
24708+ irq_desc[0].chip = &ioapic_edge_type;
24709+
24710+ /*
24711+ * Add it to the IO-APIC irq-routing table:
24712+ */
24713+ spin_lock_irqsave(&ioapic_lock, flags);
24714+ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
24715+ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
24716+ spin_unlock_irqrestore(&ioapic_lock, flags);
24717+
24718+ enable_8259A_irq(0);
24719+}
24720+
24721+void __init UNEXPECTED_IO_APIC(void)
24722+{
24723+}
24724+
24725+void __apicdebuginit print_IO_APIC(void)
24726+{
24727+ int apic, i;
24728+ union IO_APIC_reg_00 reg_00;
24729+ union IO_APIC_reg_01 reg_01;
24730+ union IO_APIC_reg_02 reg_02;
24731+ unsigned long flags;
24732+
24733+ if (apic_verbosity == APIC_QUIET)
24734+ return;
24735+
24736+ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
24737+ for (i = 0; i < nr_ioapics; i++)
24738+ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
24739+ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
24740+
24741+ /*
24742+ * We are a bit conservative about what we expect. We have to
24743+ * know about every hardware change ASAP.
24744+ */
24745+ printk(KERN_INFO "testing the IO APIC.......................\n");
24746+
24747+ for (apic = 0; apic < nr_ioapics; apic++) {
24748+
24749+ spin_lock_irqsave(&ioapic_lock, flags);
24750+ reg_00.raw = io_apic_read(apic, 0);
24751+ reg_01.raw = io_apic_read(apic, 1);
24752+ if (reg_01.bits.version >= 0x10)
24753+ reg_02.raw = io_apic_read(apic, 2);
24754+ spin_unlock_irqrestore(&ioapic_lock, flags);
24755+
24756+ printk("\n");
24757+ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
24758+ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
24759+ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
24760+ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
24761+ UNEXPECTED_IO_APIC();
24762+
24763+ printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
24764+ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
24765+ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
24766+ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
24767+ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
24768+ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
24769+ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
24770+ (reg_01.bits.entries != 0x2E) &&
24771+ (reg_01.bits.entries != 0x3F) &&
24772+ (reg_01.bits.entries != 0x03)
24773+ )
24774+ UNEXPECTED_IO_APIC();
24775+
24776+ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
24777+ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
24778+ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
24779+ (reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
24780+ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
24781+ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
24782+ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
24783+ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
24784+ )
24785+ UNEXPECTED_IO_APIC();
24786+ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
24787+ UNEXPECTED_IO_APIC();
24788+
24789+ if (reg_01.bits.version >= 0x10) {
24790+ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
24791+ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
24792+ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
24793+ UNEXPECTED_IO_APIC();
24794+ }
24795+
24796+ printk(KERN_DEBUG ".... IRQ redirection table:\n");
24797+
24798+ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
24799+ " Stat Dest Deli Vect: \n");
24800+
24801+ for (i = 0; i <= reg_01.bits.entries; i++) {
24802+ struct IO_APIC_route_entry entry;
24803+
24804+ spin_lock_irqsave(&ioapic_lock, flags);
24805+ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
24806+ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
24807+ spin_unlock_irqrestore(&ioapic_lock, flags);
24808+
24809+ printk(KERN_DEBUG " %02x %03X %02X ",
24810+ i,
24811+ entry.dest.logical.logical_dest,
24812+ entry.dest.physical.physical_dest
24813+ );
24814+
24815+ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
24816+ entry.mask,
24817+ entry.trigger,
24818+ entry.irr,
24819+ entry.polarity,
24820+ entry.delivery_status,
24821+ entry.dest_mode,
24822+ entry.delivery_mode,
24823+ entry.vector
24824+ );
24825+ }
24826+ }
24827+ if (use_pci_vector())
24828+ printk(KERN_INFO "Using vector-based indexing\n");
24829+ printk(KERN_DEBUG "IRQ to pin mappings:\n");
24830+ for (i = 0; i < NR_IRQS; i++) {
24831+ struct irq_pin_list *entry = irq_2_pin + i;
24832+ if (entry->pin < 0)
24833+ continue;
24834+ if (use_pci_vector() && !platform_legacy_irq(i))
24835+ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
24836+ else
24837+ printk(KERN_DEBUG "IRQ%d ", i);
24838+ for (;;) {
24839+ printk("-> %d:%d", entry->apic, entry->pin);
24840+ if (!entry->next)
24841+ break;
24842+ entry = irq_2_pin + entry->next;
24843+ }
24844+ printk("\n");
24845+ }
24846+
24847+ printk(KERN_INFO ".................................... done.\n");
24848+
24849+ return;
24850+}
24851+
24852+static __apicdebuginit void print_APIC_bitfield (int base)
24853+{
24854+ unsigned int v;
24855+ int i, j;
24856+
24857+ if (apic_verbosity == APIC_QUIET)
24858+ return;
24859+
24860+ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
24861+ for (i = 0; i < 8; i++) {
24862+ v = apic_read(base + i*0x10);
24863+ for (j = 0; j < 32; j++) {
24864+ if (v & (1<<j))
24865+ printk("1");
24866+ else
24867+ printk("0");
24868+ }
24869+ printk("\n");
24870+ }
24871+}
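/*
 * Standalone analogue (illustrative only) of print_APIC_bitfield() above: a
 * 256-bit APIC register block such as ISR/TMR/IRR is spread over eight 32-bit
 * registers spaced 0x10 apart, and each register is dumped as 32 characters,
 * lowest bit first.  apic_read() is replaced by a local array of fake values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t regs[8] = { 0x00000005, 0, 0, 0, 0x80000000, 0, 0, 0 };
	int i, j;

	for (i = 0; i < 8; i++) {
		for (j = 0; j < 32; j++)
			putchar((regs[i] >> j) & 1 ? '1' : '0');
		putchar('\n');
	}
	return 0;
}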
24872+
24873+void __apicdebuginit print_local_APIC(void * dummy)
24874+{
24875+ unsigned int v, ver, maxlvt;
24876+
24877+ if (apic_verbosity == APIC_QUIET)
24878+ return;
24879+
24880+ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
24881+ smp_processor_id(), hard_smp_processor_id());
24882+ v = apic_read(APIC_ID);
24883+ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
24884+ v = apic_read(APIC_LVR);
24885+ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
24886+ ver = GET_APIC_VERSION(v);
24887+ maxlvt = get_maxlvt();
24888+
24889+ v = apic_read(APIC_TASKPRI);
24890+ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
24891+
24892+ v = apic_read(APIC_ARBPRI);
24893+ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
24894+ v & APIC_ARBPRI_MASK);
24895+ v = apic_read(APIC_PROCPRI);
24896+ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
24897+
24898+ v = apic_read(APIC_EOI);
24899+ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
24900+ v = apic_read(APIC_RRR);
24901+ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
24902+ v = apic_read(APIC_LDR);
24903+ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
24904+ v = apic_read(APIC_DFR);
24905+ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
24906+ v = apic_read(APIC_SPIV);
24907+ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
24908+
24909+ printk(KERN_DEBUG "... APIC ISR field:\n");
24910+ print_APIC_bitfield(APIC_ISR);
24911+ printk(KERN_DEBUG "... APIC TMR field:\n");
24912+ print_APIC_bitfield(APIC_TMR);
24913+ printk(KERN_DEBUG "... APIC IRR field:\n");
24914+ print_APIC_bitfield(APIC_IRR);
24915+
24916+ v = apic_read(APIC_ESR);
24917+ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
24918+
24919+ v = apic_read(APIC_ICR);
24920+ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
24921+ v = apic_read(APIC_ICR2);
24922+ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
24923+
24924+ v = apic_read(APIC_LVTT);
24925+ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
24926+
24927+ if (maxlvt > 3) { /* PC is LVT#4. */
24928+ v = apic_read(APIC_LVTPC);
24929+ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
24930+ }
24931+ v = apic_read(APIC_LVT0);
24932+ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
24933+ v = apic_read(APIC_LVT1);
24934+ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
24935+
24936+ if (maxlvt > 2) { /* ERR is LVT#3. */
24937+ v = apic_read(APIC_LVTERR);
24938+ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
24939+ }
24940+
24941+ v = apic_read(APIC_TMICT);
24942+ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
24943+ v = apic_read(APIC_TMCCT);
24944+ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
24945+ v = apic_read(APIC_TDCR);
24946+ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
24947+ printk("\n");
24948+}
24949+
24950+void print_all_local_APICs (void)
24951+{
24952+ on_each_cpu(print_local_APIC, NULL, 1, 1);
24953+}
24954+
24955+void __apicdebuginit print_PIC(void)
24956+{
24957+ unsigned int v;
24958+ unsigned long flags;
24959+
24960+ if (apic_verbosity == APIC_QUIET)
24961+ return;
24962+
24963+ printk(KERN_DEBUG "\nprinting PIC contents\n");
24964+
24965+ spin_lock_irqsave(&i8259A_lock, flags);
24966+
24967+ v = inb(0xa1) << 8 | inb(0x21);
24968+ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
24969+
24970+ v = inb(0xa0) << 8 | inb(0x20);
24971+ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
24972+
24973+ outb(0x0b,0xa0);
24974+ outb(0x0b,0x20);
24975+ v = inb(0xa0) << 8 | inb(0x20);
24976+ outb(0x0a,0xa0);
24977+ outb(0x0a,0x20);
24978+
24979+ spin_unlock_irqrestore(&i8259A_lock, flags);
24980+
24981+ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
24982+
24983+ v = inb(0x4d1) << 8 | inb(0x4d0);
24984+ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
24985+}
24986+#endif /* !CONFIG_XEN */
24987+
24988+static void __init enable_IO_APIC(void)
24989+{
24990+ union IO_APIC_reg_01 reg_01;
24991+#ifndef CONFIG_XEN
24992+ int i8259_apic, i8259_pin;
24993+#endif
24994+ int i, apic;
24995+ unsigned long flags;
24996+
24997+ for (i = 0; i < PIN_MAP_SIZE; i++) {
24998+ irq_2_pin[i].pin = -1;
24999+ irq_2_pin[i].next = 0;
25000+ }
25001+ if (!pirqs_enabled)
25002+ for (i = 0; i < MAX_PIRQS; i++)
25003+ pirq_entries[i] = -1;
25004+
25005+ /*
25006+ * The number of IO-APIC IRQ registers (== #pins):
25007+ */
25008+ for (apic = 0; apic < nr_ioapics; apic++) {
25009+ spin_lock_irqsave(&ioapic_lock, flags);
25010+ reg_01.raw = io_apic_read(apic, 1);
25011+ spin_unlock_irqrestore(&ioapic_lock, flags);
25012+ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
25013+ }
25014+#ifndef CONFIG_XEN
25015+ for(apic = 0; apic < nr_ioapics; apic++) {
25016+ int pin;
25017+ /* See if any of the pins is in ExtINT mode */
25018+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
25019+ struct IO_APIC_route_entry entry;
25020+ spin_lock_irqsave(&ioapic_lock, flags);
25021+ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
25022+ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
25023+ spin_unlock_irqrestore(&ioapic_lock, flags);
25024+
25025+
25026+ /* If the interrupt line is enabled and in ExtInt mode
25027+ * I have found the pin where the i8259 is connected.
25028+ */
25029+ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
25030+ ioapic_i8259.apic = apic;
25031+ ioapic_i8259.pin = pin;
25032+ goto found_i8259;
25033+ }
25034+ }
25035+ }
25036+ found_i8259:
25037+ /* Look to see whether the MP table has reported the ExtINT */
25038+ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
25039+ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
25040+ /* Trust the MP table if nothing is setup in the hardware */
25041+ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
25042+ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
25043+ ioapic_i8259.pin = i8259_pin;
25044+ ioapic_i8259.apic = i8259_apic;
25045+ }
25046+ /* Complain if the MP table and the hardware disagree */
25047+ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
25048+ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
25049+ {
25050+ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
25051+ }
25052+#endif
25053+
25054+ /*
25055+ * Do not trust the IO-APIC being empty at bootup
25056+ */
25057+ clear_IO_APIC();
25058+}
25059+
25060+/*
25061+ * Not an __init, needed by the reboot code
25062+ */
25063+void disable_IO_APIC(void)
25064+{
25065+ /*
25066+ * Clear the IO-APIC before rebooting:
25067+ */
25068+ clear_IO_APIC();
25069+
25070+#ifndef CONFIG_XEN
25071+ /*
25072+ * If the i8259 is routed through an IOAPIC
25073+ * Put that IOAPIC in virtual wire mode
25074+ * so legacy interrupts can be delivered.
25075+ */
25076+ if (ioapic_i8259.pin != -1) {
25077+ struct IO_APIC_route_entry entry;
25078+ unsigned long flags;
25079+
25080+ memset(&entry, 0, sizeof(entry));
25081+ entry.mask = 0; /* Enabled */
25082+ entry.trigger = 0; /* Edge */
25083+ entry.irr = 0;
25084+ entry.polarity = 0; /* High */
25085+ entry.delivery_status = 0;
25086+ entry.dest_mode = 0; /* Physical */
25087+ entry.delivery_mode = dest_ExtINT; /* ExtInt */
25088+ entry.vector = 0;
25089+ entry.dest.physical.physical_dest =
25090+ GET_APIC_ID(apic_read(APIC_ID));
25091+
25092+ /*
25093+ * Add it to the IO-APIC irq-routing table:
25094+ */
25095+ spin_lock_irqsave(&ioapic_lock, flags);
25096+ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
25097+ *(((int *)&entry)+1));
25098+ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
25099+ *(((int *)&entry)+0));
25100+ spin_unlock_irqrestore(&ioapic_lock, flags);
25101+ }
25102+
25103+ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
25104+#endif
25105+}
25106+
25107+/*
25108+ * function to set the IO-APIC physical IDs based on the
25109+ * values stored in the MPC table.
25110+ *
25111+ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
25112+ */
25113+
25114+#ifndef CONFIG_XEN
25115+static void __init setup_ioapic_ids_from_mpc (void)
25116+{
25117+ union IO_APIC_reg_00 reg_00;
25118+ int apic;
25119+ int i;
25120+ unsigned char old_id;
25121+ unsigned long flags;
25122+
25123+ /*
25124+ * Set the IOAPIC ID to the value stored in the MPC table.
25125+ */
25126+ for (apic = 0; apic < nr_ioapics; apic++) {
25127+
25128+ /* Read the register 0 value */
25129+ spin_lock_irqsave(&ioapic_lock, flags);
25130+ reg_00.raw = io_apic_read(apic, 0);
25131+ spin_unlock_irqrestore(&ioapic_lock, flags);
25132+
25133+ old_id = mp_ioapics[apic].mpc_apicid;
25134+
25135+
25136+ printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
25137+
25138+
25139+ /*
25140+ * We need to adjust the IRQ routing table
25141+ * if the ID changed.
25142+ */
25143+ if (old_id != mp_ioapics[apic].mpc_apicid)
25144+ for (i = 0; i < mp_irq_entries; i++)
25145+ if (mp_irqs[i].mpc_dstapic == old_id)
25146+ mp_irqs[i].mpc_dstapic
25147+ = mp_ioapics[apic].mpc_apicid;
25148+
25149+ /*
25150+ * Read the right value from the MPC table and
25151+ * write it into the ID register.
25152+ */
25153+ apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
25154+ mp_ioapics[apic].mpc_apicid);
25155+
25156+ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
25157+ spin_lock_irqsave(&ioapic_lock, flags);
25158+ io_apic_write(apic, 0, reg_00.raw);
25159+ spin_unlock_irqrestore(&ioapic_lock, flags);
25160+
25161+ /*
25162+ * Sanity check
25163+ */
25164+ spin_lock_irqsave(&ioapic_lock, flags);
25165+ reg_00.raw = io_apic_read(apic, 0);
25166+ spin_unlock_irqrestore(&ioapic_lock, flags);
25167+ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
25168+ printk("could not set ID!\n");
25169+ else
25170+ apic_printk(APIC_VERBOSE," ok.\n");
25171+ }
25172+}
25173+#else
25174+static void __init setup_ioapic_ids_from_mpc(void) { }
25175+#endif
25176+
25177+/*
25178+ * There is a nasty bug in some older SMP boards, their mptable lies
25179+ * about the timer IRQ. We do the following to work around the situation:
25180+ *
25181+ * - timer IRQ defaults to IO-APIC IRQ
25182+ * - if this function detects that timer IRQs are defunct, then we fall
25183+ * back to ISA timer IRQs
25184+ */
25185+#ifndef CONFIG_XEN
25186+static int __init timer_irq_works(void)
25187+{
25188+ unsigned long t1 = jiffies;
25189+
25190+ local_irq_enable();
25191+ /* Let ten ticks pass... */
25192+ mdelay((10 * 1000) / HZ);
25193+
25194+ /*
25195+ * Expect a few ticks at least, to be sure some possible
25196+ * glue logic does not lock up after one or two first
25197+ * ticks in a non-ExtINT mode. Also the local APIC
25198+ * might have cached one ExtINT interrupt. Finally, at
25199+ * least one tick may be lost due to delays.
25200+ */
25201+
25202+ /* jiffies wrap? */
25203+ if (jiffies - t1 > 4)
25204+ return 1;
25205+ return 0;
25206+}
25207+
25208+/*
25209+ * In the SMP+IOAPIC case it might happen that there are an unspecified
25210+ * number of pending IRQ events unhandled. These cases are very rare,
25211+ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
25212+ * better to do it this way as thus we do not have to be aware of
25213+ * 'pending' interrupts in the IRQ path, except at this point.
25214+ */
25215+/*
25216+ * Edge triggered needs to resend any interrupt
25217+ * that was delayed but this is now handled in the device
25218+ * independent code.
25219+ */
25220+
25221+/*
25222+ * Starting up an edge-triggered IO-APIC interrupt is
25223+ * nasty - we need to make sure that we get the edge.
25224+ * If it is already asserted for some reason, we need
25225+ * to return 1 to indicate that it was pending.
25226+ *
25227+ * This is not complete - we should be able to fake
25228+ * an edge even if it isn't on the 8259A...
25229+ */
25230+
25231+static unsigned int startup_edge_ioapic_irq(unsigned int irq)
25232+{
25233+ int was_pending = 0;
25234+ unsigned long flags;
25235+
25236+ spin_lock_irqsave(&ioapic_lock, flags);
25237+ if (irq < 16) {
25238+ disable_8259A_irq(irq);
25239+ if (i8259A_irq_pending(irq))
25240+ was_pending = 1;
25241+ }
25242+ __unmask_IO_APIC_irq(irq);
25243+ spin_unlock_irqrestore(&ioapic_lock, flags);
25244+
25245+ return was_pending;
25246+}
25247+
25248+/*
25249+ * Once we have recorded IRQ_PENDING already, we can mask the
25250+ * interrupt for real. This prevents IRQ storms from unhandled
25251+ * devices.
25252+ */
25253+static void ack_edge_ioapic_irq(unsigned int irq)
25254+{
25255+ move_irq(irq);
25256+ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
25257+ == (IRQ_PENDING | IRQ_DISABLED))
25258+ mask_IO_APIC_irq(irq);
25259+ ack_APIC_irq();
25260+}
25261+
25262+/*
25263+ * Level triggered interrupts can just be masked,
25264+ * and shutting down and starting up the interrupt
25265+ * is the same as enabling and disabling them -- except
25266+ * with a startup need to return a "was pending" value.
25267+ *
25268+ * Level triggered interrupts are special because we
25269+ * do not touch any IO-APIC register while handling
25270+ * them. We ack the APIC in the end-IRQ handler, not
25271+ * in the start-IRQ-handler. Protection against reentrance
25272+ * from the same interrupt is still provided, both by the
25273+ * generic IRQ layer and by the fact that an unacked local
25274+ * APIC does not accept IRQs.
25275+ */
25276+static unsigned int startup_level_ioapic_irq (unsigned int irq)
25277+{
25278+ unmask_IO_APIC_irq(irq);
25279+
25280+ return 0; /* don't check for pending */
25281+}
25282+
25283+static void end_level_ioapic_irq (unsigned int irq)
25284+{
25285+ move_irq(irq);
25286+ ack_APIC_irq();
25287+}
25288+
25289+#ifdef CONFIG_PCI_MSI
25290+static unsigned int startup_edge_ioapic_vector(unsigned int vector)
25291+{
25292+ int irq = vector_to_irq(vector);
25293+
25294+ return startup_edge_ioapic_irq(irq);
25295+}
25296+
25297+static void ack_edge_ioapic_vector(unsigned int vector)
25298+{
25299+ int irq = vector_to_irq(vector);
25300+
25301+ move_native_irq(vector);
25302+ ack_edge_ioapic_irq(irq);
25303+}
25304+
25305+static unsigned int startup_level_ioapic_vector (unsigned int vector)
25306+{
25307+ int irq = vector_to_irq(vector);
25308+
25309+ return startup_level_ioapic_irq (irq);
25310+}
25311+
25312+static void end_level_ioapic_vector (unsigned int vector)
25313+{
25314+ int irq = vector_to_irq(vector);
25315+
25316+ move_native_irq(vector);
25317+ end_level_ioapic_irq(irq);
25318+}
25319+
25320+static void mask_IO_APIC_vector (unsigned int vector)
25321+{
25322+ int irq = vector_to_irq(vector);
25323+
25324+ mask_IO_APIC_irq(irq);
25325+}
25326+
25327+static void unmask_IO_APIC_vector (unsigned int vector)
25328+{
25329+ int irq = vector_to_irq(vector);
25330+
25331+ unmask_IO_APIC_irq(irq);
25332+}
25333+
25334+#ifdef CONFIG_SMP
25335+static void set_ioapic_affinity_vector (unsigned int vector,
25336+ cpumask_t cpu_mask)
25337+{
25338+ int irq = vector_to_irq(vector);
25339+
25340+ set_native_irq_info(vector, cpu_mask);
25341+ set_ioapic_affinity_irq(irq, cpu_mask);
25342+}
25343+#endif // CONFIG_SMP
25344+#endif // CONFIG_PCI_MSI
25345+
25346+static int ioapic_retrigger(unsigned int irq)
25347+{
25348+ send_IPI_self(IO_APIC_VECTOR(irq));
25349+
25350+ return 1;
25351+}
25352+
25353+/*
25354+ * Level and edge triggered IO-APIC interrupts need different handling,
25355+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
25356+ * handled with the level-triggered descriptor, but that one has slightly
25357+ * more overhead. Level-triggered interrupts cannot be handled with the
25358+ * edge-triggered handler, without risking IRQ storms and other ugly
25359+ * races.
25360+ */
25361+
25362+static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
25363+ .typename = "IO-APIC-edge",
25364+ .startup = startup_edge_ioapic,
25365+ .shutdown = shutdown_edge_ioapic,
25366+ .enable = enable_edge_ioapic,
25367+ .disable = disable_edge_ioapic,
25368+ .ack = ack_edge_ioapic,
25369+ .end = end_edge_ioapic,
25370+#ifdef CONFIG_SMP
25371+ .set_affinity = set_ioapic_affinity,
25372+#endif
25373+ .retrigger = ioapic_retrigger,
25374+};
25375+
25376+static struct hw_interrupt_type ioapic_level_type __read_mostly = {
25377+ .typename = "IO-APIC-level",
25378+ .startup = startup_level_ioapic,
25379+ .shutdown = shutdown_level_ioapic,
25380+ .enable = enable_level_ioapic,
25381+ .disable = disable_level_ioapic,
25382+ .ack = mask_and_ack_level_ioapic,
25383+ .end = end_level_ioapic,
25384+#ifdef CONFIG_SMP
25385+ .set_affinity = set_ioapic_affinity,
25386+#endif
25387+ .retrigger = ioapic_retrigger,
25388+};
25389+#endif /* !CONFIG_XEN */
25390+
25391+static inline void init_IO_APIC_traps(void)
25392+{
25393+ int irq;
25394+
25395+ /*
25396+ * NOTE! The local APIC isn't very good at handling
25397+ * multiple interrupts at the same interrupt level.
25398+ * As the interrupt level is determined by taking the
25399+ * vector number and shifting that right by 4, we
25400+ * want to spread these out a bit so that they don't
25401+ * all fall in the same interrupt level.
25402+ *
25403+ * Also, we've got to be careful not to trash gate
25404+ * 0x80, because int 0x80 is hm, kind of importantish. ;)
25405+ */
25406+ for (irq = 0; irq < NR_IRQS ; irq++) {
25407+ int tmp = irq;
25408+ if (use_pci_vector()) {
25409+ if (!platform_legacy_irq(tmp))
25410+ if ((tmp = vector_to_irq(tmp)) == -1)
25411+ continue;
25412+ }
25413+ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
25414+ /*
25415+ * Hmm.. We don't have an entry for this,
25416+ * so default to an old-fashioned 8259
25417+ * interrupt if we can..
25418+ */
25419+ if (irq < 16)
25420+ make_8259A_irq(irq);
25421+#ifndef CONFIG_XEN
25422+ else
25423+ /* Strange. Oh, well.. */
25424+ irq_desc[irq].chip = &no_irq_type;
25425+#endif
25426+ }
25427+ }
25428+}
25429+
25430+#ifndef CONFIG_XEN
25431+static void enable_lapic_irq (unsigned int irq)
25432+{
25433+ unsigned long v;
25434+
25435+ v = apic_read(APIC_LVT0);
25436+ apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
25437+}
25438+
25439+static void disable_lapic_irq (unsigned int irq)
25440+{
25441+ unsigned long v;
25442+
25443+ v = apic_read(APIC_LVT0);
25444+ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
25445+}
25446+
25447+static void ack_lapic_irq (unsigned int irq)
25448+{
25449+ ack_APIC_irq();
25450+}
25451+
25452+static void end_lapic_irq (unsigned int i) { /* nothing */ }
25453+
25454+static struct hw_interrupt_type lapic_irq_type __read_mostly = {
25455+ .typename = "local-APIC-edge",
25456+ .startup = NULL, /* startup_irq() not used for IRQ0 */
25457+ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
25458+ .enable = enable_lapic_irq,
25459+ .disable = disable_lapic_irq,
25460+ .ack = ack_lapic_irq,
25461+ .end = end_lapic_irq,
25462+};
25463+
25464+static void setup_nmi (void)
25465+{
25466+ /*
25467+ * Dirty trick to enable the NMI watchdog ...
25468+ * We put the 8259A master into AEOI mode and
25469+ * unmask on all local APICs LVT0 as NMI.
25470+ *
25471+ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
25472+ * is from Maciej W. Rozycki - so we do not have to EOI from
25473+ * the NMI handler or the timer interrupt.
25474+ */
25475+ printk(KERN_INFO "activating NMI Watchdog ...");
25476+
25477+ enable_NMI_through_LVT0(NULL);
25478+
25479+ printk(" done.\n");
25480+}
25481+
25482+/*
25483+ * This looks a bit hackish but it's about the only way of sending
25484+ * a few INTA cycles to 8259As and any associated glue logic. ICR does
25485+ * not support the ExtINT mode, unfortunately. We need to send these
25486+ * cycles as some i82489DX-based boards have glue logic that keeps the
25487+ * 8259A interrupt line asserted until INTA. --macro
25488+ */
25489+static inline void unlock_ExtINT_logic(void)
25490+{
25491+ int apic, pin, i;
25492+ struct IO_APIC_route_entry entry0, entry1;
25493+ unsigned char save_control, save_freq_select;
25494+ unsigned long flags;
25495+
25496+ pin = find_isa_irq_pin(8, mp_INT);
25497+ apic = find_isa_irq_apic(8, mp_INT);
25498+ if (pin == -1)
25499+ return;
25500+
25501+ spin_lock_irqsave(&ioapic_lock, flags);
25502+ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
25503+ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
25504+ spin_unlock_irqrestore(&ioapic_lock, flags);
25505+ clear_IO_APIC_pin(apic, pin);
25506+
25507+ memset(&entry1, 0, sizeof(entry1));
25508+
25509+ entry1.dest_mode = 0; /* physical delivery */
25510+ entry1.mask = 0; /* unmask IRQ now */
25511+ entry1.dest.physical.physical_dest = hard_smp_processor_id();
25512+ entry1.delivery_mode = dest_ExtINT;
25513+ entry1.polarity = entry0.polarity;
25514+ entry1.trigger = 0;
25515+ entry1.vector = 0;
25516+
25517+ spin_lock_irqsave(&ioapic_lock, flags);
25518+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
25519+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
25520+ spin_unlock_irqrestore(&ioapic_lock, flags);
25521+
25522+ save_control = CMOS_READ(RTC_CONTROL);
25523+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
25524+ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
25525+ RTC_FREQ_SELECT);
25526+ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
25527+
25528+ i = 100;
25529+ while (i-- > 0) {
25530+ mdelay(10);
25531+ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
25532+ i -= 10;
25533+ }
25534+
25535+ CMOS_WRITE(save_control, RTC_CONTROL);
25536+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
25537+ clear_IO_APIC_pin(apic, pin);
25538+
25539+ spin_lock_irqsave(&ioapic_lock, flags);
25540+ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
25541+ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
25542+ spin_unlock_irqrestore(&ioapic_lock, flags);
25543+}
25544+
25545+int timer_uses_ioapic_pin_0;
25546+
25547+/*
25548+ * This code may look a bit paranoid, but it's supposed to cooperate with
25549+ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
25550+ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
25551+ * fanatically on his truly buggy board.
25552+ *
25553+ * FIXME: really need to revamp this for modern platforms only.
25554+ */
25555+static inline void check_timer(void)
25556+{
25557+ int apic1, pin1, apic2, pin2;
25558+ int vector;
25559+
25560+ /*
25561+ * get/set the timer IRQ vector:
25562+ */
25563+ disable_8259A_irq(0);
25564+ vector = assign_irq_vector(0);
25565+ set_intr_gate(vector, interrupt[0]);
25566+
25567+ /*
25568+ * Subtle, code in do_timer_interrupt() expects an AEOI
25569+ * mode for the 8259A whenever interrupts are routed
25570+ * through I/O APICs. Also IRQ0 has to be enabled in
25571+ * the 8259A which implies the virtual wire has to be
25572+ * disabled in the local APIC.
25573+ */
25574+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
25575+ init_8259A(1);
25576+ if (timer_over_8254 > 0)
25577+ enable_8259A_irq(0);
25578+
25579+ pin1 = find_isa_irq_pin(0, mp_INT);
25580+ apic1 = find_isa_irq_apic(0, mp_INT);
25581+ pin2 = ioapic_i8259.pin;
25582+ apic2 = ioapic_i8259.apic;
25583+
25584+ if (pin1 == 0)
25585+ timer_uses_ioapic_pin_0 = 1;
25586+
25587+ apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
25588+ vector, apic1, pin1, apic2, pin2);
25589+
25590+ if (pin1 != -1) {
25591+ /*
25592+ * Ok, does IRQ0 through the IOAPIC work?
25593+ */
25594+ unmask_IO_APIC_irq(0);
25595+ if (!no_timer_check && timer_irq_works()) {
25596+ nmi_watchdog_default();
25597+ if (nmi_watchdog == NMI_IO_APIC) {
25598+ disable_8259A_irq(0);
25599+ setup_nmi();
25600+ enable_8259A_irq(0);
25601+ }
25602+ if (disable_timer_pin_1 > 0)
25603+ clear_IO_APIC_pin(0, pin1);
25604+ return;
25605+ }
25606+ clear_IO_APIC_pin(apic1, pin1);
25607+ apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
25608+ "connected to IO-APIC\n");
25609+ }
25610+
25611+ apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
25612+ "through the 8259A ... ");
25613+ if (pin2 != -1) {
25614+ apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
25615+ apic2, pin2);
25616+ /*
25617+ * legacy devices should be connected to IO APIC #0
25618+ */
25619+ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
25620+ if (timer_irq_works()) {
25621+ apic_printk(APIC_VERBOSE," works.\n");
25622+ nmi_watchdog_default();
25623+ if (nmi_watchdog == NMI_IO_APIC) {
25624+ setup_nmi();
25625+ }
25626+ return;
25627+ }
25628+ /*
25629+ * Cleanup, just in case ...
25630+ */
25631+ clear_IO_APIC_pin(apic2, pin2);
25632+ }
25633+ apic_printk(APIC_VERBOSE," failed.\n");
25634+
25635+ if (nmi_watchdog == NMI_IO_APIC) {
25636+ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
25637+ nmi_watchdog = 0;
25638+ }
25639+
25640+ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
25641+
25642+ disable_8259A_irq(0);
25643+ irq_desc[0].chip = &lapic_irq_type;
25644+ apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
25645+ enable_8259A_irq(0);
25646+
25647+ if (timer_irq_works()) {
25648+ apic_printk(APIC_VERBOSE," works.\n");
25649+ return;
25650+ }
25651+ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
25652+ apic_printk(APIC_VERBOSE," failed.\n");
25653+
25654+ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
25655+
25656+ init_8259A(0);
25657+ make_8259A_irq(0);
25658+ apic_write(APIC_LVT0, APIC_DM_EXTINT);
25659+
25660+ unlock_ExtINT_logic();
25661+
25662+ if (timer_irq_works()) {
25663+ apic_printk(APIC_VERBOSE," works.\n");
25664+ return;
25665+ }
25666+ apic_printk(APIC_VERBOSE," failed :(.\n");
25667+ panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
25668+}
25669+#else
25670+#define check_timer() ((void)0)
25671+int timer_uses_ioapic_pin_0 = 0;
25672+#endif /* !CONFIG_XEN */
25673+
25674+static int __init notimercheck(char *s)
25675+{
25676+ no_timer_check = 1;
25677+ return 1;
25678+}
25679+__setup("no_timer_check", notimercheck);
25680+
25681+/*
25682+ *
25683+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
25684+ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
25685+ * Linux doesn't really care, as it's not actually used
25686+ * for any interrupt handling anyway.
25687+ */
25688+#define PIC_IRQS (1<<2)
25689+
25690+void __init setup_IO_APIC(void)
25691+{
25692+ enable_IO_APIC();
25693+
25694+ if (acpi_ioapic)
25695+ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
25696+ else
25697+ io_apic_irqs = ~PIC_IRQS;
25698+
25699+ apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
25700+
25701+ /*
25702+ * Set up the IO-APIC IRQ routing table.
25703+ */
25704+ if (!acpi_ioapic)
25705+ setup_ioapic_ids_from_mpc();
25706+#ifndef CONFIG_XEN
25707+ sync_Arb_IDs();
25708+#endif /* !CONFIG_XEN */
25709+ setup_IO_APIC_irqs();
25710+ init_IO_APIC_traps();
25711+ check_timer();
25712+ if (!acpi_ioapic)
25713+ print_IO_APIC();
25714+}
25715+
25716+struct sysfs_ioapic_data {
25717+ struct sys_device dev;
25718+ struct IO_APIC_route_entry entry[0];
25719+};
25720+static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
25721+
25722+static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
25723+{
25724+ struct IO_APIC_route_entry *entry;
25725+ struct sysfs_ioapic_data *data;
25726+ unsigned long flags;
25727+ int i;
25728+
25729+ data = container_of(dev, struct sysfs_ioapic_data, dev);
25730+ entry = data->entry;
25731+ spin_lock_irqsave(&ioapic_lock, flags);
25732+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
25733+ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
25734+ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
25735+ }
25736+ spin_unlock_irqrestore(&ioapic_lock, flags);
25737+
25738+ return 0;
25739+}
25740+
25741+static int ioapic_resume(struct sys_device *dev)
25742+{
25743+ struct IO_APIC_route_entry *entry;
25744+ struct sysfs_ioapic_data *data;
25745+ unsigned long flags;
25746+ union IO_APIC_reg_00 reg_00;
25747+ int i;
25748+
25749+ data = container_of(dev, struct sysfs_ioapic_data, dev);
25750+ entry = data->entry;
25751+
25752+ spin_lock_irqsave(&ioapic_lock, flags);
25753+ reg_00.raw = io_apic_read(dev->id, 0);
25754+ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
25755+ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
25756+ io_apic_write(dev->id, 0, reg_00.raw);
25757+ }
25758+ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
25759+ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
25760+ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
25761+ }
25762+ spin_unlock_irqrestore(&ioapic_lock, flags);
25763+
25764+ return 0;
25765+}
25766+
25767+static struct sysdev_class ioapic_sysdev_class = {
25768+ set_kset_name("ioapic"),
25769+#ifndef CONFIG_XEN
25770+ .suspend = ioapic_suspend,
25771+ .resume = ioapic_resume,
25772+#endif
25773+};
25774+
25775+static int __init ioapic_init_sysfs(void)
25776+{
25777+ struct sys_device * dev;
25778+ int i, size, error = 0;
25779+
25780+ error = sysdev_class_register(&ioapic_sysdev_class);
25781+ if (error)
25782+ return error;
25783+
25784+ for (i = 0; i < nr_ioapics; i++ ) {
25785+ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
25786+ * sizeof(struct IO_APIC_route_entry);
25787+ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
25788+ if (!mp_ioapic_data[i]) {
25789+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
25790+ continue;
25791+ }
25792+ memset(mp_ioapic_data[i], 0, size);
25793+ dev = &mp_ioapic_data[i]->dev;
25794+ dev->id = i;
25795+ dev->cls = &ioapic_sysdev_class;
25796+ error = sysdev_register(dev);
25797+ if (error) {
25798+ kfree(mp_ioapic_data[i]);
25799+ mp_ioapic_data[i] = NULL;
25800+ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
25801+ continue;
25802+ }
25803+ }
25804+
25805+ return 0;
25806+}
25807+
25808+device_initcall(ioapic_init_sysfs);
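/*
 * Sketch (not from the patch) of the allocation pattern used by
 * ioapic_init_sysfs() above: a header structure with a trailing zero-length
 * (here: flexible) array, allocated as sizeof(header) + n * sizeof(element)
 * so a single allocation holds both the device and its saved routing entries.
 * malloc() and the dummy types stand in for kmalloc(), struct sys_device and
 * struct IO_APIC_route_entry.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct route_entry { unsigned int lo, hi; };	/* stand-in for IO_APIC_route_entry */

struct ioapic_data {
	int id;				/* stand-in for struct sys_device */
	struct route_entry entry[];	/* C99 flexible array member */
};

int main(void)
{
	int pins = 24;
	size_t size = sizeof(struct ioapic_data) + pins * sizeof(struct route_entry);
	struct ioapic_data *d = malloc(size);

	if (!d)
		return 1;
	memset(d, 0, size);
	d->id = 0;
	d->entry[23].lo = 1u << 16;	/* e.g. remember a masked entry */
	printf("allocated %zu bytes for %d entries\n", size, pins);
	free(d);
	return 0;
}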
25809+
25810+/* --------------------------------------------------------------------------
25811+ ACPI-based IOAPIC Configuration
25812+ -------------------------------------------------------------------------- */
25813+
25814+#ifdef CONFIG_ACPI
25815+
25816+#define IO_APIC_MAX_ID 0xFE
25817+
25818+int __init io_apic_get_version (int ioapic)
25819+{
25820+ union IO_APIC_reg_01 reg_01;
25821+ unsigned long flags;
25822+
25823+ spin_lock_irqsave(&ioapic_lock, flags);
25824+ reg_01.raw = io_apic_read(ioapic, 1);
25825+ spin_unlock_irqrestore(&ioapic_lock, flags);
25826+
25827+ return reg_01.bits.version;
25828+}
25829+
25830+
25831+int __init io_apic_get_redir_entries (int ioapic)
25832+{
25833+ union IO_APIC_reg_01 reg_01;
25834+ unsigned long flags;
25835+
25836+ spin_lock_irqsave(&ioapic_lock, flags);
25837+ reg_01.raw = io_apic_read(ioapic, 1);
25838+ spin_unlock_irqrestore(&ioapic_lock, flags);
25839+
25840+ return reg_01.bits.entries;
25841+}
25842+
25843+
25844+int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
25845+{
25846+ struct IO_APIC_route_entry entry;
25847+ unsigned long flags;
25848+
25849+ if (!IO_APIC_IRQ(irq)) {
25850+ apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
25851+ ioapic);
25852+ return -EINVAL;
25853+ }
25854+
25855+ /*
25856+ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
25857+ * Note that we mask (disable) IRQs now -- these get enabled when the
25858+ * corresponding device driver registers for this IRQ.
25859+ */
25860+
25861+ memset(&entry,0,sizeof(entry));
25862+
25863+ entry.delivery_mode = INT_DELIVERY_MODE;
25864+ entry.dest_mode = INT_DEST_MODE;
25865+ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
25866+ entry.trigger = edge_level;
25867+ entry.polarity = active_high_low;
25868+ entry.mask = 1; /* Disabled (masked) */
25869+
25870+ irq = gsi_irq_sharing(irq);
25871+ /*
25872+ * IRQs < 16 are already in the irq_2_pin[] map
25873+ */
25874+ if (irq >= 16)
25875+ add_pin_to_irq(irq, ioapic, pin);
25876+
25877+ entry.vector = assign_irq_vector(irq);
25878+
25879+ apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
25880+ "IRQ %d Mode:%i Active:%i)\n", ioapic,
25881+ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
25882+ edge_level, active_high_low);
25883+
25884+ ioapic_register_intr(irq, entry.vector, edge_level);
25885+
25886+ if (!ioapic && (irq < 16))
25887+ disable_8259A_irq(irq);
25888+
25889+ spin_lock_irqsave(&ioapic_lock, flags);
25890+ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
25891+ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
25892+ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
25893+ spin_unlock_irqrestore(&ioapic_lock, flags);
25894+
25895+ return 0;
25896+}
25897+
25898+#endif /* CONFIG_ACPI */
25899+
25900+
25901+#ifndef CONFIG_XEN
25902+/*
25903+ * This function is currently only a helper for the i386 SMP boot process, where
25904+ * we need to reprogram the ioredtbls to cater for the CPUs which have come online,
25905+ * so the mask in all cases should simply be TARGET_CPUS.
25906+ */
25907+#ifdef CONFIG_SMP
25908+void __init setup_ioapic_dest(void)
25909+{
25910+ int pin, ioapic, irq, irq_entry;
25911+
25912+ if (skip_ioapic_setup == 1)
25913+ return;
25914+
25915+ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
25916+ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
25917+ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
25918+ if (irq_entry == -1)
25919+ continue;
25920+ irq = pin_2_irq(irq_entry, ioapic, pin);
25921+ set_ioapic_affinity_irq(irq, TARGET_CPUS);
25922+ }
25923+
25924+ }
25925+}
25926+#endif
25927+#endif /* !CONFIG_XEN */
25928Index: head-2008-11-25/arch/x86/kernel/ioport_64-xen.c
25929===================================================================
25930--- /dev/null 1970-01-01 00:00:00.000000000 +0000
25931+++ head-2008-11-25/arch/x86/kernel/ioport_64-xen.c 2008-01-28 12:24:19.000000000 +0100
25932@@ -0,0 +1,100 @@
25933+/*
25934+ * linux/arch/x86_64/kernel/ioport.c
25935+ *
25936+ * This contains the io-permission bitmap code - written by obz, with changes
25937+ * by Linus.
25938+ */
25939+
25940+#include <linux/sched.h>
25941+#include <linux/kernel.h>
25942+#include <linux/capability.h>
25943+#include <linux/errno.h>
25944+#include <linux/types.h>
25945+#include <linux/ioport.h>
25946+#include <linux/mm.h>
25947+#include <linux/smp.h>
25948+#include <linux/smp_lock.h>
25949+#include <linux/stddef.h>
25950+#include <linux/slab.h>
25951+#include <linux/thread_info.h>
25952+#include <xen/interface/physdev.h>
25953+
25954+/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
25955+static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
25956+{
25957+ int i;
25958+
25959+ if (new_value)
25960+ for (i = base; i < base + extent; i++)
25961+ __set_bit(i, bitmap);
25962+ else
25963+ for (i = base; i < base + extent; i++)
25964+ clear_bit(i, bitmap);
25965+}
25966+
25967+/*
25968+ * this changes the io permissions bitmap in the current task.
25969+ */
25970+asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
25971+{
25972+ struct thread_struct * t = &current->thread;
25973+ unsigned long *bitmap;
25974+ struct physdev_set_iobitmap set_iobitmap;
25975+
25976+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
25977+ return -EINVAL;
25978+ if (turn_on && !capable(CAP_SYS_RAWIO))
25979+ return -EPERM;
25980+
25981+ /*
25982+ * If it's the first ioperm() call in this thread's lifetime, set the
25983+ * IO bitmap up. ioperm() is much less timing critical than clone(),
25984+ * which is why we delay this operation until now:
25985+ */
25986+ if (!t->io_bitmap_ptr) {
25987+ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
25988+ if (!bitmap)
25989+ return -ENOMEM;
25990+
25991+ memset(bitmap, 0xff, IO_BITMAP_BYTES);
25992+ t->io_bitmap_ptr = bitmap;
25993+
25994+ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
25995+ set_iobitmap.nr_ports = IO_BITMAP_BITS;
25996+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
25997+ &set_iobitmap));
25998+ }
25999+
26000+ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
26001+
26002+ return 0;
26003+}
26004+
26005+/*
26006+ * sys_iopl has to be used when you want to access the IO ports
26007+ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
26008+ * you'd need 8kB of bitmaps/process, which is a bit excessive.
26009+ *
26010+ */
26011+
26012+asmlinkage long sys_iopl(unsigned int new_iopl, struct pt_regs *regs)
26013+{
26014+ unsigned int old_iopl = current->thread.iopl;
26015+ struct physdev_set_iopl set_iopl;
26016+
26017+ if (new_iopl > 3)
26018+ return -EINVAL;
26019+
26020+ /* Need "raw I/O" privileges for direct port access. */
26021+ if ((new_iopl > old_iopl) && !capable(CAP_SYS_RAWIO))
26022+ return -EPERM;
26023+
26024+ /* Change our version of the privilege levels. */
26025+ current->thread.iopl = new_iopl;
26026+
26027+ /* Force the change at ring 0. */
26028+ set_iopl.iopl = (new_iopl == 0) ? 1 : new_iopl;
26029+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
26030+
26031+ return 0;
26032+}
26033Index: head-2008-11-25/arch/x86/kernel/irq_64-xen.c
26034===================================================================
26035--- /dev/null 1970-01-01 00:00:00.000000000 +0000
26036+++ head-2008-11-25/arch/x86/kernel/irq_64-xen.c 2008-10-29 09:55:56.000000000 +0100
26037@@ -0,0 +1,197 @@
26038+/*
26039+ * linux/arch/x86_64/kernel/irq.c
26040+ *
26041+ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
26042+ *
26043+ * This file contains the lowest level x86_64-specific interrupt
26044+ * entry and irq statistics code. All the remaining irq logic is
26045+ * done by the generic kernel/irq/ code and in the
26046+ * x86_64-specific irq controller code. (e.g. i8259.c and
26047+ * io_apic.c.)
26048+ */
26049+
26050+#include <linux/kernel_stat.h>
26051+#include <linux/interrupt.h>
26052+#include <linux/seq_file.h>
26053+#include <linux/module.h>
26054+#include <linux/delay.h>
26055+#include <asm/uaccess.h>
26056+#include <asm/io_apic.h>
26057+#include <asm/idle.h>
26058+
26059+atomic_t irq_err_count;
26060+#ifdef CONFIG_X86_IO_APIC
26061+#ifdef APIC_MISMATCH_DEBUG
26062+atomic_t irq_mis_count;
26063+#endif
26064+#endif
26065+
26066+#ifdef CONFIG_DEBUG_STACKOVERFLOW
26067+/*
26068+ * Probabilistic stack overflow check:
26069+ *
26070+ * Only check the stack in process context, because everything else
26071+ * runs on the big interrupt stacks. Checking reliably is too expensive,
26072+ * so we just check from interrupts.
26073+ */
26074+static inline void stack_overflow_check(struct pt_regs *regs)
26075+{
26076+ u64 curbase = (u64) current->thread_info;
26077+ static unsigned long warned = -60*HZ;
26078+
26079+ if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
26080+ regs->rsp < curbase + sizeof(struct thread_info) + 128 &&
26081+ time_after(jiffies, warned + 60*HZ)) {
26082+ printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
26083+ current->comm, curbase, regs->rsp);
26084+ show_stack(NULL,NULL);
26085+ warned = jiffies;
26086+ }
26087+}
26088+#endif
26089+
26090+/*
26091+ * Generic, controller-independent functions:
26092+ */
26093+
26094+int show_interrupts(struct seq_file *p, void *v)
26095+{
26096+ int i = *(loff_t *) v, j;
26097+ struct irqaction * action;
26098+ unsigned long flags;
26099+
26100+ if (i == 0) {
26101+ seq_printf(p, " ");
26102+ for_each_online_cpu(j)
26103+ seq_printf(p, "CPU%-8d",j);
26104+ seq_putc(p, '\n');
26105+ }
26106+
26107+ if (i < NR_IRQS) {
26108+ spin_lock_irqsave(&irq_desc[i].lock, flags);
26109+ action = irq_desc[i].action;
26110+ if (!action)
26111+ goto skip;
26112+ seq_printf(p, "%3d: ",i);
26113+#ifndef CONFIG_SMP
26114+ seq_printf(p, "%10u ", kstat_irqs(i));
26115+#else
26116+ for_each_online_cpu(j)
26117+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
26118+#endif
26119+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
26120+
26121+ seq_printf(p, " %s", action->name);
26122+ for (action=action->next; action; action = action->next)
26123+ seq_printf(p, ", %s", action->name);
26124+ seq_putc(p, '\n');
26125+skip:
26126+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
26127+ } else if (i == NR_IRQS) {
26128+ seq_printf(p, "NMI: ");
26129+ for_each_online_cpu(j)
26130+ seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
26131+ seq_putc(p, '\n');
26132+#ifdef CONFIG_X86_LOCAL_APIC
26133+ seq_printf(p, "LOC: ");
26134+ for_each_online_cpu(j)
26135+ seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
26136+ seq_putc(p, '\n');
26137+#endif
26138+ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
26139+#ifdef CONFIG_X86_IO_APIC
26140+#ifdef APIC_MISMATCH_DEBUG
26141+ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
26142+#endif
26143+#endif
26144+ }
26145+ return 0;
26146+}
26147+
26148+/*
26149+ * do_IRQ handles all normal device IRQ's (the special
26150+ * SMP cross-CPU interrupts have their own specific
26151+ * handlers).
26152+ */
26153+asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
26154+{
26155+ /* high bit used in ret_from_ code */
26156+ unsigned irq = ~regs->orig_rax;
26157+
26158+ if (unlikely(irq >= NR_IRQS)) {
26159+ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
26160+ __FUNCTION__, irq);
26161+ BUG();
26162+ }
26163+
26164+ /*exit_idle();*/
26165+ /*irq_enter();*/
26166+#ifdef CONFIG_DEBUG_STACKOVERFLOW
26167+ stack_overflow_check(regs);
26168+#endif
26169+ __do_IRQ(irq, regs);
26170+ /*irq_exit();*/
26171+
26172+ return 1;
26173+}
26174+
26175+#ifdef CONFIG_HOTPLUG_CPU
26176+void fixup_irqs(cpumask_t map)
26177+{
26178+ unsigned int irq;
26179+ static int warned;
26180+
26181+ for (irq = 0; irq < NR_IRQS; irq++) {
26182+ cpumask_t mask;
26183+ if (irq == 2)
26184+ continue;
26185+
26186+ cpus_and(mask, irq_desc[irq].affinity, map);
26187+ if (any_online_cpu(mask) == NR_CPUS) {
26188+ /*printk("Breaking affinity for irq %i\n", irq);*/
26189+ mask = map;
26190+ }
26191+ if (irq_desc[irq].chip->set_affinity)
26192+ irq_desc[irq].chip->set_affinity(irq, mask);
26193+ else if (irq_desc[irq].action && !(warned++))
26194+ printk("Cannot set affinity for irq %i\n", irq);
26195+ }
26196+
26197+ /* That doesn't seem sufficient. Give it 1ms. */
26198+ local_irq_enable();
26199+ mdelay(1);
26200+ local_irq_disable();
26201+}
26202+#endif
26203+
26204+extern void call_softirq(void);
26205+
26206+asmlinkage void do_softirq(void)
26207+{
26208+ __u32 pending;
26209+ unsigned long flags;
26210+
26211+ if (in_interrupt())
26212+ return;
26213+
26214+ local_irq_save(flags);
26215+ pending = local_softirq_pending();
26216+ /* Switch to interrupt stack */
26217+ if (pending) {
26218+ call_softirq();
26219+ WARN_ON_ONCE(softirq_count());
26220+ }
26221+ local_irq_restore(flags);
26222+}
26223+EXPORT_SYMBOL(do_softirq);
26224+
26225+#ifndef CONFIG_X86_LOCAL_APIC
26226+/*
26227+ * 'what should we do if we get a hw irq event on an illegal vector'.
26228+ * Each architecture has to answer this itself.
26229+ */
26230+void ack_bad_irq(unsigned int irq)
26231+{
26232+ printk("unexpected IRQ trap at vector %02x\n", irq);
26233+}
26234+#endif
26235Index: head-2008-11-25/arch/x86/kernel/ldt_64-xen.c
26236===================================================================
26237--- /dev/null 1970-01-01 00:00:00.000000000 +0000
26238+++ head-2008-11-25/arch/x86/kernel/ldt_64-xen.c 2007-06-12 13:13:01.000000000 +0200
26239@@ -0,0 +1,282 @@
26240+/*
26241+ * linux/arch/x86_64/kernel/ldt.c
26242+ *
26243+ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
26244+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
26245+ * Copyright (C) 2002 Andi Kleen
26246+ *
26247+ * This handles calls from both 32bit and 64bit mode.
26248+ */
26249+
26250+#include <linux/errno.h>
26251+#include <linux/sched.h>
26252+#include <linux/string.h>
26253+#include <linux/mm.h>
26254+#include <linux/smp.h>
26255+#include <linux/smp_lock.h>
26256+#include <linux/vmalloc.h>
26257+#include <linux/slab.h>
26258+
26259+#include <asm/uaccess.h>
26260+#include <asm/system.h>
26261+#include <asm/ldt.h>
26262+#include <asm/desc.h>
26263+#include <asm/proto.h>
26264+#include <asm/pgalloc.h>
26265+
26266+#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
26267+static void flush_ldt(void *null)
26268+{
26269+ if (current->active_mm)
26270+ load_LDT(&current->active_mm->context);
26271+}
26272+#endif
26273+
26274+static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
26275+{
26276+ void *oldldt;
26277+ void *newldt;
26278+ unsigned oldsize;
26279+
26280+ if (mincount <= (unsigned)pc->size)
26281+ return 0;
26282+ oldsize = pc->size;
26283+ mincount = (mincount+511)&(~511);
26284+ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
26285+ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
26286+ else
26287+ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
26288+
26289+ if (!newldt)
26290+ return -ENOMEM;
26291+
26292+ if (oldsize)
26293+ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
26294+ oldldt = pc->ldt;
26295+ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
26296+ wmb();
26297+ pc->ldt = newldt;
26298+ wmb();
26299+ pc->size = mincount;
26300+ wmb();
26301+ if (reload) {
26302+#ifdef CONFIG_SMP
26303+ cpumask_t mask;
26304+
26305+ preempt_disable();
26306+#endif
26307+ make_pages_readonly(
26308+ pc->ldt,
26309+ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
26310+ XENFEAT_writable_descriptor_tables);
26311+ load_LDT(pc);
26312+#ifdef CONFIG_SMP
26313+ mask = cpumask_of_cpu(smp_processor_id());
26314+ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
26315+ smp_call_function(flush_ldt, NULL, 1, 1);
26316+ preempt_enable();
26317+#endif
26318+ }
26319+ if (oldsize) {
26320+ make_pages_writable(
26321+ oldldt,
26322+ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
26323+ XENFEAT_writable_descriptor_tables);
26324+ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
26325+ vfree(oldldt);
26326+ else
26327+ kfree(oldldt);
26328+ }
26329+ return 0;
26330+}
26331+
26332+static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
26333+{
26334+ int err = alloc_ldt(new, old->size, 0);
26335+ if (err < 0)
26336+ return err;
26337+ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
26338+ make_pages_readonly(
26339+ new->ldt,
26340+ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
26341+ XENFEAT_writable_descriptor_tables);
26342+ return 0;
26343+}
26344+
26345+/*
26346+ * we do not have to muck with descriptors here, that is
26347+ * done in switch_mm() as needed.
26348+ */
26349+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
26350+{
26351+ struct mm_struct * old_mm;
26352+ int retval = 0;
26353+
26354+ memset(&mm->context, 0, sizeof(mm->context));
26355+ init_MUTEX(&mm->context.sem);
26356+ old_mm = current->mm;
26357+ if (old_mm && old_mm->context.size > 0) {
26358+ down(&old_mm->context.sem);
26359+ retval = copy_ldt(&mm->context, &old_mm->context);
26360+ up(&old_mm->context.sem);
26361+ }
26362+ if (retval == 0) {
26363+ spin_lock(&mm_unpinned_lock);
26364+ list_add(&mm->context.unpinned, &mm_unpinned);
26365+ spin_unlock(&mm_unpinned_lock);
26366+ }
26367+ return retval;
26368+}
26369+
26370+/*
26371+ *
26372+ * Don't touch the LDT register - we're already in the next thread.
26373+ */
26374+void destroy_context(struct mm_struct *mm)
26375+{
26376+ if (mm->context.size) {
26377+ if (mm == current->active_mm)
26378+ clear_LDT();
26379+ make_pages_writable(
26380+ mm->context.ldt,
26381+ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
26382+ XENFEAT_writable_descriptor_tables);
26383+ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
26384+ vfree(mm->context.ldt);
26385+ else
26386+ kfree(mm->context.ldt);
26387+ mm->context.size = 0;
26388+ }
26389+ if (!mm->context.pinned) {
26390+ spin_lock(&mm_unpinned_lock);
26391+ list_del(&mm->context.unpinned);
26392+ spin_unlock(&mm_unpinned_lock);
26393+ }
26394+}
26395+
26396+static int read_ldt(void __user * ptr, unsigned long bytecount)
26397+{
26398+ int err;
26399+ unsigned long size;
26400+ struct mm_struct * mm = current->mm;
26401+
26402+ if (!mm->context.size)
26403+ return 0;
26404+ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
26405+ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
26406+
26407+ down(&mm->context.sem);
26408+ size = mm->context.size*LDT_ENTRY_SIZE;
26409+ if (size > bytecount)
26410+ size = bytecount;
26411+
26412+ err = 0;
26413+ if (copy_to_user(ptr, mm->context.ldt, size))
26414+ err = -EFAULT;
26415+ up(&mm->context.sem);
26416+ if (err < 0)
26417+ goto error_return;
26418+ if (size != bytecount) {
26419+ /* zero-fill the rest */
26420+ if (clear_user(ptr+size, bytecount-size) != 0) {
26421+ err = -EFAULT;
26422+ goto error_return;
26423+ }
26424+ }
26425+ return bytecount;
26426+error_return:
26427+ return err;
26428+}
26429+
26430+static int read_default_ldt(void __user * ptr, unsigned long bytecount)
26431+{
26432+ /* Arbitrary number */
26433+ /* x86-64 default LDT is all zeros */
26434+ if (bytecount > 128)
26435+ bytecount = 128;
26436+ if (clear_user(ptr, bytecount))
26437+ return -EFAULT;
26438+ return bytecount;
26439+}
26440+
26441+static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
26442+{
26443+ struct task_struct *me = current;
26444+ struct mm_struct * mm = me->mm;
26445+ __u32 entry_1, entry_2, *lp;
26446+ unsigned long mach_lp;
26447+ int error;
26448+ struct user_desc ldt_info;
26449+
26450+ error = -EINVAL;
26451+
26452+ if (bytecount != sizeof(ldt_info))
26453+ goto out;
26454+ error = -EFAULT;
26455+ if (copy_from_user(&ldt_info, ptr, bytecount))
26456+ goto out;
26457+
26458+ error = -EINVAL;
26459+ if (ldt_info.entry_number >= LDT_ENTRIES)
26460+ goto out;
26461+ if (ldt_info.contents == 3) {
26462+ if (oldmode)
26463+ goto out;
26464+ if (ldt_info.seg_not_present == 0)
26465+ goto out;
26466+ }
26467+
26468+ down(&mm->context.sem);
26469+ if (ldt_info.entry_number >= (unsigned)mm->context.size) {
26470+ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
26471+ if (error < 0)
26472+ goto out_unlock;
26473+ }
26474+
26475+ lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
26476+ mach_lp = arbitrary_virt_to_machine(lp);
26477+
26478+ /* Allow LDTs to be cleared by the user. */
26479+ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
26480+ if (oldmode || LDT_empty(&ldt_info)) {
26481+ entry_1 = 0;
26482+ entry_2 = 0;
26483+ goto install;
26484+ }
26485+ }
26486+
26487+ entry_1 = LDT_entry_a(&ldt_info);
26488+ entry_2 = LDT_entry_b(&ldt_info);
26489+ if (oldmode)
26490+ entry_2 &= ~(1 << 20);
26491+
26492+ /* Install the new entry ... */
26493+install:
26494+ error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
26495+
26496+out_unlock:
26497+ up(&mm->context.sem);
26498+out:
26499+ return error;
26500+}
26501+
26502+asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
26503+{
26504+ int ret = -ENOSYS;
26505+
26506+ switch (func) {
26507+ case 0:
26508+ ret = read_ldt(ptr, bytecount);
26509+ break;
26510+ case 1:
26511+ ret = write_ldt(ptr, bytecount, 1);
26512+ break;
26513+ case 2:
26514+ ret = read_default_ldt(ptr, bytecount);
26515+ break;
26516+ case 0x11:
26517+ ret = write_ldt(ptr, bytecount, 0);
26518+ break;
26519+ }
26520+ return ret;
26521+}
26522Index: head-2008-11-25/arch/x86/kernel/mpparse_64-xen.c
26523===================================================================
26524--- /dev/null 1970-01-01 00:00:00.000000000 +0000
26525+++ head-2008-11-25/arch/x86/kernel/mpparse_64-xen.c 2007-06-12 13:13:01.000000000 +0200
26526@@ -0,0 +1,1011 @@
26527+/*
26528+ * Intel Multiprocessor Specification 1.1 and 1.4
26529+ * compliant MP-table parsing routines.
26530+ *
26531+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
26532+ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
26533+ *
26534+ * Fixes
26535+ * Erich Boleyn : MP v1.4 and additional changes.
26536+ * Alan Cox : Added EBDA scanning
26537+ * Ingo Molnar : various cleanups and rewrites
26538+ * Maciej W. Rozycki: Bits for default MP configurations
26539+ * Paul Diefenbaugh: Added full ACPI support
26540+ */
26541+
26542+#include <linux/mm.h>
26543+#include <linux/init.h>
26544+#include <linux/delay.h>
26545+#include <linux/bootmem.h>
26546+#include <linux/smp_lock.h>
26547+#include <linux/kernel_stat.h>
26548+#include <linux/mc146818rtc.h>
26549+#include <linux/acpi.h>
26550+#include <linux/module.h>
26551+
26552+#include <asm/smp.h>
26553+#include <asm/mtrr.h>
26554+#include <asm/mpspec.h>
26555+#include <asm/pgalloc.h>
26556+#include <asm/io_apic.h>
26557+#include <asm/proto.h>
26558+#include <asm/acpi.h>
26559+
26560+/* Have we found an MP table */
26561+int smp_found_config;
26562+unsigned int __initdata maxcpus = NR_CPUS;
26563+
26564+int acpi_found_madt;
26565+
26566+/*
26567+ * Various Linux-internal data structures created from the
26568+ * MP-table.
26569+ */
26570+unsigned char apic_version [MAX_APICS];
26571+unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
26572+int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
26573+
26574+static int mp_current_pci_id = 0;
26575+/* I/O APIC entries */
26576+struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
26577+
26578+/* # of MP IRQ source entries */
26579+struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
26580+
26581+/* MP IRQ source entries */
26582+int mp_irq_entries;
26583+
26584+int nr_ioapics;
26585+int pic_mode;
26586+unsigned long mp_lapic_addr = 0;
26587+
26588+
26589+
26590+/* Processor that is doing the boot up */
26591+unsigned int boot_cpu_id = -1U;
26592+/* Internal processor count */
26593+unsigned int num_processors __initdata = 0;
26594+
26595+unsigned disabled_cpus __initdata;
26596+
26597+/* Bitmask of physically existing CPUs */
26598+physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
26599+
26600+/* ACPI MADT entry parsing functions */
26601+#ifdef CONFIG_ACPI
26602+extern struct acpi_boot_flags acpi_boot;
26603+#ifdef CONFIG_X86_LOCAL_APIC
26604+extern int acpi_parse_lapic (acpi_table_entry_header *header);
26605+extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
26606+extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
26607+#endif /*CONFIG_X86_LOCAL_APIC*/
26608+#ifdef CONFIG_X86_IO_APIC
26609+extern int acpi_parse_ioapic (acpi_table_entry_header *header);
26610+#endif /*CONFIG_X86_IO_APIC*/
26611+#endif /*CONFIG_ACPI*/
26612+
26613+u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
26614+
26615+
26616+/*
26617+ * Intel MP BIOS table parsing routines:
26618+ */
26619+
26620+/*
26621+ * Checksum an MP configuration block.
26622+ */
26623+
26624+static int __init mpf_checksum(unsigned char *mp, int len)
26625+{
26626+ int sum = 0;
26627+
26628+ while (len--)
26629+ sum += *mp++;
26630+
26631+ return sum & 0xFF;
26632+}
26633+
26634+#ifndef CONFIG_XEN
26635+static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
26636+{
26637+ int cpu;
26638+ unsigned char ver;
26639+ cpumask_t tmp_map;
26640+
26641+ if (!(m->mpc_cpuflag & CPU_ENABLED)) {
26642+ disabled_cpus++;
26643+ return;
26644+ }
26645+
26646+ printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
26647+ m->mpc_apicid,
26648+ (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
26649+ (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
26650+ m->mpc_apicver);
26651+
26652+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
26653+ Dprintk(" Bootup CPU\n");
26654+ boot_cpu_id = m->mpc_apicid;
26655+ }
26656+ if (num_processors >= NR_CPUS) {
26657+ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
26658+ " Processor ignored.\n", NR_CPUS);
26659+ return;
26660+ }
26661+
26662+ num_processors++;
26663+ cpus_complement(tmp_map, cpu_present_map);
26664+ cpu = first_cpu(tmp_map);
26665+
26666+#if MAX_APICS < 255
26667+ if ((int)m->mpc_apicid > MAX_APICS) {
26668+ printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
26669+ m->mpc_apicid, MAX_APICS);
26670+ return;
26671+ }
26672+#endif
26673+ ver = m->mpc_apicver;
26674+
26675+ physid_set(m->mpc_apicid, phys_cpu_present_map);
26676+ /*
26677+ * Validate version
26678+ */
26679+ if (ver == 0x0) {
26680+ printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
26681+ ver = 0x10;
26682+ }
26683+ apic_version[m->mpc_apicid] = ver;
26684+ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
26685+ /*
26686+ * bios_cpu_apicid is required to have processors listed
26687+ * in same order as logical cpu numbers. Hence the first
26688+ * entry is BSP, and so on.
26689+ */
26690+ cpu = 0;
26691+ }
26692+ bios_cpu_apicid[cpu] = m->mpc_apicid;
26693+ x86_cpu_to_apicid[cpu] = m->mpc_apicid;
26694+
26695+ cpu_set(cpu, cpu_possible_map);
26696+ cpu_set(cpu, cpu_present_map);
26697+}
26698+#else
26699+static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
26700+{
26701+ num_processors++;
26702+}
26703+#endif /* CONFIG_XEN */
26704+
26705+static void __init MP_bus_info (struct mpc_config_bus *m)
26706+{
26707+ char str[7];
26708+
26709+ memcpy(str, m->mpc_bustype, 6);
26710+ str[6] = 0;
26711+ Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
26712+
26713+ if (strncmp(str, "ISA", 3) == 0) {
26714+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
26715+ } else if (strncmp(str, "EISA", 4) == 0) {
26716+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
26717+ } else if (strncmp(str, "PCI", 3) == 0) {
26718+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
26719+ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
26720+ mp_current_pci_id++;
26721+ } else if (strncmp(str, "MCA", 3) == 0) {
26722+ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
26723+ } else {
26724+ printk(KERN_ERR "Unknown bustype %s\n", str);
26725+ }
26726+}
26727+
26728+static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
26729+{
26730+ if (!(m->mpc_flags & MPC_APIC_USABLE))
26731+ return;
26732+
26733+ printk("I/O APIC #%d Version %d at 0x%X.\n",
26734+ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
26735+ if (nr_ioapics >= MAX_IO_APICS) {
26736+ printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
26737+ MAX_IO_APICS, nr_ioapics);
26738+ panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
26739+ }
26740+ if (!m->mpc_apicaddr) {
26741+ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
26742+ " found in MP table, skipping!\n");
26743+ return;
26744+ }
26745+ mp_ioapics[nr_ioapics] = *m;
26746+ nr_ioapics++;
26747+}
26748+
26749+static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
26750+{
26751+ mp_irqs [mp_irq_entries] = *m;
26752+ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
26753+ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
26754+ m->mpc_irqtype, m->mpc_irqflag & 3,
26755+ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
26756+ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
26757+ if (++mp_irq_entries >= MAX_IRQ_SOURCES)
26758+ panic("Max # of irq sources exceeded!!\n");
26759+}
26760+
26761+static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
26762+{
26763+ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
26764+ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
26765+ m->mpc_irqtype, m->mpc_irqflag & 3,
26766+ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
26767+ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
26768+ /*
26769+ * Well it seems all SMP boards in existence
26770+ * use ExtINT/LVT1 == LINT0 and
26771+ * NMI/LVT2 == LINT1 - the following check
26772+ * will show us if this assumption is false.
26773+ * Until then we do not have to add baggage.
26774+ */
26775+ if ((m->mpc_irqtype == mp_ExtINT) &&
26776+ (m->mpc_destapiclint != 0))
26777+ BUG();
26778+ if ((m->mpc_irqtype == mp_NMI) &&
26779+ (m->mpc_destapiclint != 1))
26780+ BUG();
26781+}
26782+
26783+/*
26784+ * Read/parse the MPC
26785+ */
26786+
26787+static int __init smp_read_mpc(struct mp_config_table *mpc)
26788+{
26789+ char str[16];
26790+ int count=sizeof(*mpc);
26791+ unsigned char *mpt=((unsigned char *)mpc)+count;
26792+
26793+ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
26794+ printk("SMP mptable: bad signature [%c%c%c%c]!\n",
26795+ mpc->mpc_signature[0],
26796+ mpc->mpc_signature[1],
26797+ mpc->mpc_signature[2],
26798+ mpc->mpc_signature[3]);
26799+ return 0;
26800+ }
26801+ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
26802+ printk("SMP mptable: checksum error!\n");
26803+ return 0;
26804+ }
26805+ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
26806+ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
26807+ mpc->mpc_spec);
26808+ return 0;
26809+ }
26810+ if (!mpc->mpc_lapic) {
26811+ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
26812+ return 0;
26813+ }
26814+ memcpy(str,mpc->mpc_oem,8);
26815+ str[8]=0;
26816+ printk(KERN_INFO "OEM ID: %s ",str);
26817+
26818+ memcpy(str,mpc->mpc_productid,12);
26819+ str[12]=0;
26820+ printk("Product ID: %s ",str);
26821+
26822+ printk("APIC at: 0x%X\n",mpc->mpc_lapic);
26823+
26824+ /* save the local APIC address, it might be non-default */
26825+ if (!acpi_lapic)
26826+ mp_lapic_addr = mpc->mpc_lapic;
26827+
26828+ /*
26829+ * Now process the configuration blocks.
26830+ */
26831+ while (count < mpc->mpc_length) {
26832+ switch(*mpt) {
26833+ case MP_PROCESSOR:
26834+ {
26835+ struct mpc_config_processor *m=
26836+ (struct mpc_config_processor *)mpt;
26837+ if (!acpi_lapic)
26838+ MP_processor_info(m);
26839+ mpt += sizeof(*m);
26840+ count += sizeof(*m);
26841+ break;
26842+ }
26843+ case MP_BUS:
26844+ {
26845+ struct mpc_config_bus *m=
26846+ (struct mpc_config_bus *)mpt;
26847+ MP_bus_info(m);
26848+ mpt += sizeof(*m);
26849+ count += sizeof(*m);
26850+ break;
26851+ }
26852+ case MP_IOAPIC:
26853+ {
26854+ struct mpc_config_ioapic *m=
26855+ (struct mpc_config_ioapic *)mpt;
26856+ MP_ioapic_info(m);
26857+ mpt+=sizeof(*m);
26858+ count+=sizeof(*m);
26859+ break;
26860+ }
26861+ case MP_INTSRC:
26862+ {
26863+ struct mpc_config_intsrc *m=
26864+ (struct mpc_config_intsrc *)mpt;
26865+
26866+ MP_intsrc_info(m);
26867+ mpt+=sizeof(*m);
26868+ count+=sizeof(*m);
26869+ break;
26870+ }
26871+ case MP_LINTSRC:
26872+ {
26873+ struct mpc_config_lintsrc *m=
26874+ (struct mpc_config_lintsrc *)mpt;
26875+ MP_lintsrc_info(m);
26876+ mpt+=sizeof(*m);
26877+ count+=sizeof(*m);
26878+ break;
26879+ }
26880+ }
26881+ }
26882+ clustered_apic_check();
26883+ if (!num_processors)
26884+ printk(KERN_ERR "SMP mptable: no processors registered!\n");
26885+ return num_processors;
26886+}
26887+
26888+static int __init ELCR_trigger(unsigned int irq)
26889+{
26890+ unsigned int port;
26891+
26892+ port = 0x4d0 + (irq >> 3);
26893+ return (inb(port) >> (irq & 7)) & 1;
26894+}
26895+
26896+static void __init construct_default_ioirq_mptable(int mpc_default_type)
26897+{
26898+ struct mpc_config_intsrc intsrc;
26899+ int i;
26900+ int ELCR_fallback = 0;
26901+
26902+ intsrc.mpc_type = MP_INTSRC;
26903+ intsrc.mpc_irqflag = 0; /* conforming */
26904+ intsrc.mpc_srcbus = 0;
26905+ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
26906+
26907+ intsrc.mpc_irqtype = mp_INT;
26908+
26909+ /*
26910+ * If true, we have an ISA/PCI system with no IRQ entries
26911+ * in the MP table. To prevent the PCI interrupts from being set up
26912+ * incorrectly, we try to use the ELCR. The sanity check to see if
26913+ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
26914+ * never be level sensitive, so we simply see if the ELCR agrees.
26915+ * If it does, we assume it's valid.
26916+ */
26917+ if (mpc_default_type == 5) {
26918+ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
26919+
26920+ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
26921+ printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
26922+ else {
26923+ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
26924+ ELCR_fallback = 1;
26925+ }
26926+ }
26927+
26928+ for (i = 0; i < 16; i++) {
26929+ switch (mpc_default_type) {
26930+ case 2:
26931+ if (i == 0 || i == 13)
26932+ continue; /* IRQ0 & IRQ13 not connected */
26933+ /* fall through */
26934+ default:
26935+ if (i == 2)
26936+ continue; /* IRQ2 is never connected */
26937+ }
26938+
26939+ if (ELCR_fallback) {
26940+ /*
26941+ * If the ELCR indicates a level-sensitive interrupt, we
26942+ * copy that information over to the MP table in the
26943+ * irqflag field (level sensitive, active high polarity).
26944+ */
26945+ if (ELCR_trigger(i))
26946+ intsrc.mpc_irqflag = 13;
26947+ else
26948+ intsrc.mpc_irqflag = 0;
26949+ }
26950+
26951+ intsrc.mpc_srcbusirq = i;
26952+ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
26953+ MP_intsrc_info(&intsrc);
26954+ }
26955+
26956+ intsrc.mpc_irqtype = mp_ExtINT;
26957+ intsrc.mpc_srcbusirq = 0;
26958+ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
26959+ MP_intsrc_info(&intsrc);
26960+}
26961+
26962+static inline void __init construct_default_ISA_mptable(int mpc_default_type)
26963+{
26964+ struct mpc_config_processor processor;
26965+ struct mpc_config_bus bus;
26966+ struct mpc_config_ioapic ioapic;
26967+ struct mpc_config_lintsrc lintsrc;
26968+ int linttypes[2] = { mp_ExtINT, mp_NMI };
26969+ int i;
26970+
26971+ /*
26972+ * local APIC has default address
26973+ */
26974+ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
26975+
26976+ /*
26977+ * 2 CPUs, numbered 0 & 1.
26978+ */
26979+ processor.mpc_type = MP_PROCESSOR;
26980+ /* Either an integrated APIC or a discrete 82489DX. */
26981+ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
26982+ processor.mpc_cpuflag = CPU_ENABLED;
26983+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
26984+ (boot_cpu_data.x86_model << 4) |
26985+ boot_cpu_data.x86_mask;
26986+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
26987+ processor.mpc_reserved[0] = 0;
26988+ processor.mpc_reserved[1] = 0;
26989+ for (i = 0; i < 2; i++) {
26990+ processor.mpc_apicid = i;
26991+ MP_processor_info(&processor);
26992+ }
26993+
26994+ bus.mpc_type = MP_BUS;
26995+ bus.mpc_busid = 0;
26996+ switch (mpc_default_type) {
26997+ default:
26998+ printk(KERN_ERR "???\nUnknown standard configuration %d\n",
26999+ mpc_default_type);
27000+ /* fall through */
27001+ case 1:
27002+ case 5:
27003+ memcpy(bus.mpc_bustype, "ISA ", 6);
27004+ break;
27005+ case 2:
27006+ case 6:
27007+ case 3:
27008+ memcpy(bus.mpc_bustype, "EISA ", 6);
27009+ break;
27010+ case 4:
27011+ case 7:
27012+ memcpy(bus.mpc_bustype, "MCA ", 6);
27013+ }
27014+ MP_bus_info(&bus);
27015+ if (mpc_default_type > 4) {
27016+ bus.mpc_busid = 1;
27017+ memcpy(bus.mpc_bustype, "PCI ", 6);
27018+ MP_bus_info(&bus);
27019+ }
27020+
27021+ ioapic.mpc_type = MP_IOAPIC;
27022+ ioapic.mpc_apicid = 2;
27023+ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
27024+ ioapic.mpc_flags = MPC_APIC_USABLE;
27025+ ioapic.mpc_apicaddr = 0xFEC00000;
27026+ MP_ioapic_info(&ioapic);
27027+
27028+ /*
27029+ * We set up most of the low 16 IO-APIC pins according to MPS rules.
27030+ */
27031+ construct_default_ioirq_mptable(mpc_default_type);
27032+
27033+ lintsrc.mpc_type = MP_LINTSRC;
27034+ lintsrc.mpc_irqflag = 0; /* conforming */
27035+ lintsrc.mpc_srcbusid = 0;
27036+ lintsrc.mpc_srcbusirq = 0;
27037+ lintsrc.mpc_destapic = MP_APIC_ALL;
27038+ for (i = 0; i < 2; i++) {
27039+ lintsrc.mpc_irqtype = linttypes[i];
27040+ lintsrc.mpc_destapiclint = i;
27041+ MP_lintsrc_info(&lintsrc);
27042+ }
27043+}
27044+
27045+static struct intel_mp_floating *mpf_found;
27046+
27047+/*
27048+ * Scan the memory blocks for an SMP configuration block.
27049+ */
27050+void __init get_smp_config (void)
27051+{
27052+ struct intel_mp_floating *mpf = mpf_found;
27053+
27054+ /*
27055+ * ACPI supports both logical (e.g. Hyper-Threading) and physical
27056+ * processors, whereas MPS only supports physical.
27057+ */
27058+ if (acpi_lapic && acpi_ioapic) {
27059+ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
27060+ return;
27061+ }
27062+ else if (acpi_lapic)
27063+ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
27064+
27065+ printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
27066+ if (mpf->mpf_feature2 & (1<<7)) {
27067+ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
27068+ pic_mode = 1;
27069+ } else {
27070+ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
27071+ pic_mode = 0;
27072+ }
27073+
27074+ /*
27075+ * Now see if we need to read further.
27076+ */
27077+ if (mpf->mpf_feature1 != 0) {
27078+
27079+ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
27080+ construct_default_ISA_mptable(mpf->mpf_feature1);
27081+
27082+ } else if (mpf->mpf_physptr) {
27083+
27084+ /*
27085+ * Read the physical hardware table. Anything here will
27086+ * override the defaults.
27087+ */
27088+ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
27089+ smp_found_config = 0;
27090+ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
27091+ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
27092+ return;
27093+ }
27094+ /*
27095+ * If there are no explicit MP IRQ entries, then we are
27096+ * broken. We set up most of the low 16 IO-APIC pins to
27097+ * ISA defaults and hope it will work.
27098+ */
27099+ if (!mp_irq_entries) {
27100+ struct mpc_config_bus bus;
27101+
27102+ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
27103+
27104+ bus.mpc_type = MP_BUS;
27105+ bus.mpc_busid = 0;
27106+ memcpy(bus.mpc_bustype, "ISA ", 6);
27107+ MP_bus_info(&bus);
27108+
27109+ construct_default_ioirq_mptable(0);
27110+ }
27111+
27112+ } else
27113+ BUG();
27114+
27115+ printk(KERN_INFO "Processors: %d\n", num_processors);
27116+ /*
27117+ * Only use the first configuration found.
27118+ */
27119+}
27120+
27121+static int __init smp_scan_config (unsigned long base, unsigned long length)
27122+{
27123+ extern void __bad_mpf_size(void);
27124+ unsigned int *bp = isa_bus_to_virt(base);
27125+ struct intel_mp_floating *mpf;
27126+
27127+ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
27128+ if (sizeof(*mpf) != 16)
27129+ __bad_mpf_size();
27130+
27131+ while (length > 0) {
27132+ mpf = (struct intel_mp_floating *)bp;
27133+ if ((*bp == SMP_MAGIC_IDENT) &&
27134+ (mpf->mpf_length == 1) &&
27135+ !mpf_checksum((unsigned char *)bp, 16) &&
27136+ ((mpf->mpf_specification == 1)
27137+ || (mpf->mpf_specification == 4)) ) {
27138+
27139+ smp_found_config = 1;
27140+ mpf_found = mpf;
27141+ return 1;
27142+ }
27143+ bp += 4;
27144+ length -= 16;
27145+ }
27146+ return 0;
27147+}
27148+
27149+void __init find_intel_smp (void)
27150+{
27151+ unsigned int address;
27152+
27153+ /*
27154+ * FIXME: Linux assumes you have 640K of base ram..
27155+ * this continues the error...
27156+ *
27157+ * 1) Scan the bottom 1K for a signature
27158+ * 2) Scan the top 1K of base RAM
27159+ * 3) Scan the 64K of bios
27160+ */
27161+ if (smp_scan_config(0x0,0x400) ||
27162+ smp_scan_config(639*0x400,0x400) ||
27163+ smp_scan_config(0xF0000,0x10000))
27164+ return;
27165+ /*
27166+ * If it is an SMP machine we should know now, unless the
27167+ * configuration is in an EISA/MCA bus machine with an
27168+ * extended bios data area.
27169+ *
27170+ * there is a real-mode segmented pointer pointing to the
27171+ * 4K EBDA area at 0x40E, calculate and scan it here.
27172+ *
27173+ * NOTE! There are Linux loaders that will corrupt the EBDA
27174+ * area, and as such this kind of SMP config may be less
27175+ * trustworthy, simply because the SMP table may have been
27176+ * stomped on during early boot. These loaders are buggy and
27177+ * should be fixed.
27178+ */
27179+
27180+ address = *(unsigned short *)phys_to_virt(0x40E);
27181+ address <<= 4;
27182+ if (smp_scan_config(address, 0x1000))
27183+ return;
27184+
27185+ /* If we have come this far, we did not find an MP table */
27186+ printk(KERN_INFO "No mptable found.\n");
27187+}
27188+
27189+/*
27190+ * - Intel MP Configuration Table
27191+ */
27192+void __init find_smp_config (void)
27193+{
27194+#ifdef CONFIG_X86_LOCAL_APIC
27195+ find_intel_smp();
27196+#endif
27197+}
27198+
27199+
27200+/* --------------------------------------------------------------------------
27201+ ACPI-based MP Configuration
27202+ -------------------------------------------------------------------------- */
27203+
27204+#ifdef CONFIG_ACPI
27205+
27206+void __init mp_register_lapic_address (
27207+ u64 address)
27208+{
27209+#ifndef CONFIG_XEN
27210+ mp_lapic_addr = (unsigned long) address;
27211+
27212+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
27213+
27214+ if (boot_cpu_id == -1U)
27215+ boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
27216+
27217+ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
27218+#endif
27219+}
27220+
27221+
27222+void __cpuinit mp_register_lapic (
27223+ u8 id,
27224+ u8 enabled)
27225+{
27226+ struct mpc_config_processor processor;
27227+ int boot_cpu = 0;
27228+
27229+ if (id >= MAX_APICS) {
27230+ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
27231+ id, MAX_APICS);
27232+ return;
27233+ }
27234+
27235+ if (id == boot_cpu_physical_apicid)
27236+ boot_cpu = 1;
27237+
27238+#ifndef CONFIG_XEN
27239+ processor.mpc_type = MP_PROCESSOR;
27240+ processor.mpc_apicid = id;
27241+ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
27242+ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
27243+ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
27244+ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
27245+ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
27246+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
27247+ processor.mpc_reserved[0] = 0;
27248+ processor.mpc_reserved[1] = 0;
27249+#endif
27250+
27251+ MP_processor_info(&processor);
27252+}
27253+
27254+#ifdef CONFIG_X86_IO_APIC
27255+
27256+#define MP_ISA_BUS 0
27257+#define MP_MAX_IOAPIC_PIN 127
27258+
27259+static struct mp_ioapic_routing {
27260+ int apic_id;
27261+ int gsi_start;
27262+ int gsi_end;
27263+ u32 pin_programmed[4];
27264+} mp_ioapic_routing[MAX_IO_APICS];
27265+
27266+
27267+static int mp_find_ioapic (
27268+ int gsi)
27269+{
27270+ int i = 0;
27271+
27272+ /* Find the IOAPIC that manages this GSI. */
27273+ for (i = 0; i < nr_ioapics; i++) {
27274+ if ((gsi >= mp_ioapic_routing[i].gsi_start)
27275+ && (gsi <= mp_ioapic_routing[i].gsi_end))
27276+ return i;
27277+ }
27278+
27279+ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
27280+
27281+ return -1;
27282+}
27283+
27284+
27285+void __init mp_register_ioapic (
27286+ u8 id,
27287+ u32 address,
27288+ u32 gsi_base)
27289+{
27290+ int idx = 0;
27291+
27292+ if (nr_ioapics >= MAX_IO_APICS) {
27293+ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
27294+ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
27295+ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
27296+ }
27297+ if (!address) {
27298+ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
27299+ " found in MADT table, skipping!\n");
27300+ return;
27301+ }
27302+
27303+ idx = nr_ioapics++;
27304+
27305+ mp_ioapics[idx].mpc_type = MP_IOAPIC;
27306+ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
27307+ mp_ioapics[idx].mpc_apicaddr = address;
27308+
27309+#ifndef CONFIG_XEN
27310+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
27311+#endif
27312+ mp_ioapics[idx].mpc_apicid = id;
27313+ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
27314+
27315+ /*
27316+ * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
27317+ * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
27318+ */
27319+ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
27320+ mp_ioapic_routing[idx].gsi_start = gsi_base;
27321+ mp_ioapic_routing[idx].gsi_end = gsi_base +
27322+ io_apic_get_redir_entries(idx);
27323+
27324+ printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
27325+ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
27326+ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
27327+ mp_ioapic_routing[idx].gsi_start,
27328+ mp_ioapic_routing[idx].gsi_end);
27329+
27330+ return;
27331+}
27332+
27333+
27334+void __init mp_override_legacy_irq (
27335+ u8 bus_irq,
27336+ u8 polarity,
27337+ u8 trigger,
27338+ u32 gsi)
27339+{
27340+ struct mpc_config_intsrc intsrc;
27341+ int ioapic = -1;
27342+ int pin = -1;
27343+
27344+ /*
27345+ * Convert 'gsi' to 'ioapic.pin'.
27346+ */
27347+ ioapic = mp_find_ioapic(gsi);
27348+ if (ioapic < 0)
27349+ return;
27350+ pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
27351+
27352+ /*
27353+ * TBD: This check is for faulty timer entries, where the override
27354+ * erroneously sets the trigger to level, resulting in a HUGE
27355+ * increase of timer interrupts!
27356+ */
27357+ if ((bus_irq == 0) && (trigger == 3))
27358+ trigger = 1;
27359+
27360+ intsrc.mpc_type = MP_INTSRC;
27361+ intsrc.mpc_irqtype = mp_INT;
27362+ intsrc.mpc_irqflag = (trigger << 2) | polarity;
27363+ intsrc.mpc_srcbus = MP_ISA_BUS;
27364+ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
27365+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
27366+ intsrc.mpc_dstirq = pin; /* INTIN# */
27367+
27368+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
27369+ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
27370+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
27371+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
27372+
27373+ mp_irqs[mp_irq_entries] = intsrc;
27374+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
27375+ panic("Max # of irq sources exceeded!\n");
27376+
27377+ return;
27378+}
27379+
27380+
27381+void __init mp_config_acpi_legacy_irqs (void)
27382+{
27383+ struct mpc_config_intsrc intsrc;
27384+ int i = 0;
27385+ int ioapic = -1;
27386+
27387+ /*
27388+ * Fabricate the legacy ISA bus (bus #31).
27389+ */
27390+ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
27391+ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
27392+
27393+ /*
27394+ * Locate the IOAPIC that manages the ISA IRQs (0-15).
27395+ */
27396+ ioapic = mp_find_ioapic(0);
27397+ if (ioapic < 0)
27398+ return;
27399+
27400+ intsrc.mpc_type = MP_INTSRC;
27401+ intsrc.mpc_irqflag = 0; /* Conforming */
27402+ intsrc.mpc_srcbus = MP_ISA_BUS;
27403+ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
27404+
27405+ /*
27406+ * Use the default configuration for the IRQs 0-15, unless
27407+ * overridden by (MADT) interrupt source override entries.
27408+ */
27409+ for (i = 0; i < 16; i++) {
27410+ int idx;
27411+
27412+ for (idx = 0; idx < mp_irq_entries; idx++) {
27413+ struct mpc_config_intsrc *irq = mp_irqs + idx;
27414+
27415+ /* Do we already have a mapping for this ISA IRQ? */
27416+ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
27417+ break;
27418+
27419+ /* Do we already have a mapping for this IOAPIC pin */
27420+ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
27421+ (irq->mpc_dstirq == i))
27422+ break;
27423+ }
27424+
27425+ if (idx != mp_irq_entries) {
27426+ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
27427+ continue; /* IRQ already used */
27428+ }
27429+
27430+ intsrc.mpc_irqtype = mp_INT;
27431+ intsrc.mpc_srcbusirq = i; /* Identity mapped */
27432+ intsrc.mpc_dstirq = i;
27433+
27434+ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
27435+ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
27436+ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
27437+ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
27438+ intsrc.mpc_dstirq);
27439+
27440+ mp_irqs[mp_irq_entries] = intsrc;
27441+ if (++mp_irq_entries == MAX_IRQ_SOURCES)
27442+ panic("Max # of irq sources exceeded!\n");
27443+ }
27444+
27445+ return;
27446+}
27447+
27448+#define MAX_GSI_NUM 4096
27449+
27450+int mp_register_gsi(u32 gsi, int triggering, int polarity)
27451+{
27452+ int ioapic = -1;
27453+ int ioapic_pin = 0;
27454+ int idx, bit = 0;
27455+ static int pci_irq = 16;
27456+ /*
27457+ * Mapping between Global System Interrupts, which
27458+ * represent all possible interrupts, to the IRQs
27459+ * assigned to actual devices.
27460+ */
27461+ static int gsi_to_irq[MAX_GSI_NUM];
27462+
27463+ if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
27464+ return gsi;
27465+
27466+ /* Don't set up the ACPI SCI because it's already set up */
27467+ if (acpi_fadt.sci_int == gsi)
27468+ return gsi;
27469+
27470+ ioapic = mp_find_ioapic(gsi);
27471+ if (ioapic < 0) {
27472+ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
27473+ return gsi;
27474+ }
27475+
27476+ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
27477+
27478+ /*
27479+ * Avoid pin reprogramming. PRTs typically include entries
27480+ * with redundant pin->gsi mappings (but unique PCI devices);
27481+ * we only program the IOAPIC on the first.
27482+ */
27483+ bit = ioapic_pin % 32;
27484+ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
27485+ if (idx > 3) {
27486+ printk(KERN_ERR "Invalid reference to IOAPIC pin "
27487+ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
27488+ ioapic_pin);
27489+ return gsi;
27490+ }
27491+ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
27492+ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
27493+ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
27494+ return gsi_to_irq[gsi];
27495+ }
27496+
27497+ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
27498+
27499+ if (triggering == ACPI_LEVEL_SENSITIVE) {
27500+ /*
27501+ * For PCI devices assign IRQs in order, avoiding gaps
27502+ * due to unused I/O APIC pins.
27503+ */
27504+ int irq = gsi;
27505+ if (gsi < MAX_GSI_NUM) {
27506+ /*
27507+ * Retain the VIA chipset work-around (gsi > 15), but
27508+ * avoid a problem where the 8254 timer (IRQ0) is setup
27509+ * via an override (so it's not on pin 0 of the ioapic),
27510+ * and at the same time, the pin 0 interrupt is a PCI
27511+ * type. The gsi > 15 test could cause these two pins
27512+ * to be shared as IRQ0, and they are not shareable.
27513+ * So test for this condition, and if necessary, avoid
27514+ * the pin collision.
27515+ */
27516+ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
27517+ gsi = pci_irq++;
27518+ /*
27519+ * Don't assign IRQ used by ACPI SCI
27520+ */
27521+ if (gsi == acpi_fadt.sci_int)
27522+ gsi = pci_irq++;
27523+ gsi_to_irq[irq] = gsi;
27524+ } else {
27525+ printk(KERN_ERR "GSI %u is too high\n", gsi);
27526+ return gsi;
27527+ }
27528+ }
27529+
27530+ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
27531+ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
27532+ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
27533+ return gsi;
27534+}
27535+
27536+#endif /*CONFIG_X86_IO_APIC*/
27537+#endif /*CONFIG_ACPI*/
27538Index: head-2008-11-25/arch/x86/kernel/process_64-xen.c
27539===================================================================
27540--- /dev/null 1970-01-01 00:00:00.000000000 +0000
27541+++ head-2008-11-25/arch/x86/kernel/process_64-xen.c 2008-08-07 12:44:36.000000000 +0200
27542@@ -0,0 +1,848 @@
27543+/*
27544+ * linux/arch/x86-64/kernel/process.c
27545+ *
27546+ * Copyright (C) 1995 Linus Torvalds
27547+ *
27548+ * Pentium III FXSR, SSE support
27549+ * Gareth Hughes <gareth@valinux.com>, May 2000
27550+ *
27551+ * X86-64 port
27552+ * Andi Kleen.
27553+ *
27554+ * CPU hotplug support - ashok.raj@intel.com
27555+ *
27556+ * Jun Nakajima <jun.nakajima@intel.com>
27557+ * Modified for Xen
27558+ */
27559+
27560+/*
27561+ * This file handles the architecture-dependent parts of process handling..
27562+ */
27563+
27564+#include <stdarg.h>
27565+
27566+#include <linux/cpu.h>
27567+#include <linux/errno.h>
27568+#include <linux/sched.h>
27569+#include <linux/kernel.h>
27570+#include <linux/mm.h>
27571+#include <linux/elfcore.h>
27572+#include <linux/smp.h>
27573+#include <linux/slab.h>
27574+#include <linux/user.h>
27575+#include <linux/module.h>
27576+#include <linux/a.out.h>
27577+#include <linux/interrupt.h>
27578+#include <linux/delay.h>
27579+#include <linux/ptrace.h>
27580+#include <linux/utsname.h>
27581+#include <linux/random.h>
27582+#include <linux/notifier.h>
27583+#include <linux/kprobes.h>
27584+
27585+#include <asm/uaccess.h>
27586+#include <asm/pgtable.h>
27587+#include <asm/system.h>
27588+#include <asm/io.h>
27589+#include <asm/processor.h>
27590+#include <asm/i387.h>
27591+#include <asm/mmu_context.h>
27592+#include <asm/pda.h>
27593+#include <asm/prctl.h>
27594+#include <asm/kdebug.h>
27595+#include <xen/interface/platform.h>
27596+#include <xen/interface/physdev.h>
27597+#include <xen/interface/vcpu.h>
27598+#include <asm/desc.h>
27599+#include <asm/proto.h>
27600+#include <asm/hardirq.h>
27601+#include <asm/ia32.h>
27602+#include <asm/idle.h>
27603+
27604+#include <xen/cpu_hotplug.h>
27605+
27606+asmlinkage extern void ret_from_fork(void);
27607+
27608+unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
27609+
27610+unsigned long boot_option_idle_override = 0;
27611+EXPORT_SYMBOL(boot_option_idle_override);
27612+
27613+/*
27614+ * Powermanagement idle function, if any..
27615+ */
27616+void (*pm_idle)(void);
27617+EXPORT_SYMBOL(pm_idle);
27618+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
27619+
27620+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
27621+
27622+void idle_notifier_register(struct notifier_block *n)
27623+{
27624+ atomic_notifier_chain_register(&idle_notifier, n);
27625+}
27626+EXPORT_SYMBOL_GPL(idle_notifier_register);
27627+
27628+void idle_notifier_unregister(struct notifier_block *n)
27629+{
27630+ atomic_notifier_chain_unregister(&idle_notifier, n);
27631+}
27632+EXPORT_SYMBOL(idle_notifier_unregister);
27633+
27634+enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
27635+static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
27636+
27637+void enter_idle(void)
27638+{
27639+ __get_cpu_var(idle_state) = CPU_IDLE;
27640+ atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
27641+}
27642+
27643+static void __exit_idle(void)
27644+{
27645+ __get_cpu_var(idle_state) = CPU_NOT_IDLE;
27646+ atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
27647+}
27648+
27649+/* Called from interrupts to signify idle end */
27650+void exit_idle(void)
27651+{
27652+ if (current->pid | read_pda(irqcount))
27653+ return;
27654+ __exit_idle();
27655+}
27656+
27657+/*
27658+ * On SMP it's slightly faster (but much more power-consuming!)
27659+ * to poll the ->need_resched flag instead of waiting for the
27660+ * cross-CPU IPI to arrive. Use this option with caution.
27661+ */
27662+static void poll_idle (void)
27663+{
27664+ local_irq_enable();
27665+
27666+ asm volatile(
27667+ "2:"
27668+ "testl %0,%1;"
27669+ "rep; nop;"
27670+ "je 2b;"
27671+ : :
27672+ "i" (_TIF_NEED_RESCHED),
27673+ "m" (current_thread_info()->flags));
27674+}
27675+
27676+static void xen_idle(void)
27677+{
27678+ local_irq_disable();
27679+
27680+ if (need_resched())
27681+ local_irq_enable();
27682+ else {
27683+ current_thread_info()->status &= ~TS_POLLING;
27684+ smp_mb__after_clear_bit();
27685+ safe_halt();
27686+ current_thread_info()->status |= TS_POLLING;
27687+ }
27688+}
27689+
27690+#ifdef CONFIG_HOTPLUG_CPU
27691+static inline void play_dead(void)
27692+{
27693+ idle_task_exit();
27694+ local_irq_disable();
27695+ cpu_clear(smp_processor_id(), cpu_initialized);
27696+ preempt_enable_no_resched();
27697+ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
27698+ cpu_bringup();
27699+}
27700+#else
27701+static inline void play_dead(void)
27702+{
27703+ BUG();
27704+}
27705+#endif /* CONFIG_HOTPLUG_CPU */
27706+
27707+/*
27708+ * The idle thread. There's no useful work to be
27709+ * done, so just try to conserve power and have a
27710+ * low exit latency (ie sit in a loop waiting for
27711+ * somebody to say that they'd like to reschedule)
27712+ */
27713+void cpu_idle (void)
27714+{
27715+ current_thread_info()->status |= TS_POLLING;
27716+ /* endless idle loop with no priority at all */
27717+ while (1) {
27718+ while (!need_resched()) {
27719+ void (*idle)(void);
27720+
27721+ if (__get_cpu_var(cpu_idle_state))
27722+ __get_cpu_var(cpu_idle_state) = 0;
27723+ rmb();
27724+ idle = xen_idle; /* no alternatives */
27725+ if (cpu_is_offline(smp_processor_id()))
27726+ play_dead();
27727+ enter_idle();
27728+ idle();
27729+ __exit_idle();
27730+ }
27731+
27732+ preempt_enable_no_resched();
27733+ schedule();
27734+ preempt_disable();
27735+ }
27736+}
27737+
27738+void cpu_idle_wait(void)
27739+{
27740+ unsigned int cpu, this_cpu = get_cpu();
27741+ cpumask_t map;
27742+
27743+ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
27744+ put_cpu();
27745+
27746+ cpus_clear(map);
27747+ for_each_online_cpu(cpu) {
27748+ per_cpu(cpu_idle_state, cpu) = 1;
27749+ cpu_set(cpu, map);
27750+ }
27751+
27752+ __get_cpu_var(cpu_idle_state) = 0;
27753+
27754+ wmb();
27755+ do {
27756+ ssleep(1);
27757+ for_each_online_cpu(cpu) {
27758+ if (cpu_isset(cpu, map) &&
27759+ !per_cpu(cpu_idle_state, cpu))
27760+ cpu_clear(cpu, map);
27761+ }
27762+ cpus_and(map, map, cpu_online_map);
27763+ } while (!cpus_empty(map));
27764+}
27765+EXPORT_SYMBOL_GPL(cpu_idle_wait);
27766+
27767+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
27768+{
27769+}
27770+
27771+static int __init idle_setup (char *str)
27772+{
27773+ if (!strncmp(str, "poll", 4)) {
27774+ printk("using polling idle threads.\n");
27775+ pm_idle = poll_idle;
27776+ }
27777+
27778+ boot_option_idle_override = 1;
27779+ return 1;
27780+}
27781+
27782+__setup("idle=", idle_setup);
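
Usage note: with the setup handler above, booting with, for example,

	idle=poll

on the kernel command line installs poll_idle as pm_idle and sets boot_option_idle_override; note that cpu_idle() in this Xen variant still picks xen_idle unconditionally ("no alternatives").
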
27783+
27784+/* Also prints some state that isn't saved in the pt_regs */
27785+void __show_regs(struct pt_regs * regs)
27786+{
27787+ unsigned long fs, gs, shadowgs;
27788+ unsigned int fsindex,gsindex;
27789+ unsigned int ds,cs,es;
27790+
27791+ printk("\n");
27792+ print_modules();
27793+ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
27794+ current->pid, current->comm, print_tainted(),
27795+ system_utsname.release,
27796+ (int)strcspn(system_utsname.version, " "),
27797+ system_utsname.version);
27798+ printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
27799+ printk_address(regs->rip);
27800+ printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
27801+ regs->eflags);
27802+ printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
27803+ regs->rax, regs->rbx, regs->rcx);
27804+ printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
27805+ regs->rdx, regs->rsi, regs->rdi);
27806+ printk("RBP: %016lx R08: %016lx R09: %016lx\n",
27807+ regs->rbp, regs->r8, regs->r9);
27808+ printk("R10: %016lx R11: %016lx R12: %016lx\n",
27809+ regs->r10, regs->r11, regs->r12);
27810+ printk("R13: %016lx R14: %016lx R15: %016lx\n",
27811+ regs->r13, regs->r14, regs->r15);
27812+
27813+ asm("mov %%ds,%0" : "=r" (ds));
27814+ asm("mov %%cs,%0" : "=r" (cs));
27815+ asm("mov %%es,%0" : "=r" (es));
27816+ asm("mov %%fs,%0" : "=r" (fsindex));
27817+ asm("mov %%gs,%0" : "=r" (gsindex));
27818+
27819+ rdmsrl(MSR_FS_BASE, fs);
27820+ rdmsrl(MSR_GS_BASE, gs);
27821+ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
27822+
27823+ printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
27824+ fs,fsindex,gs,gsindex,shadowgs);
27825+ printk("CS: %04x DS: %04x ES: %04x\n", cs, ds, es);
27826+
27827+}
27828+
27829+void show_regs(struct pt_regs *regs)
27830+{
27831+ printk("CPU %d:", smp_processor_id());
27832+ __show_regs(regs);
27833+ show_trace(NULL, regs, (void *)(regs + 1));
27834+}
27835+
27836+/*
27837+ * Free current thread data structures etc..
27838+ */
27839+void exit_thread(void)
27840+{
27841+ struct task_struct *me = current;
27842+ struct thread_struct *t = &me->thread;
27843+
27844+ if (me->thread.io_bitmap_ptr) {
27845+#ifndef CONFIG_X86_NO_TSS
27846+ struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
27847+#endif
27848+#ifdef CONFIG_XEN
27849+ struct physdev_set_iobitmap iobmp_op;
27850+ memset(&iobmp_op, 0, sizeof(iobmp_op));
27851+#endif
27852+
27853+ kfree(t->io_bitmap_ptr);
27854+ t->io_bitmap_ptr = NULL;
27855+ /*
27856+ * Careful, clear this in the TSS too:
27857+ */
27858+#ifndef CONFIG_X86_NO_TSS
27859+ memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
27860+ put_cpu();
27861+#endif
27862+#ifdef CONFIG_XEN
27863+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
27864+ &iobmp_op));
27865+#endif
27866+ t->io_bitmap_max = 0;
27867+ }
27868+}
27869+
27870+void load_gs_index(unsigned gs)
27871+{
27872+ WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs));
27873+}
27874+
27875+void flush_thread(void)
27876+{
27877+ struct task_struct *tsk = current;
27878+ struct thread_info *t = current_thread_info();
27879+
27880+ if (t->flags & _TIF_ABI_PENDING) {
27881+ t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
27882+ if (t->flags & _TIF_IA32)
27883+ current_thread_info()->status |= TS_COMPAT;
27884+ }
27885+
27886+ tsk->thread.debugreg0 = 0;
27887+ tsk->thread.debugreg1 = 0;
27888+ tsk->thread.debugreg2 = 0;
27889+ tsk->thread.debugreg3 = 0;
27890+ tsk->thread.debugreg6 = 0;
27891+ tsk->thread.debugreg7 = 0;
27892+ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
27893+ /*
27894+ * Forget coprocessor state..
27895+ */
27896+ clear_fpu(tsk);
27897+ clear_used_math();
27898+}
27899+
27900+void release_thread(struct task_struct *dead_task)
27901+{
27902+ if (dead_task->mm) {
27903+ if (dead_task->mm->context.size) {
27904+ printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
27905+ dead_task->comm,
27906+ dead_task->mm->context.ldt,
27907+ dead_task->mm->context.size);
27908+ BUG();
27909+ }
27910+ }
27911+}
27912+
27913+static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
27914+{
27915+ struct user_desc ud = {
27916+ .base_addr = addr,
27917+ .limit = 0xfffff,
27918+ .seg_32bit = 1,
27919+ .limit_in_pages = 1,
27920+ .useable = 1,
27921+ };
27922+ struct n_desc_struct *desc = (void *)t->thread.tls_array;
27923+ desc += tls;
27924+ desc->a = LDT_entry_a(&ud);
27925+ desc->b = LDT_entry_b(&ud);
27926+}
27927+
27928+static inline u32 read_32bit_tls(struct task_struct *t, int tls)
27929+{
27930+ struct desc_struct *desc = (void *)t->thread.tls_array;
27931+ desc += tls;
27932+ return desc->base0 |
27933+ (((u32)desc->base1) << 16) |
27934+ (((u32)desc->base2) << 24);
27935+}
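
For illustration, not part of the patch: the helpers above split and reassemble a 32-bit base address across the x86 descriptor fields, with base0 holding bits 0-15, base1 bits 16-23 and base2 bits 24-31. A standalone sketch of the same bit layout (the sample address is arbitrary):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t base = 0x12345678;	/* arbitrary example base */
	uint16_t base0 = base & 0xffff;		/* bits  0..15 */
	uint8_t  base1 = (base >> 16) & 0xff;	/* bits 16..23 */
	uint8_t  base2 = (base >> 24) & 0xff;	/* bits 24..31 */

	/* read_32bit_tls() reassembles the same value: */
	assert((base0 | ((uint32_t)base1 << 16) |
		((uint32_t)base2 << 24)) == base);
	return 0;
}
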
27936+
27937+/*
27938+ * This gets called before we allocate a new thread and copy
27939+ * the current task into it.
27940+ */
27941+void prepare_to_copy(struct task_struct *tsk)
27942+{
27943+ unlazy_fpu(tsk);
27944+}
27945+
27946+int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
27947+ unsigned long unused,
27948+ struct task_struct * p, struct pt_regs * regs)
27949+{
27950+ int err;
27951+ struct pt_regs * childregs;
27952+ struct task_struct *me = current;
27953+
27954+ childregs = ((struct pt_regs *)
27955+ (THREAD_SIZE + task_stack_page(p))) - 1;
27956+ *childregs = *regs;
27957+
27958+ childregs->rax = 0;
27959+ childregs->rsp = rsp;
27960+ if (rsp == ~0UL)
27961+ childregs->rsp = (unsigned long)childregs;
27962+
27963+ p->thread.rsp = (unsigned long) childregs;
27964+ p->thread.rsp0 = (unsigned long) (childregs+1);
27965+ p->thread.userrsp = me->thread.userrsp;
27966+
27967+ set_tsk_thread_flag(p, TIF_FORK);
27968+
27969+ p->thread.fs = me->thread.fs;
27970+ p->thread.gs = me->thread.gs;
27971+
27972+ asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
27973+ asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
27974+ asm("mov %%es,%0" : "=m" (p->thread.es));
27975+ asm("mov %%ds,%0" : "=m" (p->thread.ds));
27976+
27977+ if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
27978+ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
27979+ if (!p->thread.io_bitmap_ptr) {
27980+ p->thread.io_bitmap_max = 0;
27981+ return -ENOMEM;
27982+ }
27983+ memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
27984+ IO_BITMAP_BYTES);
27985+ }
27986+
27987+ /*
27988+ * Set a new TLS for the child thread?
27989+ */
27990+ if (clone_flags & CLONE_SETTLS) {
27991+#ifdef CONFIG_IA32_EMULATION
27992+ if (test_thread_flag(TIF_IA32))
27993+ err = ia32_child_tls(p, childregs);
27994+ else
27995+#endif
27996+ err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
27997+ if (err)
27998+ goto out;
27999+ }
28000+ p->thread.iopl = current->thread.iopl;
28001+
28002+ err = 0;
28003+out:
28004+ if (err && p->thread.io_bitmap_ptr) {
28005+ kfree(p->thread.io_bitmap_ptr);
28006+ p->thread.io_bitmap_max = 0;
28007+ }
28008+ return err;
28009+}
28010+
28011+static inline void __save_init_fpu( struct task_struct *tsk )
28012+{
28013+ asm volatile( "rex64 ; fxsave %0 ; fnclex"
28014+ : "=m" (tsk->thread.i387.fxsave));
28015+ tsk->thread_info->status &= ~TS_USEDFPU;
28016+}
28017+
28018+/*
28019+ * switch_to(x,y) should switch tasks from x to y.
28020+ *
28021+ * This could still be optimized:
28022+ * - fold all the options into a flag word and test it with a single test.
28023+ * - could test fs/gs bitsliced
28024+ *
28025+ * Kprobes not supported here. Set the probe on schedule instead.
28026+ */
28027+__kprobes struct task_struct *
28028+__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
28029+{
28030+ struct thread_struct *prev = &prev_p->thread,
28031+ *next = &next_p->thread;
28032+ int cpu = smp_processor_id();
28033+#ifndef CONFIG_X86_NO_TSS
28034+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
28035+#endif
28036+#if CONFIG_XEN_COMPAT > 0x030002
28037+ struct physdev_set_iopl iopl_op;
28038+ struct physdev_set_iobitmap iobmp_op;
28039+#else
28040+ struct physdev_op _pdo[2], *pdo = _pdo;
28041+#define iopl_op pdo->u.set_iopl
28042+#define iobmp_op pdo->u.set_iobitmap
28043+#endif
28044+ multicall_entry_t _mcl[8], *mcl = _mcl;
28045+
28046+ /*
28047+ * This is basically '__unlazy_fpu', except that we queue a
28048+ * multicall to indicate FPU task switch, rather than
28049+ * synchronously trapping to Xen.
28050+ * The AMD workaround requires it to be after DS reload, or
28051+ * after DS has been cleared, which we do in __prepare_arch_switch.
28052+ */
28053+ if (prev_p->thread_info->status & TS_USEDFPU) {
28054+ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
28055+ mcl->op = __HYPERVISOR_fpu_taskswitch;
28056+ mcl->args[0] = 1;
28057+ mcl++;
28058+ }
28059+
28060+ /*
28061+ * Reload esp0, LDT and the page table pointer:
28062+ */
28063+ mcl->op = __HYPERVISOR_stack_switch;
28064+ mcl->args[0] = __KERNEL_DS;
28065+ mcl->args[1] = next->rsp0;
28066+ mcl++;
28067+
28068+ /*
28069+ * Load the per-thread Thread-Local Storage descriptor.
28070+ * This is load_TLS(next, cpu) with multicalls.
28071+ */
28072+#define C(i) do { \
28073+ if (unlikely(next->tls_array[i] != prev->tls_array[i])) { \
28074+ mcl->op = __HYPERVISOR_update_descriptor; \
28075+ mcl->args[0] = virt_to_machine( \
28076+ &cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]); \
28077+ mcl->args[1] = next->tls_array[i]; \
28078+ mcl++; \
28079+ } \
28080+} while (0)
28081+ C(0); C(1); C(2);
28082+#undef C
28083+
28084+ if (unlikely(prev->iopl != next->iopl)) {
28085+ iopl_op.iopl = (next->iopl == 0) ? 1 : next->iopl;
28086+#if CONFIG_XEN_COMPAT > 0x030002
28087+ mcl->op = __HYPERVISOR_physdev_op;
28088+ mcl->args[0] = PHYSDEVOP_set_iopl;
28089+ mcl->args[1] = (unsigned long)&iopl_op;
28090+#else
28091+ mcl->op = __HYPERVISOR_physdev_op_compat;
28092+ pdo->cmd = PHYSDEVOP_set_iopl;
28093+ mcl->args[0] = (unsigned long)pdo++;
28094+#endif
28095+ mcl++;
28096+ }
28097+
28098+ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
28099+ set_xen_guest_handle(iobmp_op.bitmap,
28100+ (char *)next->io_bitmap_ptr);
28101+ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
28102+#if CONFIG_XEN_COMPAT > 0x030002
28103+ mcl->op = __HYPERVISOR_physdev_op;
28104+ mcl->args[0] = PHYSDEVOP_set_iobitmap;
28105+ mcl->args[1] = (unsigned long)&iobmp_op;
28106+#else
28107+ mcl->op = __HYPERVISOR_physdev_op_compat;
28108+ pdo->cmd = PHYSDEVOP_set_iobitmap;
28109+ mcl->args[0] = (unsigned long)pdo++;
28110+#endif
28111+ mcl++;
28112+ }
28113+
28114+#if CONFIG_XEN_COMPAT <= 0x030002
28115+ BUG_ON(pdo > _pdo + ARRAY_SIZE(_pdo));
28116+#endif
28117+ BUG_ON(mcl > _mcl + ARRAY_SIZE(_mcl));
28118+ if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL)))
28119+ BUG();
28120+
28121+ /*
28122+ * Switch DS and ES.
28123+ * This won't pick up thread selector changes, but I guess that is ok.
28124+ */
28125+ if (unlikely(next->es))
28126+ loadsegment(es, next->es);
28127+
28128+ if (unlikely(next->ds))
28129+ loadsegment(ds, next->ds);
28130+
28131+ /*
28132+ * Switch FS and GS.
28133+ */
28134+ if (unlikely(next->fsindex))
28135+ loadsegment(fs, next->fsindex);
28136+
28137+ if (next->fs)
28138+ WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs));
28139+
28140+ if (unlikely(next->gsindex))
28141+ load_gs_index(next->gsindex);
28142+
28143+ if (next->gs)
28144+ WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs));
28145+
28146+ /*
28147+ * Switch the PDA context.
28148+ */
28149+ prev->userrsp = read_pda(oldrsp);
28150+ write_pda(oldrsp, next->userrsp);
28151+ write_pda(pcurrent, next_p);
28152+ write_pda(kernelstack,
28153+ task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
28154+
28155+ /*
28156+ * Now maybe reload the debug registers
28157+ */
28158+ if (unlikely(next->debugreg7)) {
28159+ set_debugreg(next->debugreg0, 0);
28160+ set_debugreg(next->debugreg1, 1);
28161+ set_debugreg(next->debugreg2, 2);
28162+ set_debugreg(next->debugreg3, 3);
28163+ /* no 4 and 5 */
28164+ set_debugreg(next->debugreg6, 6);
28165+ set_debugreg(next->debugreg7, 7);
28166+ }
28167+
28168+ return prev_p;
28169+}
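
The batching described by the comments in __switch_to() above boils down to filling an array of multicall_entry_t and trapping into Xen once for the whole batch. A minimal sketch, illustrative only: example_batch() is a hypothetical helper, while the hypercall numbers and HYPERVISOR_multicall_check() are the ones already used in the function above.

static void example_batch(unsigned long new_rsp0)	/* hypothetical */
{
	multicall_entry_t mcl[2], *m = mcl;

	/* queue a kernel stack switch ... */
	m->op = __HYPERVISOR_stack_switch;
	m->args[0] = __KERNEL_DS;
	m->args[1] = new_rsp0;
	m++;

	/* ... and an FPU task-switch notification ... */
	m->op = __HYPERVISOR_fpu_taskswitch;
	m->args[0] = 1;
	m++;

	/* ... then enter the hypervisor once for both operations. */
	if (HYPERVISOR_multicall_check(mcl, m - mcl, NULL))
		BUG();
}
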
28170+
28171+/*
28172+ * sys_execve() executes a new program.
28173+ */
28174+asmlinkage
28175+long sys_execve(char __user *name, char __user * __user *argv,
28176+ char __user * __user *envp, struct pt_regs regs)
28177+{
28178+ long error;
28179+ char * filename;
28180+
28181+ filename = getname(name);
28182+ error = PTR_ERR(filename);
28183+ if (IS_ERR(filename))
28184+ return error;
28185+ error = do_execve(filename, argv, envp, &regs);
28186+ if (error == 0) {
28187+ task_lock(current);
28188+ current->ptrace &= ~PT_DTRACE;
28189+ task_unlock(current);
28190+ }
28191+ putname(filename);
28192+ return error;
28193+}
28194+
28195+void set_personality_64bit(void)
28196+{
28197+ /* inherit personality from parent */
28198+
28199+ /* Make sure to be in 64bit mode */
28200+ clear_thread_flag(TIF_IA32);
28201+
28202+ /* TBD: overwrites user setup. Should have two bits.
28203+ But 64bit processes have always behaved this way,
28204+ so it's not too bad. The main problem is just that
28205+ 32bit children are affected again. */
28206+ current->personality &= ~READ_IMPLIES_EXEC;
28207+}
28208+
28209+asmlinkage long sys_fork(struct pt_regs *regs)
28210+{
28211+ return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
28212+}
28213+
28214+asmlinkage long
28215+sys_clone(unsigned long clone_flags, unsigned long newsp,
28216+ void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
28217+{
28218+ if (!newsp)
28219+ newsp = regs->rsp;
28220+ return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
28221+}
28222+
28223+/*
28224+ * This is trivial, and on the face of it looks like it
28225+ * could equally well be done in user mode.
28226+ *
28227+ * Not so, for quite unobvious reasons - register pressure.
28228+ * In user mode vfork() cannot have a stack frame, and if
28229+ * done by calling the "clone()" system call directly, you
28230+ * do not have enough call-clobbered registers to hold all
28231+ * the information you need.
28232+ */
28233+asmlinkage long sys_vfork(struct pt_regs *regs)
28234+{
28235+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
28236+ NULL, NULL);
28237+}
28238+
28239+unsigned long get_wchan(struct task_struct *p)
28240+{
28241+ unsigned long stack;
28242+ u64 fp,rip;
28243+ int count = 0;
28244+
28245+ if (!p || p == current || p->state==TASK_RUNNING)
28246+ return 0;
28247+ stack = (unsigned long)task_stack_page(p);
28248+ if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
28249+ return 0;
28250+ fp = *(u64 *)(p->thread.rsp);
28251+ do {
28252+ if (fp < (unsigned long)stack ||
28253+ fp > (unsigned long)stack+THREAD_SIZE)
28254+ return 0;
28255+ rip = *(u64 *)(fp+8);
28256+ if (!in_sched_functions(rip))
28257+ return rip;
28258+ fp = *(u64 *)fp;
28259+ } while (count++ < 16);
28260+ return 0;
28261+}
28262+
28263+long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
28264+{
28265+ int ret = 0;
28266+ int doit = task == current;
28267+ int cpu;
28268+
28269+ switch (code) {
28270+ case ARCH_SET_GS:
28271+ if (addr >= TASK_SIZE_OF(task))
28272+ return -EPERM;
28273+ cpu = get_cpu();
28274+ /* handle small bases via the GDT because that's faster to
28275+ switch. */
28276+ if (addr <= 0xffffffff) {
28277+ set_32bit_tls(task, GS_TLS, addr);
28278+ if (doit) {
28279+ load_TLS(&task->thread, cpu);
28280+ load_gs_index(GS_TLS_SEL);
28281+ }
28282+ task->thread.gsindex = GS_TLS_SEL;
28283+ task->thread.gs = 0;
28284+ } else {
28285+ task->thread.gsindex = 0;
28286+ task->thread.gs = addr;
28287+ if (doit) {
28288+ load_gs_index(0);
28289+ ret = HYPERVISOR_set_segment_base(
28290+ SEGBASE_GS_USER, addr);
28291+ }
28292+ }
28293+ put_cpu();
28294+ break;
28295+ case ARCH_SET_FS:
28296+ /* Not strictly needed for fs, but do it for symmetry
28297+ with gs */
28298+ if (addr >= TASK_SIZE_OF(task))
28299+ return -EPERM;
28300+ cpu = get_cpu();
28301+ /* handle small bases via the GDT because that's faster to
28302+ switch. */
28303+ if (addr <= 0xffffffff) {
28304+ set_32bit_tls(task, FS_TLS, addr);
28305+ if (doit) {
28306+ load_TLS(&task->thread, cpu);
28307+ asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
28308+ }
28309+ task->thread.fsindex = FS_TLS_SEL;
28310+ task->thread.fs = 0;
28311+ } else {
28312+ task->thread.fsindex = 0;
28313+ task->thread.fs = addr;
28314+ if (doit) {
28315+ /* set the selector to 0 to not confuse
28316+ __switch_to */
28317+ asm volatile("movl %0,%%fs" :: "r" (0));
28318+ ret = HYPERVISOR_set_segment_base(SEGBASE_FS,
28319+ addr);
28320+ }
28321+ }
28322+ put_cpu();
28323+ break;
28324+ case ARCH_GET_FS: {
28325+ unsigned long base;
28326+ if (task->thread.fsindex == FS_TLS_SEL)
28327+ base = read_32bit_tls(task, FS_TLS);
28328+ else if (doit)
28329+ rdmsrl(MSR_FS_BASE, base);
28330+ else
28331+ base = task->thread.fs;
28332+ ret = put_user(base, (unsigned long __user *)addr);
28333+ break;
28334+ }
28335+ case ARCH_GET_GS: {
28336+ unsigned long base;
28337+ unsigned gsindex;
28338+ if (task->thread.gsindex == GS_TLS_SEL)
28339+ base = read_32bit_tls(task, GS_TLS);
28340+ else if (doit) {
28341+ asm("movl %%gs,%0" : "=r" (gsindex));
28342+ if (gsindex)
28343+ rdmsrl(MSR_KERNEL_GS_BASE, base);
28344+ else
28345+ base = task->thread.gs;
28346+ }
28347+ else
28348+ base = task->thread.gs;
28349+ ret = put_user(base, (unsigned long __user *)addr);
28350+ break;
28351+ }
28352+
28353+ default:
28354+ ret = -EINVAL;
28355+ break;
28356+ }
28357+
28358+ return ret;
28359+}
28360+
28361+long sys_arch_prctl(int code, unsigned long addr)
28362+{
28363+ return do_arch_prctl(current, code, addr);
28364+}
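
From user space, the ARCH_SET_FS/ARCH_SET_GS/ARCH_GET_FS/ARCH_GET_GS cases above are reached through the arch_prctl(2) system call. A minimal sketch, illustrative only, assuming an x86-64 userland with <asm/prctl.h>:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/prctl.h>

int main(void)
{
	unsigned long fsbase = 0;

	/* Query the current FS base (serviced by ARCH_GET_FS above). */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fsbase) == 0)
		printf("FS base: %#lx\n", fsbase);
	return 0;
}
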
28365+
28366+/*
28367+ * Capture the user space registers if the task is not running (in user space)
28368+ */
28369+int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
28370+{
28371+ struct pt_regs *pp, ptregs;
28372+
28373+ pp = task_pt_regs(tsk);
28374+
28375+ ptregs = *pp;
28376+ ptregs.cs &= 0xffff;
28377+ ptregs.ss &= 0xffff;
28378+
28379+ elf_core_copy_regs(regs, &ptregs);
28380+
28381+ boot_option_idle_override = 1;
28382+ return 1;
28383+}
28384+
28385+unsigned long arch_align_stack(unsigned long sp)
28386+{
28387+ if (randomize_va_space)
28388+ sp -= get_random_int() % 8192;
28389+ return sp & ~0xf;
28390+}
28391Index: head-2008-11-25/arch/x86/kernel/setup_64-xen.c
28392===================================================================
28393--- /dev/null 1970-01-01 00:00:00.000000000 +0000
28394+++ head-2008-11-25/arch/x86/kernel/setup_64-xen.c 2008-04-22 15:41:51.000000000 +0200
28395@@ -0,0 +1,1652 @@
28396+/*
28397+ * linux/arch/x86-64/kernel/setup.c
28398+ *
28399+ * Copyright (C) 1995 Linus Torvalds
28400+ *
28401+ * Nov 2001 Dave Jones <davej@suse.de>
28402+ * Forked from i386 setup code.
28403+ */
28404+
28405+/*
28406+ * This file handles the architecture-dependent parts of initialization
28407+ */
28408+
28409+#include <linux/errno.h>
28410+#include <linux/sched.h>
28411+#include <linux/kernel.h>
28412+#include <linux/mm.h>
28413+#include <linux/stddef.h>
28414+#include <linux/unistd.h>
28415+#include <linux/ptrace.h>
28416+#include <linux/slab.h>
28417+#include <linux/user.h>
28418+#include <linux/a.out.h>
28419+#include <linux/screen_info.h>
28420+#include <linux/ioport.h>
28421+#include <linux/delay.h>
28422+#include <linux/init.h>
28423+#include <linux/initrd.h>
28424+#include <linux/highmem.h>
28425+#include <linux/bootmem.h>
28426+#include <linux/module.h>
28427+#include <asm/processor.h>
28428+#include <linux/console.h>
28429+#include <linux/seq_file.h>
28430+#include <linux/crash_dump.h>
28431+#include <linux/root_dev.h>
28432+#include <linux/pci.h>
28433+#include <linux/acpi.h>
28434+#include <linux/kallsyms.h>
28435+#include <linux/edd.h>
28436+#include <linux/mmzone.h>
28437+#include <linux/kexec.h>
28438+#include <linux/cpufreq.h>
28439+#include <linux/dmi.h>
28440+#include <linux/dma-mapping.h>
28441+#include <linux/ctype.h>
28442+
28443+#include <asm/mtrr.h>
28444+#include <asm/uaccess.h>
28445+#include <asm/system.h>
28446+#include <asm/io.h>
28447+#include <asm/smp.h>
28448+#include <asm/msr.h>
28449+#include <asm/desc.h>
28450+#include <video/edid.h>
28451+#include <asm/e820.h>
28452+#include <asm/dma.h>
28453+#include <asm/mpspec.h>
28454+#include <asm/mmu_context.h>
28455+#include <asm/bootsetup.h>
28456+#include <asm/proto.h>
28457+#include <asm/setup.h>
28458+#include <asm/mach_apic.h>
28459+#include <asm/numa.h>
28460+#include <asm/sections.h>
28461+#include <asm/dmi.h>
28462+#ifdef CONFIG_XEN
28463+#include <linux/percpu.h>
28464+#include <xen/interface/physdev.h>
28465+#include "setup_arch_pre.h"
28466+#include <asm/hypervisor.h>
28467+#include <xen/interface/nmi.h>
28468+#include <xen/features.h>
28469+#include <xen/firmware.h>
28470+#include <xen/xencons.h>
28471+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
28472+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
28473+#include <asm/mach-xen/setup_arch_post.h>
28474+#include <xen/interface/memory.h>
28475+
28476+#ifdef CONFIG_XEN
28477+#include <xen/interface/kexec.h>
28478+#endif
28479+
28480+extern unsigned long start_pfn;
28481+extern struct edid_info edid_info;
28482+
28483+shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
28484+EXPORT_SYMBOL(HYPERVISOR_shared_info);
28485+
28486+extern char hypercall_page[PAGE_SIZE];
28487+EXPORT_SYMBOL(hypercall_page);
28488+
28489+static int xen_panic_event(struct notifier_block *, unsigned long, void *);
28490+static struct notifier_block xen_panic_block = {
28491+ xen_panic_event, NULL, 0 /* try to go last */
28492+};
28493+
28494+unsigned long *phys_to_machine_mapping;
28495+unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
28496+
28497+EXPORT_SYMBOL(phys_to_machine_mapping);
28498+
28499+DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
28500+DEFINE_PER_CPU(int, nr_multicall_ents);
28501+
28502+/* Raw start-of-day parameters from the hypervisor. */
28503+start_info_t *xen_start_info;
28504+EXPORT_SYMBOL(xen_start_info);
28505+#endif
28506+
28507+/*
28508+ * Machine setup..
28509+ */
28510+
28511+struct cpuinfo_x86 boot_cpu_data __read_mostly;
28512+EXPORT_SYMBOL(boot_cpu_data);
28513+
28514+unsigned long mmu_cr4_features;
28515+
28516+int acpi_disabled;
28517+EXPORT_SYMBOL(acpi_disabled);
28518+#ifdef CONFIG_ACPI
28519+extern int __initdata acpi_ht;
28520+extern acpi_interrupt_flags acpi_sci_flags;
28521+int __initdata acpi_force = 0;
28522+#endif
28523+
28524+int acpi_numa __initdata;
28525+
28526+/* Boot loader ID as an integer, for the benefit of proc_dointvec */
28527+int bootloader_type;
28528+
28529+unsigned long saved_video_mode;
28530+
28531+/*
28532+ * Early DMI memory
28533+ */
28534+int dmi_alloc_index;
28535+char dmi_alloc_data[DMI_MAX_DATA];
28536+
28537+/*
28538+ * Setup options
28539+ */
28540+struct screen_info screen_info;
28541+EXPORT_SYMBOL(screen_info);
28542+struct sys_desc_table_struct {
28543+ unsigned short length;
28544+ unsigned char table[0];
28545+};
28546+
28547+struct edid_info edid_info;
28548+EXPORT_SYMBOL_GPL(edid_info);
28549+struct e820map e820;
28550+#ifdef CONFIG_XEN
28551+struct e820map machine_e820;
28552+#endif
28553+
28554+extern int root_mountflags;
28555+
28556+char command_line[COMMAND_LINE_SIZE];
28557+
28558+struct resource standard_io_resources[] = {
28559+ { .name = "dma1", .start = 0x00, .end = 0x1f,
28560+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
28561+ { .name = "pic1", .start = 0x20, .end = 0x21,
28562+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
28563+ { .name = "timer0", .start = 0x40, .end = 0x43,
28564+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
28565+ { .name = "timer1", .start = 0x50, .end = 0x53,
28566+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
28567+ { .name = "keyboard", .start = 0x60, .end = 0x6f,
28568+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
28569+ { .name = "dma page reg", .start = 0x80, .end = 0x8f,
28570+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
28571+ { .name = "pic2", .start = 0xa0, .end = 0xa1,
28572+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
28573+ { .name = "dma2", .start = 0xc0, .end = 0xdf,
28574+ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
28575+ { .name = "fpu", .start = 0xf0, .end = 0xff,
28576+ .flags = IORESOURCE_BUSY | IORESOURCE_IO }
28577+};
28578+
28579+#define STANDARD_IO_RESOURCES \
28580+ (sizeof standard_io_resources / sizeof standard_io_resources[0])
28581+
28582+#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
28583+
28584+struct resource data_resource = {
28585+ .name = "Kernel data",
28586+ .start = 0,
28587+ .end = 0,
28588+ .flags = IORESOURCE_RAM,
28589+};
28590+struct resource code_resource = {
28591+ .name = "Kernel code",
28592+ .start = 0,
28593+ .end = 0,
28594+ .flags = IORESOURCE_RAM,
28595+};
28596+
28597+#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
28598+
28599+static struct resource system_rom_resource = {
28600+ .name = "System ROM",
28601+ .start = 0xf0000,
28602+ .end = 0xfffff,
28603+ .flags = IORESOURCE_ROM,
28604+};
28605+
28606+static struct resource extension_rom_resource = {
28607+ .name = "Extension ROM",
28608+ .start = 0xe0000,
28609+ .end = 0xeffff,
28610+ .flags = IORESOURCE_ROM,
28611+};
28612+
28613+static struct resource adapter_rom_resources[] = {
28614+ { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
28615+ .flags = IORESOURCE_ROM },
28616+ { .name = "Adapter ROM", .start = 0, .end = 0,
28617+ .flags = IORESOURCE_ROM },
28618+ { .name = "Adapter ROM", .start = 0, .end = 0,
28619+ .flags = IORESOURCE_ROM },
28620+ { .name = "Adapter ROM", .start = 0, .end = 0,
28621+ .flags = IORESOURCE_ROM },
28622+ { .name = "Adapter ROM", .start = 0, .end = 0,
28623+ .flags = IORESOURCE_ROM },
28624+ { .name = "Adapter ROM", .start = 0, .end = 0,
28625+ .flags = IORESOURCE_ROM }
28626+};
28627+
28628+#define ADAPTER_ROM_RESOURCES \
28629+ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
28630+
28631+static struct resource video_rom_resource = {
28632+ .name = "Video ROM",
28633+ .start = 0xc0000,
28634+ .end = 0xc7fff,
28635+ .flags = IORESOURCE_ROM,
28636+};
28637+
28638+static struct resource video_ram_resource = {
28639+ .name = "Video RAM area",
28640+ .start = 0xa0000,
28641+ .end = 0xbffff,
28642+ .flags = IORESOURCE_RAM,
28643+};
28644+
28645+#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
28646+
28647+static int __init romchecksum(unsigned char *rom, unsigned long length)
28648+{
28649+ unsigned char *p, sum = 0;
28650+
28651+ for (p = rom; p < rom + length; p++)
28652+ sum += *p;
28653+ return sum == 0;
28654+}
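
For illustration: an option ROM passes romchecksum() above when the byte sum over its advertised length is zero modulo 256 (the 0xaa55 signature is checked separately via romsignature()). A standalone sketch with hypothetical ROM contents:

#include <stdio.h>

int main(void)
{
	/* 0x55 0xaa signature, length byte, then a fix-up byte chosen so
	 * that the byte sum is 0 mod 256, as romchecksum() requires. */
	unsigned char rom[4] = { 0x55, 0xaa, 0x01, 0x00 };
	unsigned char sum = 0;
	unsigned int i;

	rom[3] = (unsigned char)(0u - (0x55 + 0xaa + 0x01));
	for (i = 0; i < sizeof(rom); i++)
		sum += rom[i];
	printf("checksum ok: %d\n", sum == 0);	/* prints 1 */
	return 0;
}
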
28655+
28656+static void __init probe_roms(void)
28657+{
28658+ unsigned long start, length, upper;
28659+ unsigned char *rom;
28660+ int i;
28661+
28662+#ifdef CONFIG_XEN
28663+ /* Nothing to do if not running in dom0. */
28664+ if (!is_initial_xendomain())
28665+ return;
28666+#endif
28667+
28668+ /* video rom */
28669+ upper = adapter_rom_resources[0].start;
28670+ for (start = video_rom_resource.start; start < upper; start += 2048) {
28671+ rom = isa_bus_to_virt(start);
28672+ if (!romsignature(rom))
28673+ continue;
28674+
28675+ video_rom_resource.start = start;
28676+
28677+ /* 0 < length <= 0x7f * 512, historically */
28678+ length = rom[2] * 512;
28679+
28680+ /* if checksum okay, trust length byte */
28681+ if (length && romchecksum(rom, length))
28682+ video_rom_resource.end = start + length - 1;
28683+
28684+ request_resource(&iomem_resource, &video_rom_resource);
28685+ break;
28686+ }
28687+
28688+ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
28689+ if (start < upper)
28690+ start = upper;
28691+
28692+ /* system rom */
28693+ request_resource(&iomem_resource, &system_rom_resource);
28694+ upper = system_rom_resource.start;
28695+
28696+ /* check for extension rom (ignore length byte!) */
28697+ rom = isa_bus_to_virt(extension_rom_resource.start);
28698+ if (romsignature(rom)) {
28699+ length = extension_rom_resource.end - extension_rom_resource.start + 1;
28700+ if (romchecksum(rom, length)) {
28701+ request_resource(&iomem_resource, &extension_rom_resource);
28702+ upper = extension_rom_resource.start;
28703+ }
28704+ }
28705+
28706+ /* check for adapter roms on 2k boundaries */
28707+ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
28708+ rom = isa_bus_to_virt(start);
28709+ if (!romsignature(rom))
28710+ continue;
28711+
28712+ /* 0 < length <= 0x7f * 512, historically */
28713+ length = rom[2] * 512;
28714+
28715+ /* but accept any length that fits if checksum okay */
28716+ if (!length || start + length > upper || !romchecksum(rom, length))
28717+ continue;
28718+
28719+ adapter_rom_resources[i].start = start;
28720+ adapter_rom_resources[i].end = start + length - 1;
28721+ request_resource(&iomem_resource, &adapter_rom_resources[i]);
28722+
28723+ start = adapter_rom_resources[i++].end & ~2047UL;
28724+ }
28725+}
28726+
28727+/* Check for full argument with no trailing characters */
28728+static int fullarg(char *p, char *arg)
28729+{
28730+ int l = strlen(arg);
28731+ return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
28732+}
28733+
28734+static __init void parse_cmdline_early (char ** cmdline_p)
28735+{
28736+ char c = ' ', *to = command_line, *from = COMMAND_LINE;
28737+ int len = 0;
28738+ int userdef = 0;
28739+
28740+ for (;;) {
28741+ if (c != ' ')
28742+ goto next_char;
28743+
28744+#ifdef CONFIG_SMP
28745+ /*
28746+ * If the BIOS enumerates physical processors before logical,
28747+ * maxcpus=N at enumeration-time can be used to disable HT.
28748+ */
28749+ else if (!memcmp(from, "maxcpus=", 8)) {
28750+ extern unsigned int maxcpus;
28751+
28752+ maxcpus = simple_strtoul(from + 8, NULL, 0);
28753+ }
28754+#endif
28755+#ifdef CONFIG_ACPI
28756+ /* "acpi=off" disables both ACPI table parsing and interpreter init */
28757+ if (fullarg(from,"acpi=off"))
28758+ disable_acpi();
28759+
28760+ if (fullarg(from, "acpi=force")) {
28761+ /* add later when we do DMI horrors: */
28762+ acpi_force = 1;
28763+ acpi_disabled = 0;
28764+ }
28765+
28766+ /* acpi=ht just means: do ACPI MADT parsing
28767+ at bootup, but don't enable the full ACPI interpreter */
28768+ if (fullarg(from, "acpi=ht")) {
28769+ if (!acpi_force)
28770+ disable_acpi();
28771+ acpi_ht = 1;
28772+ }
28773+ else if (fullarg(from, "pci=noacpi"))
28774+ acpi_disable_pci();
28775+ else if (fullarg(from, "acpi=noirq"))
28776+ acpi_noirq_set();
28777+
28778+ else if (fullarg(from, "acpi_sci=edge"))
28779+ acpi_sci_flags.trigger = 1;
28780+ else if (fullarg(from, "acpi_sci=level"))
28781+ acpi_sci_flags.trigger = 3;
28782+ else if (fullarg(from, "acpi_sci=high"))
28783+ acpi_sci_flags.polarity = 1;
28784+ else if (fullarg(from, "acpi_sci=low"))
28785+ acpi_sci_flags.polarity = 3;
28786+
28787+ /* acpi=strict disables out-of-spec workarounds */
28788+ else if (fullarg(from, "acpi=strict")) {
28789+ acpi_strict = 1;
28790+ }
28791+#ifdef CONFIG_X86_IO_APIC
28792+ else if (fullarg(from, "acpi_skip_timer_override"))
28793+ acpi_skip_timer_override = 1;
28794+#endif
28795+#endif
28796+
28797+#ifndef CONFIG_XEN
28798+ if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
28799+ clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
28800+ disable_apic = 1;
28801+ }
28802+
28803+ if (fullarg(from, "noapic"))
28804+ skip_ioapic_setup = 1;
28805+
28806+ if (fullarg(from,"apic")) {
28807+ skip_ioapic_setup = 0;
28808+ ioapic_force = 1;
28809+ }
28810+#endif
28811+
28812+ if (!memcmp(from, "mem=", 4))
28813+ parse_memopt(from+4, &from);
28814+
28815+ if (!memcmp(from, "memmap=", 7)) {
28816+ /* exactmap option is for user-defined memory */
28817+ if (!memcmp(from+7, "exactmap", 8)) {
28818+#ifdef CONFIG_CRASH_DUMP
28819+ /* If we are doing a crash dump, we
28820+ * still need to know the real mem
28821+ * size before original memory map is
28822+ * reset.
28823+ */
28824+ saved_max_pfn = e820_end_of_ram();
28825+#endif
28826+ from += 8+7;
28827+ end_pfn_map = 0;
28828+ e820.nr_map = 0;
28829+ userdef = 1;
28830+ }
28831+ else {
28832+ parse_memmapopt(from+7, &from);
28833+ userdef = 1;
28834+ }
28835+ }
28836+
28837+#ifdef CONFIG_NUMA
28838+ if (!memcmp(from, "numa=", 5))
28839+ numa_setup(from+5);
28840+#endif
28841+
28842+ if (!memcmp(from,"iommu=",6)) {
28843+ iommu_setup(from+6);
28844+ }
28845+
28846+ if (fullarg(from,"oops=panic"))
28847+ panic_on_oops = 1;
28848+
28849+ if (!memcmp(from, "noexec=", 7))
28850+ nonx_setup(from + 7);
28851+
28852+#ifdef CONFIG_KEXEC
28853+ /* crashkernel=size@addr specifies the location to reserve for
28854+ * a crash kernel. By reserving this memory we guarantee
28855+ * that Linux never sets it up as a DMA target.
28856+ * Useful for holding code to do something appropriate
28857+ * after a kernel panic.
28858+ */
28859+ else if (!memcmp(from, "crashkernel=", 12)) {
28860+#ifndef CONFIG_XEN
28861+ unsigned long size, base;
28862+ size = memparse(from+12, &from);
28863+ if (*from == '@') {
28864+ base = memparse(from+1, &from);
28865+ /* FIXME: Do I want a sanity check
28866+ * to validate the memory range?
28867+ */
28868+ crashk_res.start = base;
28869+ crashk_res.end = base + size - 1;
28870+ }
28871+#else
28872+ printk("Ignoring crashkernel command line, "
28873+ "parameter will be supplied by xen\n");
28874+#endif
28875+ }
28876+#endif
28877+
28878+#ifdef CONFIG_PROC_VMCORE
28879+ /* elfcorehdr= specifies the location of elf core header
28880+ * stored by the crashed kernel. This option will be passed
28881+ * by kexec loader to the capture kernel.
28882+ */
28883+ else if(!memcmp(from, "elfcorehdr=", 11))
28884+ elfcorehdr_addr = memparse(from+11, &from);
28885+#endif
28886+
28887+#if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
28888+ else if (!memcmp(from, "additional_cpus=", 16))
28889+ setup_additional_cpus(from+16);
28890+#endif
28891+
28892+ next_char:
28893+ c = *(from++);
28894+ if (!c)
28895+ break;
28896+ if (COMMAND_LINE_SIZE <= ++len)
28897+ break;
28898+ *(to++) = c;
28899+ }
28900+ if (userdef) {
28901+ printk(KERN_INFO "user-defined physical RAM map:\n");
28902+ e820_print_map("user");
28903+ }
28904+ *to = '\0';
28905+ *cmdline_p = command_line;
28906+}
28907+
28908+#ifndef CONFIG_NUMA
28909+static void __init
28910+contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
28911+{
28912+ unsigned long bootmap_size, bootmap;
28913+
28914+ bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
28915+ bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
28916+ if (bootmap == -1L)
28917+ panic("Cannot find bootmem map of size %ld\n",bootmap_size);
28918+ bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
28919+#ifdef CONFIG_XEN
28920+ e820_bootmem_free(NODE_DATA(0), 0, xen_start_info->nr_pages<<PAGE_SHIFT);
28921+#else
28922+ e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
28923+#endif
28924+ reserve_bootmem(bootmap, bootmap_size);
28925+}
28926+#endif
28927+
28928+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
28929+struct edd edd;
28930+#ifdef CONFIG_EDD_MODULE
28931+EXPORT_SYMBOL(edd);
28932+#endif
28933+#ifndef CONFIG_XEN
28934+/**
28935+ * copy_edd() - Copy the BIOS EDD information
28936+ * from boot_params into a safe place.
28937+ *
28938+ */
28939+static inline void copy_edd(void)
28940+{
28941+ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
28942+ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
28943+ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
28944+ edd.edd_info_nr = EDD_NR;
28945+}
28946+#endif
28947+#else
28948+static inline void copy_edd(void)
28949+{
28950+}
28951+#endif
28952+
28953+#ifndef CONFIG_XEN
28954+#define EBDA_ADDR_POINTER 0x40E
28955+
28956+unsigned __initdata ebda_addr;
28957+unsigned __initdata ebda_size;
28958+
28959+static void discover_ebda(void)
28960+{
28961+ /*
28962+ * there is a real-mode segmented pointer pointing to the
28963+ * 4K EBDA area at 0x40E
28964+ */
28965+ ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
28966+ ebda_addr <<= 4;
28967+
28968+ ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
28969+
28970+ /* Round EBDA up to pages */
28971+ if (ebda_size == 0)
28972+ ebda_size = 1;
28973+ ebda_size <<= 10;
28974+ ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
28975+ if (ebda_size > 64*1024)
28976+ ebda_size = 64*1024;
28977+}
28978+#else
28979+#define discover_ebda() ((void)0)
28980+#endif
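
For illustration: the word at 0x40E that discover_ebda() above reads is a real-mode segment, so shifting it left by 4 (multiplying by 16) yields the linear address of the EBDA. A standalone sketch with a hypothetical segment value:

#include <stdio.h>

int main(void)
{
	unsigned short segment = 0x9fc0;	/* hypothetical word at 0x40E */
	unsigned long linear = (unsigned long)segment << 4;

	printf("EBDA at linear address %#lx\n", linear);	/* 0x9fc00 */
	return 0;
}
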
28981+
28982+void __init setup_arch(char **cmdline_p)
28983+{
28984+#ifdef CONFIG_XEN
28985+ /* Register a call for panic conditions. */
28986+ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
28987+
28988+ ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
28989+ screen_info = SCREEN_INFO;
28990+
28991+ if (is_initial_xendomain()) {
28992+ const struct dom0_vga_console_info *info =
28993+ (void *)((char *)xen_start_info +
28994+ xen_start_info->console.dom0.info_off);
28995+
28996+ dom0_init_screen_info(info,
28997+ xen_start_info->console.dom0.info_size);
28998+ xen_start_info->console.domU.mfn = 0;
28999+ xen_start_info->console.domU.evtchn = 0;
29000+ } else
29001+ screen_info.orig_video_isVGA = 0;
29002+
29003+ copy_edid();
29004+
29005+ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
29006+ VMASST_TYPE_writable_pagetables));
29007+
29008+ ARCH_SETUP
29009+#else
29010+ ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
29011+ screen_info = SCREEN_INFO;
29012+ edid_info = EDID_INFO;
29013+#endif /* !CONFIG_XEN */
29014+ saved_video_mode = SAVED_VIDEO_MODE;
29015+ bootloader_type = LOADER_TYPE;
29016+
29017+#ifdef CONFIG_BLK_DEV_RAM
29018+ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
29019+ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
29020+ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
29021+#endif
29022+ setup_memory_region();
29023+ copy_edd();
29024+
29025+ if (!MOUNT_ROOT_RDONLY)
29026+ root_mountflags &= ~MS_RDONLY;
29027+ init_mm.start_code = (unsigned long) &_text;
29028+ init_mm.end_code = (unsigned long) &_etext;
29029+ init_mm.end_data = (unsigned long) &_edata;
29030+ init_mm.brk = (unsigned long) &_end;
29031+
29032+ code_resource.start = virt_to_phys(&_text);
29033+ code_resource.end = virt_to_phys(&_etext)-1;
29034+ data_resource.start = virt_to_phys(&_etext);
29035+ data_resource.end = virt_to_phys(&_edata)-1;
29036+
29037+ parse_cmdline_early(cmdline_p);
29038+
29039+ early_identify_cpu(&boot_cpu_data);
29040+
29041+ /*
29042+ * partially used pages are not usable - thus
29043+ * we are rounding upwards:
29044+ */
29045+ end_pfn = e820_end_of_ram();
29046+ num_physpages = end_pfn; /* for pfn_valid */
29047+
29048+ check_efer();
29049+
29050+ discover_ebda();
29051+
29052+ init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
29053+
29054+ if (is_initial_xendomain())
29055+ dmi_scan_machine();
29056+
29057+#ifdef CONFIG_ACPI_NUMA
29058+ /*
29059+ * Parse SRAT to discover nodes.
29060+ */
29061+ acpi_numa_init();
29062+#endif
29063+
29064+#ifdef CONFIG_NUMA
29065+ numa_initmem_init(0, end_pfn);
29066+#else
29067+ contig_initmem_init(0, end_pfn);
29068+#endif
29069+
29070+#ifdef CONFIG_XEN
29071+ /*
29072+ * Reserve kernel, physmap, start info, initial page tables, and
29073+ * direct mapping.
29074+ */
29075+ reserve_bootmem_generic(__pa_symbol(&_text),
29076+ (table_end << PAGE_SHIFT) - __pa_symbol(&_text));
29077+#else
29078+ /* Reserve direct mapping */
29079+ reserve_bootmem_generic(table_start << PAGE_SHIFT,
29080+ (table_end - table_start) << PAGE_SHIFT);
29081+
29082+ /* reserve kernel */
29083+ reserve_bootmem_generic(__pa_symbol(&_text),
29084+ __pa_symbol(&_end) - __pa_symbol(&_text));
29085+
29086+ /*
29087+ * reserve physical page 0 - it's a special BIOS page on many boxes,
29088+ * enabling clean reboots, SMP operation, laptop functions.
29089+ */
29090+ reserve_bootmem_generic(0, PAGE_SIZE);
29091+
29092+ /* reserve ebda region */
29093+ if (ebda_addr)
29094+ reserve_bootmem_generic(ebda_addr, ebda_size);
29095+
29096+#ifdef CONFIG_SMP
29097+ /*
29098+ * But first pinch a few for the stack/trampoline stuff
29099+ * FIXME: Don't need the extra page at 4K, but need to fix
29100+ * trampoline before removing it. (see the GDT stuff)
29101+ */
29102+ reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
29103+
29104+ /* Reserve SMP trampoline */
29105+ reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
29106+#endif
29107+#endif
29108+
29109+#ifdef CONFIG_ACPI_SLEEP
29110+ /*
29111+ * Reserve low memory region for sleep support.
29112+ */
29113+ acpi_reserve_bootmem();
29114+#endif
29115+#ifdef CONFIG_XEN
29116+#ifdef CONFIG_BLK_DEV_INITRD
29117+ if (xen_start_info->mod_start) {
29118+ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
29119+ /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
29120+ initrd_start = INITRD_START + PAGE_OFFSET;
29121+ initrd_end = initrd_start+INITRD_SIZE;
29122+ initrd_below_start_ok = 1;
29123+ } else {
29124+ printk(KERN_ERR "initrd extends beyond end of memory "
29125+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
29126+ (unsigned long)(INITRD_START + INITRD_SIZE),
29127+ (unsigned long)(end_pfn << PAGE_SHIFT));
29128+ initrd_start = 0;
29129+ }
29130+ }
29131+#endif
29132+#else /* CONFIG_XEN */
29133+#ifdef CONFIG_BLK_DEV_INITRD
29134+ if (LOADER_TYPE && INITRD_START) {
29135+ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
29136+ reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
29137+ initrd_start =
29138+ INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
29139+ initrd_end = initrd_start+INITRD_SIZE;
29140+ }
29141+ else {
29142+ printk(KERN_ERR "initrd extends beyond end of memory "
29143+ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
29144+ (unsigned long)(INITRD_START + INITRD_SIZE),
29145+ (unsigned long)(end_pfn << PAGE_SHIFT));
29146+ initrd_start = 0;
29147+ }
29148+ }
29149+#endif
29150+#endif /* !CONFIG_XEN */
29151+#ifdef CONFIG_KEXEC
29152+#ifdef CONFIG_XEN
29153+ xen_machine_kexec_setup_resources();
29154+#else
29155+ if (crashk_res.start != crashk_res.end) {
29156+ reserve_bootmem_generic(crashk_res.start,
29157+ crashk_res.end - crashk_res.start + 1);
29158+ }
29159+#endif
29160+#endif
29161+
29162+ paging_init();
29163+#ifdef CONFIG_X86_LOCAL_APIC
29164+ /*
29165+ * Find and reserve possible boot-time SMP configuration:
29166+ */
29167+ find_smp_config();
29168+#endif
29169+#ifdef CONFIG_XEN
29170+ {
29171+ int i, j, k, fpp;
29172+ unsigned long p2m_pages;
29173+
29174+ p2m_pages = end_pfn;
29175+ if (xen_start_info->nr_pages > end_pfn) {
29176+ /*
29177+ * the end_pfn was shrunk (probably by mem= or highmem=
29178+ * kernel parameter); shrink reservation with the HV
29179+ */
29180+ struct xen_memory_reservation reservation = {
29181+ .address_bits = 0,
29182+ .extent_order = 0,
29183+ .domid = DOMID_SELF
29184+ };
29185+ unsigned int difference;
29186+ int ret;
29187+
29188+ difference = xen_start_info->nr_pages - end_pfn;
29189+
29190+ set_xen_guest_handle(reservation.extent_start,
29191+ ((unsigned long *)xen_start_info->mfn_list) + end_pfn);
29192+ reservation.nr_extents = difference;
29193+ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
29194+ &reservation);
29195+ BUG_ON (ret != difference);
29196+ }
29197+ else if (end_pfn > xen_start_info->nr_pages)
29198+ p2m_pages = xen_start_info->nr_pages;
29199+
29200+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
29201+ /* Make sure we have a large enough P->M table. */
29202+ phys_to_machine_mapping = alloc_bootmem_pages(
29203+ end_pfn * sizeof(unsigned long));
29204+ memset(phys_to_machine_mapping, ~0,
29205+ end_pfn * sizeof(unsigned long));
29206+ memcpy(phys_to_machine_mapping,
29207+ (unsigned long *)xen_start_info->mfn_list,
29208+ p2m_pages * sizeof(unsigned long));
29209+ free_bootmem(
29210+ __pa(xen_start_info->mfn_list),
29211+ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
29212+ sizeof(unsigned long))));
29213+
29214+ /*
29215+ * Initialise the list of the frames that specify the
29216+ * list of frames that make up the p2m table. Used by
29217+ * save/restore.
29218+ */
29219+ pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
29220+
29221+ fpp = PAGE_SIZE/sizeof(unsigned long);
29222+ for (i=0, j=0, k=-1; i< end_pfn; i+=fpp, j++) {
29223+ if ((j % fpp) == 0) {
29224+ k++;
29225+ BUG_ON(k>=fpp);
29226+ pfn_to_mfn_frame_list[k] =
29227+ alloc_bootmem_pages(PAGE_SIZE);
29228+ pfn_to_mfn_frame_list_list[k] =
29229+ virt_to_mfn(pfn_to_mfn_frame_list[k]);
29230+ j=0;
29231+ }
29232+ pfn_to_mfn_frame_list[k][j] =
29233+ virt_to_mfn(&phys_to_machine_mapping[i]);
29234+ }
29235+ HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
29236+ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
29237+ virt_to_mfn(pfn_to_mfn_frame_list_list);
29238+ }
29239+
29240+ /* Mark all ISA DMA channels in-use - using them wouldn't work. */
29241+ for (i = 0; i < MAX_DMA_CHANNELS; ++i)
29242+ if (i != 4 && request_dma(i, "xen") != 0)
29243+ BUG();
29244+ }
29245+
29246+ if (!is_initial_xendomain()) {
29247+ acpi_disabled = 1;
29248+#ifdef CONFIG_ACPI
29249+ acpi_ht = 0;
29250+#endif
29251+ }
29252+#endif
29253+
29254+#ifndef CONFIG_XEN
29255+ check_ioapic();
29256+#endif
29257+
29258+ zap_low_mappings(0);
29259+
29260+ /*
29261+ * set this early, so we don't allocate cpu0
29262+ * if MADT list doesn't list BSP first
29263+ * mpparse.c/MP_processor_info() allocates logical cpu numbers.
29264+ */
29265+ cpu_set(0, cpu_present_map);
29266+#ifdef CONFIG_ACPI
29267+ /*
29268+ * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
29269+ * Call this early for SRAT node setup.
29270+ */
29271+ acpi_boot_table_init();
29272+
29273+ /*
29274+ * Read APIC and some other early information from ACPI tables.
29275+ */
29276+ acpi_boot_init();
29277+#endif
29278+
29279+ init_cpu_to_node();
29280+
29281+#ifdef CONFIG_X86_LOCAL_APIC
29282+ /*
29283+ * get boot-time SMP configuration:
29284+ */
29285+ if (smp_found_config)
29286+ get_smp_config();
29287+#ifndef CONFIG_XEN
29288+ init_apic_mappings();
29289+#endif
29290+#endif
29291+#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
29292+ prefill_possible_map();
29293+#endif
29294+
29295+ /*
29296+ * Request address space for all standard RAM and ROM resources
29297+ * and also for regions reported as reserved by the e820.
29298+ */
29299+ probe_roms();
29300+#ifdef CONFIG_XEN
29301+ if (is_initial_xendomain())
29302+ e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
29303+#else
29304+ e820_reserve_resources(e820.map, e820.nr_map);
29305+#endif
29306+
29307+ request_resource(&iomem_resource, &video_ram_resource);
29308+
29309+ {
29310+ unsigned i;
29311+ /* request I/O space for devices used on all i[345]86 PCs */
29312+ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
29313+ request_resource(&ioport_resource, &standard_io_resources[i]);
29314+ }
29315+
29316+#ifdef CONFIG_XEN
29317+ if (is_initial_xendomain())
29318+ e820_setup_gap(machine_e820.map, machine_e820.nr_map);
29319+#else
29320+ e820_setup_gap(e820.map, e820.nr_map);
29321+#endif
29322+
29323+#ifdef CONFIG_XEN
29324+ {
29325+ struct physdev_set_iopl set_iopl;
29326+
29327+ set_iopl.iopl = 1;
29328+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
29329+
29330+ if (is_initial_xendomain()) {
29331+#ifdef CONFIG_VT
29332+#if defined(CONFIG_VGA_CONSOLE)
29333+ conswitchp = &vga_con;
29334+#elif defined(CONFIG_DUMMY_CONSOLE)
29335+ conswitchp = &dummy_con;
29336+#endif
29337+#endif
29338+ } else {
29339+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
29340+ conswitchp = &dummy_con;
29341+#endif
29342+ }
29343+ }
29344+#else /* CONFIG_XEN */
29345+
29346+#ifdef CONFIG_VT
29347+#if defined(CONFIG_VGA_CONSOLE)
29348+ conswitchp = &vga_con;
29349+#elif defined(CONFIG_DUMMY_CONSOLE)
29350+ conswitchp = &dummy_con;
29351+#endif
29352+#endif
29353+
29354+#endif /* !CONFIG_XEN */
29355+}
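
For illustration: on a guest without XENFEAT_auto_translated_physmap, the phys_to_machine_mapping[] array populated in setup_arch() above is the pfn-to-mfn table, so translation is a plain array lookup. A sketch only; the helper name is hypothetical:

extern unsigned long *phys_to_machine_mapping;

static inline unsigned long example_pfn_to_mfn(unsigned long pfn)
{
	/* hypothetical helper: index the p2m table built in setup_arch() */
	return phys_to_machine_mapping[pfn];
}
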
29356+
29357+#ifdef CONFIG_XEN
29358+static int
29359+xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
29360+{
29361+ HYPERVISOR_shutdown(SHUTDOWN_crash);
29362+ /* we're never actually going to get here... */
29363+ return NOTIFY_DONE;
29364+}
29365+#endif /* CONFIG_XEN */
29366+
29367+
29368+static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
29369+{
29370+ unsigned int *v;
29371+
29372+ if (c->extended_cpuid_level < 0x80000004)
29373+ return 0;
29374+
29375+ v = (unsigned int *) c->x86_model_id;
29376+ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
29377+ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
29378+ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
29379+ c->x86_model_id[48] = 0;
29380+ return 1;
29381+}
29382+
29383+
29384+static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
29385+{
29386+ unsigned int n, dummy, eax, ebx, ecx, edx;
29387+
29388+ n = c->extended_cpuid_level;
29389+
29390+ if (n >= 0x80000005) {
29391+ cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
29392+ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
29393+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
29394+ c->x86_cache_size=(ecx>>24)+(edx>>24);
29395+ /* On K8 L1 TLB is inclusive, so don't count it */
29396+ c->x86_tlbsize = 0;
29397+ }
29398+
29399+ if (n >= 0x80000006) {
29400+ cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
29401+ ecx = cpuid_ecx(0x80000006);
29402+ c->x86_cache_size = ecx >> 16;
29403+ c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
29404+
29405+ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
29406+ c->x86_cache_size, ecx & 0xFF);
29407+ }
29408+
29409+ if (n >= 0x80000007)
29410+ cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
29411+ if (n >= 0x80000008) {
29412+ cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
29413+ c->x86_virt_bits = (eax >> 8) & 0xff;
29414+ c->x86_phys_bits = eax & 0xff;
29415+ }
29416+}
29417+
29418+#ifdef CONFIG_NUMA
29419+static int nearby_node(int apicid)
29420+{
29421+ int i;
29422+ for (i = apicid - 1; i >= 0; i--) {
29423+ int node = apicid_to_node[i];
29424+ if (node != NUMA_NO_NODE && node_online(node))
29425+ return node;
29426+ }
29427+ for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
29428+ int node = apicid_to_node[i];
29429+ if (node != NUMA_NO_NODE && node_online(node))
29430+ return node;
29431+ }
29432+ return first_node(node_online_map); /* Shouldn't happen */
29433+}
29434+#endif
29435+
29436+/*
29437+ * On an AMD dual-core setup the lower bits of the APIC ID distinguish the cores.
29438+ * Assumes number of cores is a power of two.
29439+ */
29440+static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
29441+{
29442+#ifdef CONFIG_SMP
29443+ unsigned bits;
29444+#ifdef CONFIG_NUMA
29445+ int cpu = smp_processor_id();
29446+ int node = 0;
29447+ unsigned apicid = hard_smp_processor_id();
29448+#endif
29449+ unsigned ecx = cpuid_ecx(0x80000008);
29450+
29451+ c->x86_max_cores = (ecx & 0xff) + 1;
29452+
29453+ /* CPU telling us the core id bits shift? */
29454+ bits = (ecx >> 12) & 0xF;
29455+
29456+ /* Otherwise recompute */
29457+ if (bits == 0) {
29458+ while ((1 << bits) < c->x86_max_cores)
29459+ bits++;
29460+ }
29461+
29462+ /* Low order bits define the core id (index of core in socket) */
29463+ c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
29464+ /* Convert the APIC ID into the socket ID */
29465+ c->phys_proc_id = phys_pkg_id(bits);
29466+
29467+#ifdef CONFIG_NUMA
29468+ node = c->phys_proc_id;
29469+ if (apicid_to_node[apicid] != NUMA_NO_NODE)
29470+ node = apicid_to_node[apicid];
29471+ if (!node_online(node)) {
29472+ /* Two possibilities here:
29473+ - The CPU is missing memory and no node was created.
29474+ In that case try picking one from a nearby CPU
29475+ - The APIC IDs differ from the HyperTransport node IDs
29476+ which the K8 northbridge parsing fills in.
29477+ Assume they are all increased by a constant offset,
29478+ but in the same order as the HT nodeids.
29479+ If that doesn't result in a usable node fall back to the
29480+ path for the previous case. */
29481+ int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
29482+ if (ht_nodeid >= 0 &&
29483+ apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
29484+ node = apicid_to_node[ht_nodeid];
29485+ /* Pick a nearby node */
29486+ if (!node_online(node))
29487+ node = nearby_node(apicid);
29488+ }
29489+ numa_set_node(cpu, node);
29490+
29491+ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
29492+#endif
29493+#endif
29494+}
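
For illustration: amd_detect_cmp() above treats the low 'bits' bits of the APIC ID as the core index and the remaining high bits as the package/socket part. A standalone sketch of that decomposition with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned apicid = 0x5;	/* hypothetical APIC ID (binary 101) */
	unsigned bits = 1;	/* 2 cores per socket -> 1 core-id bit */

	unsigned core_id = apicid & ((1u << bits) - 1);	/* -> 1 */
	unsigned socket  = apicid >> bits;		/* -> 2 */

	printf("core %u on socket %u\n", core_id, socket);
	return 0;
}
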
29495+
29496+static void __init init_amd(struct cpuinfo_x86 *c)
29497+{
29498+ unsigned level;
29499+
29500+#ifdef CONFIG_SMP
29501+ unsigned long value;
29502+
29503+ /*
29504+ * Disable TLB flush filter by setting HWCR.FFDIS on K8
29505+ * bit 6 of msr C001_0015
29506+ *
29507+ * Errata 63 for SH-B3 steppings
29508+ * Errata 122 for all steppings (F+ have it disabled by default)
29509+ */
29510+ if (c->x86 == 15) {
29511+ rdmsrl(MSR_K8_HWCR, value);
29512+ value |= 1 << 6;
29513+ wrmsrl(MSR_K8_HWCR, value);
29514+ }
29515+#endif
29516+
29517+ /* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
29518+ 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
29519+ clear_bit(0*32+31, &c->x86_capability);
29520+
29521+ /* On C+ stepping K8 rep microcode works well for copy/memset */
29522+ level = cpuid_eax(1);
29523+ if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
29524+ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
29525+
29526+ /* Enable workaround for FXSAVE leak */
29527+ if (c->x86 >= 6)
29528+ set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
29529+
29530+ level = get_model_name(c);
29531+ if (!level) {
29532+ switch (c->x86) {
29533+ case 15:
29534+ /* Should distinguish Models here, but this is only
29535+ a fallback anyway. */
29536+ strcpy(c->x86_model_id, "Hammer");
29537+ break;
29538+ }
29539+ }
29540+ display_cacheinfo(c);
29541+
29542+ /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
29543+ if (c->x86_power & (1<<8))
29544+ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
29545+
29546+ /* Multi core CPU? */
29547+ if (c->extended_cpuid_level >= 0x80000008)
29548+ amd_detect_cmp(c);
29549+
29550+ /* Fix cpuid4 emulation for more */
29551+ num_cache_leaves = 3;
29552+}
29553+
29554+static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
29555+{
29556+#ifdef CONFIG_SMP
29557+ u32 eax, ebx, ecx, edx;
29558+ int index_msb, core_bits;
29559+
29560+ cpuid(1, &eax, &ebx, &ecx, &edx);
29561+
29562+
29563+ if (!cpu_has(c, X86_FEATURE_HT))
29564+ return;
29565+ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
29566+ goto out;
29567+
29568+ smp_num_siblings = (ebx & 0xff0000) >> 16;
29569+
29570+ if (smp_num_siblings == 1) {
29571+ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
29572+ } else if (smp_num_siblings > 1 ) {
29573+
29574+ if (smp_num_siblings > NR_CPUS) {
29575+ printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
29576+ smp_num_siblings = 1;
29577+ return;
29578+ }
29579+
29580+ index_msb = get_count_order(smp_num_siblings);
29581+ c->phys_proc_id = phys_pkg_id(index_msb);
29582+
29583+ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
29584+
29585+ index_msb = get_count_order(smp_num_siblings) ;
29586+
29587+ core_bits = get_count_order(c->x86_max_cores);
29588+
29589+ c->cpu_core_id = phys_pkg_id(index_msb) &
29590+ ((1 << core_bits) - 1);
29591+ }
29592+out:
29593+ if ((c->x86_max_cores * smp_num_siblings) > 1) {
29594+ printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
29595+ printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
29596+ }
29597+
29598+#endif
29599+}
29600+
29601+/*
29602+ * find out the number of processor cores on the die
29603+ */
29604+static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
29605+{
29606+ unsigned int eax, t;
29607+
29608+ if (c->cpuid_level < 4)
29609+ return 1;
29610+
29611+ cpuid_count(4, 0, &eax, &t, &t, &t);
29612+
29613+ if (eax & 0x1f)
29614+ return ((eax >> 26) + 1);
29615+ else
29616+ return 1;
29617+}
29618+
29619+static void srat_detect_node(void)
29620+{
29621+#ifdef CONFIG_NUMA
29622+ unsigned node;
29623+ int cpu = smp_processor_id();
29624+ int apicid = hard_smp_processor_id();
29625+
29626+ /* Don't do the funky fallback heuristics the AMD version employs
29627+ for now. */
29628+ node = apicid_to_node[apicid];
29629+ if (node == NUMA_NO_NODE)
29630+ node = first_node(node_online_map);
29631+ numa_set_node(cpu, node);
29632+
29633+ if (acpi_numa > 0)
29634+ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
29635+#endif
29636+}
29637+
29638+static void __cpuinit init_intel(struct cpuinfo_x86 *c)
29639+{
29640+ /* Cache sizes */
29641+ unsigned n;
29642+
29643+ init_intel_cacheinfo(c);
29644+ if (c->cpuid_level > 9 ) {
29645+ unsigned eax = cpuid_eax(10);
29646+ /* Check for version and the number of counters */
29647+ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
29648+ set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
29649+ }
29650+
29651+ n = c->extended_cpuid_level;
29652+ if (n >= 0x80000008) {
29653+ unsigned eax = cpuid_eax(0x80000008);
29654+ c->x86_virt_bits = (eax >> 8) & 0xff;
29655+ c->x86_phys_bits = eax & 0xff;
29656+ /* CPUID workaround for Intel 0F34 CPU */
29657+ if (c->x86_vendor == X86_VENDOR_INTEL &&
29658+ c->x86 == 0xF && c->x86_model == 0x3 &&
29659+ c->x86_mask == 0x4)
29660+ c->x86_phys_bits = 36;
29661+ }
29662+
29663+ if (c->x86 == 15)
29664+ c->x86_cache_alignment = c->x86_clflush_size * 2;
29665+ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
29666+ (c->x86 == 0x6 && c->x86_model >= 0x0e))
29667+ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
29668+ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
29669+ c->x86_max_cores = intel_num_cpu_cores(c);
29670+
29671+ srat_detect_node();
29672+}
29673+
29674+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
29675+{
29676+ char *v = c->x86_vendor_id;
29677+
29678+ if (!strcmp(v, "AuthenticAMD"))
29679+ c->x86_vendor = X86_VENDOR_AMD;
29680+ else if (!strcmp(v, "GenuineIntel"))
29681+ c->x86_vendor = X86_VENDOR_INTEL;
29682+ else
29683+ c->x86_vendor = X86_VENDOR_UNKNOWN;
29684+}
29685+
29686+struct cpu_model_info {
29687+ int vendor;
29688+ int family;
29689+ char *model_names[16];
29690+};
29691+
29692+/* Do some early cpuid on the boot CPU to get some parameters that are
29693+ needed before check_bugs. Everything advanced is in identify_cpu
29694+ below. */
29695+void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
29696+{
29697+ u32 tfms;
29698+
29699+ c->loops_per_jiffy = loops_per_jiffy;
29700+ c->x86_cache_size = -1;
29701+ c->x86_vendor = X86_VENDOR_UNKNOWN;
29702+ c->x86_model = c->x86_mask = 0; /* So far unknown... */
29703+ c->x86_vendor_id[0] = '\0'; /* Unset */
29704+ c->x86_model_id[0] = '\0'; /* Unset */
29705+ c->x86_clflush_size = 64;
29706+ c->x86_cache_alignment = c->x86_clflush_size;
29707+ c->x86_max_cores = 1;
29708+ c->extended_cpuid_level = 0;
29709+ memset(&c->x86_capability, 0, sizeof c->x86_capability);
29710+
29711+ /* Get vendor name */
29712+ cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
29713+ (unsigned int *)&c->x86_vendor_id[0],
29714+ (unsigned int *)&c->x86_vendor_id[8],
29715+ (unsigned int *)&c->x86_vendor_id[4]);
29716+
29717+ get_cpu_vendor(c);
29718+
29719+ /* Initialize the standard set of capabilities */
29720+ /* Note that the vendor-specific code below might override */
29721+
29722+ /* Intel-defined flags: level 0x00000001 */
29723+ if (c->cpuid_level >= 0x00000001) {
29724+ __u32 misc;
29725+ cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
29726+ &c->x86_capability[0]);
29727+ c->x86 = (tfms >> 8) & 0xf;
29728+ c->x86_model = (tfms >> 4) & 0xf;
29729+ c->x86_mask = tfms & 0xf;
29730+ if (c->x86 == 0xf)
29731+ c->x86 += (tfms >> 20) & 0xff;
29732+ if (c->x86 >= 0x6)
29733+ c->x86_model += ((tfms >> 16) & 0xF) << 4;
29734+ if (c->x86_capability[0] & (1<<19))
29735+ c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
29736+ } else {
29737+ /* Have CPUID level 0 only - unheard of */
29738+ c->x86 = 4;
29739+ }
29740+
29741+#ifdef CONFIG_SMP
29742+ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
29743+#endif
29744+}
29745+
29746+/*
29747+ * This does the hard work of actually picking apart the CPU stuff...
29748+ */
29749+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
29750+{
29751+ int i;
29752+ u32 xlvl;
29753+
29754+ early_identify_cpu(c);
29755+
29756+ /* AMD-defined flags: level 0x80000001 */
29757+ xlvl = cpuid_eax(0x80000000);
29758+ c->extended_cpuid_level = xlvl;
29759+ if ((xlvl & 0xffff0000) == 0x80000000) {
29760+ if (xlvl >= 0x80000001) {
29761+ c->x86_capability[1] = cpuid_edx(0x80000001);
29762+ c->x86_capability[6] = cpuid_ecx(0x80000001);
29763+ }
29764+ if (xlvl >= 0x80000004)
29765+ get_model_name(c); /* Default name */
29766+ }
29767+
29768+ /* Transmeta-defined flags: level 0x80860001 */
29769+ xlvl = cpuid_eax(0x80860000);
29770+ if ((xlvl & 0xffff0000) == 0x80860000) {
29771+ /* Don't set x86_cpuid_level here for now to not confuse. */
29772+ if (xlvl >= 0x80860001)
29773+ c->x86_capability[2] = cpuid_edx(0x80860001);
29774+ }
29775+
29776+ c->apicid = phys_pkg_id(0);
29777+
29778+ /*
29779+ * Vendor-specific initialization. In this section we
29780+ * canonicalize the feature flags, meaning if there are
29781+ * features a certain CPU supports which CPUID doesn't
29782+ * tell us, CPUID claiming incorrect flags, or other bugs,
29783+ * we handle them here.
29784+ *
29785+ * At the end of this section, c->x86_capability better
29786+ * indicate the features this CPU genuinely supports!
29787+ */
29788+ switch (c->x86_vendor) {
29789+ case X86_VENDOR_AMD:
29790+ init_amd(c);
29791+ break;
29792+
29793+ case X86_VENDOR_INTEL:
29794+ init_intel(c);
29795+ break;
29796+
29797+ case X86_VENDOR_UNKNOWN:
29798+ default:
29799+ display_cacheinfo(c);
29800+ break;
29801+ }
29802+
29803+ select_idle_routine(c);
29804+ detect_ht(c);
29805+
29806+ /*
29807+ * On SMP, boot_cpu_data holds the common feature set between
29808+ * all CPUs; so make sure that we indicate which features are
29809+ * common between the CPUs. The first time this routine gets
29810+ * executed, c == &boot_cpu_data.
29811+ */
29812+ if (c != &boot_cpu_data) {
29813+ /* AND the already accumulated flags with these */
29814+ for (i = 0 ; i < NCAPINTS ; i++)
29815+ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
29816+ }
29817+
29818+#ifdef CONFIG_X86_MCE
29819+ mcheck_init(c);
29820+#endif
29821+ if (c == &boot_cpu_data)
29822+ mtrr_bp_init();
29823+ else
29824+ mtrr_ap_init();
29825+#ifdef CONFIG_NUMA
29826+ numa_add_cpu(smp_processor_id());
29827+#endif
29828+}
29829+
29830+
29831+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
29832+{
29833+ if (c->x86_model_id[0])
29834+ printk("%s", c->x86_model_id);
29835+
29836+ if (c->x86_mask || c->cpuid_level >= 0)
29837+ printk(" stepping %02x\n", c->x86_mask);
29838+ else
29839+ printk("\n");
29840+}
29841+
29842+/*
29843+ * Get CPU information for use by the procfs.
29844+ */
29845+
29846+static int show_cpuinfo(struct seq_file *m, void *v)
29847+{
29848+ struct cpuinfo_x86 *c = v;
29849+
29850+ /*
29851+ * These flag bits must match the definitions in <asm/cpufeature.h>.
29852+ * NULL means this bit is undefined or reserved; either way it doesn't
29853+ * have meaning as far as Linux is concerned. Note that it's important
29854+ * to realize there is a difference between this table and CPUID -- if
29855+ * applications want to get the raw CPUID data, they should access
29856+ * /dev/cpu/<cpu_nr>/cpuid instead.
29857+ */
29858+ static char *x86_cap_flags[] = {
29859+ /* Intel-defined */
29860+ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
29861+ "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
29862+ "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
29863+ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
29864+
29865+ /* AMD-defined */
29866+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29867+ NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
29868+ NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
29869+ NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
29870+
29871+ /* Transmeta-defined */
29872+ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
29873+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29874+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29875+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29876+
29877+ /* Other (Linux-defined) */
29878+ "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
29879+ "constant_tsc", NULL, NULL,
29880+ "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29881+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29882+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29883+
29884+ /* Intel-defined (#2) */
29885+ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
29886+ "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
29887+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29888+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29889+
29890+ /* VIA/Cyrix/Centaur-defined */
29891+ NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
29892+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29893+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29894+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29895+
29896+ /* AMD-defined (#2) */
29897+ "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
29898+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29899+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29900+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
29901+ };
29902+ static char *x86_power_flags[] = {
29903+ "ts", /* temperature sensor */
29904+ "fid", /* frequency id control */
29905+ "vid", /* voltage id control */
29906+ "ttp", /* thermal trip */
29907+ "tm",
29908+ "stc",
29909+ NULL,
29910+ /* nothing */ /* constant_tsc - moved to flags */
29911+ };
29912+
29913+
29914+#ifdef CONFIG_SMP
29915+ if (!cpu_online(c-cpu_data))
29916+ return 0;
29917+#endif
29918+
29919+ seq_printf(m,"processor\t: %u\n"
29920+ "vendor_id\t: %s\n"
29921+ "cpu family\t: %d\n"
29922+ "model\t\t: %d\n"
29923+ "model name\t: %s\n",
29924+ (unsigned)(c-cpu_data),
29925+ c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
29926+ c->x86,
29927+ (int)c->x86_model,
29928+ c->x86_model_id[0] ? c->x86_model_id : "unknown");
29929+
29930+ if (c->x86_mask || c->cpuid_level >= 0)
29931+ seq_printf(m, "stepping\t: %d\n", c->x86_mask);
29932+ else
29933+ seq_printf(m, "stepping\t: unknown\n");
29934+
29935+ if (cpu_has(c,X86_FEATURE_TSC)) {
29936+ unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
29937+ if (!freq)
29938+ freq = cpu_khz;
29939+ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
29940+ freq / 1000, (freq % 1000));
29941+ }
29942+
29943+ /* Cache size */
29944+ if (c->x86_cache_size >= 0)
29945+ seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
29946+
29947+#ifdef CONFIG_SMP
29948+ if (smp_num_siblings * c->x86_max_cores > 1) {
29949+ int cpu = c - cpu_data;
29950+ seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
29951+ seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
29952+ seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
29953+ seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
29954+ }
29955+#endif
29956+
29957+ seq_printf(m,
29958+ "fpu\t\t: yes\n"
29959+ "fpu_exception\t: yes\n"
29960+ "cpuid level\t: %d\n"
29961+ "wp\t\t: yes\n"
29962+ "flags\t\t:",
29963+ c->cpuid_level);
29964+
29965+ {
29966+ int i;
29967+ for ( i = 0 ; i < 32*NCAPINTS ; i++ )
29968+ if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
29969+ seq_printf(m, " %s", x86_cap_flags[i]);
29970+ }
29971+
29972+ seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
29973+ c->loops_per_jiffy/(500000/HZ),
29974+ (c->loops_per_jiffy/(5000/HZ)) % 100);
29975+
29976+ if (c->x86_tlbsize > 0)
29977+ seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
29978+ seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
29979+ seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
29980+
29981+ seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
29982+ c->x86_phys_bits, c->x86_virt_bits);
29983+
29984+ seq_printf(m, "power management:");
29985+ {
29986+ unsigned i;
29987+ for (i = 0; i < 32; i++)
29988+ if (c->x86_power & (1 << i)) {
29989+ if (i < ARRAY_SIZE(x86_power_flags) &&
29990+ x86_power_flags[i])
29991+ seq_printf(m, "%s%s",
29992+ x86_power_flags[i][0]?" ":"",
29993+ x86_power_flags[i]);
29994+ else
29995+ seq_printf(m, " [%d]", i);
29996+ }
29997+ }
29998+
29999+ seq_printf(m, "\n\n");
30000+
30001+ return 0;
30002+}
30003+
30004+static void *c_start(struct seq_file *m, loff_t *pos)
30005+{
30006+ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
30007+}
30008+
30009+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
30010+{
30011+ ++*pos;
30012+ return c_start(m, pos);
30013+}
30014+
30015+static void c_stop(struct seq_file *m, void *v)
30016+{
30017+}
30018+
30019+struct seq_operations cpuinfo_op = {
30020+ .start =c_start,
30021+ .next = c_next,
30022+ .stop = c_stop,
30023+ .show = show_cpuinfo,
30024+};
30025+
30026+#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
30027+#include <linux/platform_device.h>
30028+static __init int add_pcspkr(void)
30029+{
30030+ struct platform_device *pd;
30031+ int ret;
30032+
30033+ if (!is_initial_xendomain())
30034+ return 0;
30035+
30036+ pd = platform_device_alloc("pcspkr", -1);
30037+ if (!pd)
30038+ return -ENOMEM;
30039+
30040+ ret = platform_device_add(pd);
30041+ if (ret)
30042+ platform_device_put(pd);
30043+
30044+ return ret;
30045+}
30046+device_initcall(add_pcspkr);
30047+#endif
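
For reference, the family/model/stepping decode in early_identify_cpu() above follows the
standard CPUID leaf 1 layout (base family/model in bits 11:8 and 7:4, stepping in 3:0, with
the extended family/model fields folded in for family 0xf and family >= 0x6 respectively).
The following standalone userspace sketch, which is not part of the patch and assumes
GCC/Clang's <cpuid.h> helper, performs the same decode:

#include <stdio.h>
#include <cpuid.h>	/* __get_cpuid() wrapper around the CPUID instruction */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int family, model, stepping;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* Base fields, as in early_identify_cpu(): */
	family   = (eax >> 8) & 0xf;
	model    = (eax >> 4) & 0xf;
	stepping = eax & 0xf;
	/* Extended family/model, same conditions as the kernel code: */
	if (family == 0xf)
		family += (eax >> 20) & 0xff;
	if (family >= 0x6)
		model += ((eax >> 16) & 0xf) << 4;

	printf("family 0x%x model 0x%x stepping 0x%x\n", family, model, stepping);
	return 0;
}

Building this with something like cc -O2 on an x86 box and comparing its output against
/proc/cpuinfo shows the same values that show_cpuinfo() above reports.
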
30048Index: head-2008-11-25/arch/x86/kernel/setup64-xen.c
30049===================================================================
30050--- /dev/null 1970-01-01 00:00:00.000000000 +0000
30051+++ head-2008-11-25/arch/x86/kernel/setup64-xen.c 2008-01-28 12:24:19.000000000 +0100
30052@@ -0,0 +1,367 @@
30053+/*
30054+ * X86-64 specific CPU setup.
30055+ * Copyright (C) 1995 Linus Torvalds
30056+ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
30057+ * See setup.c for older changelog.
30058+ *
30059+ * Jun Nakajima <jun.nakajima@intel.com>
30060+ * Modified for Xen
30061+ *
30062+ */
30063+#include <linux/init.h>
30064+#include <linux/kernel.h>
30065+#include <linux/sched.h>
30066+#include <linux/string.h>
30067+#include <linux/bootmem.h>
30068+#include <linux/bitops.h>
30069+#include <linux/module.h>
30070+#include <asm/bootsetup.h>
30071+#include <asm/pda.h>
30072+#include <asm/pgtable.h>
30073+#include <asm/processor.h>
30074+#include <asm/desc.h>
30075+#include <asm/atomic.h>
30076+#include <asm/mmu_context.h>
30077+#include <asm/smp.h>
30078+#include <asm/i387.h>
30079+#include <asm/percpu.h>
30080+#include <asm/proto.h>
30081+#include <asm/sections.h>
30082+#ifdef CONFIG_XEN
30083+#include <asm/hypervisor.h>
30084+#endif
30085+
30086+char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
30087+
30088+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
30089+
30090+struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
30091+EXPORT_SYMBOL(_cpu_pda);
30092+struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
30093+
30094+#ifndef CONFIG_X86_NO_IDT
30095+struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
30096+#endif
30097+
30098+char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
30099+
30100+unsigned long __supported_pte_mask __read_mostly = ~0UL;
30101+EXPORT_SYMBOL(__supported_pte_mask);
30102+static int do_not_nx __cpuinitdata = 0;
30103+
30104+/* noexec=on|off
30105+Control non executable mappings for 64bit processes.
30106+
30107+on Enable(default)
30108+off Disable
30109+*/
30110+int __init nonx_setup(char *str)
30111+{
30112+ if (!strncmp(str, "on", 2)) {
30113+ __supported_pte_mask |= _PAGE_NX;
30114+ do_not_nx = 0;
30115+ } else if (!strncmp(str, "off", 3)) {
30116+ do_not_nx = 1;
30117+ __supported_pte_mask &= ~_PAGE_NX;
30118+ }
30119+ return 1;
30120+}
30121+__setup("noexec=", nonx_setup); /* parsed early actually */
30122+
30123+int force_personality32 = 0;
30124+
30125+/* noexec32=on|off
30126+Control non executable heap for 32bit processes.
30127+To control the stack too use noexec=off
30128+
30129+on PROT_READ does not imply PROT_EXEC for 32bit processes
30130+off PROT_READ implies PROT_EXEC (default)
30131+*/
30132+static int __init nonx32_setup(char *str)
30133+{
30134+ if (!strcmp(str, "on"))
30135+ force_personality32 &= ~READ_IMPLIES_EXEC;
30136+ else if (!strcmp(str, "off"))
30137+ force_personality32 |= READ_IMPLIES_EXEC;
30138+ return 1;
30139+}
30140+__setup("noexec32=", nonx32_setup);
30141+
30142+/*
30143+ * Great future plan:
30144+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
30145+ * Always point %gs to its beginning
30146+ */
30147+void __init setup_per_cpu_areas(void)
30148+{
30149+ int i;
30150+ unsigned long size;
30151+
30152+#ifdef CONFIG_HOTPLUG_CPU
30153+ prefill_possible_map();
30154+#endif
30155+
30156+ /* Copy section for each CPU (we discard the original) */
30157+ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
30158+#ifdef CONFIG_MODULES
30159+ if (size < PERCPU_ENOUGH_ROOM)
30160+ size = PERCPU_ENOUGH_ROOM;
30161+#endif
30162+
30163+ for_each_cpu_mask (i, cpu_possible_map) {
30164+ char *ptr;
30165+
30166+ if (!NODE_DATA(cpu_to_node(i))) {
30167+ printk("cpu with no node %d, num_online_nodes %d\n",
30168+ i, num_online_nodes());
30169+ ptr = alloc_bootmem(size);
30170+ } else {
30171+ ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
30172+ }
30173+ if (!ptr)
30174+ panic("Cannot allocate cpu data for CPU %d\n", i);
30175+ cpu_pda(i)->data_offset = ptr - __per_cpu_start;
30176+ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
30177+ }
30178+}
30179+
30180+#ifdef CONFIG_XEN
30181+static void switch_pt(void)
30182+{
30183+ xen_pt_switch(__pa_symbol(init_level4_pgt));
30184+ xen_new_user_pt(__pa_symbol(__user_pgd(init_level4_pgt)));
30185+}
30186+
30187+static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr)
30188+{
30189+ unsigned long frames[16];
30190+ unsigned long va;
30191+ int f;
30192+
30193+ for (va = gdt_descr->address, f = 0;
30194+ va < gdt_descr->address + gdt_descr->size;
30195+ va += PAGE_SIZE, f++) {
30196+ frames[f] = virt_to_mfn(va);
30197+ make_page_readonly(
30198+ (void *)va, XENFEAT_writable_descriptor_tables);
30199+ }
30200+ if (HYPERVISOR_set_gdt(frames, (gdt_descr->size + 1) /
30201+ sizeof (struct desc_struct)))
30202+ BUG();
30203+}
30204+#else
30205+static void switch_pt(void)
30206+{
30207+ asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
30208+}
30209+
30210+static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr)
30211+{
30212+ asm volatile("lgdt %0" :: "m" (*gdt_descr));
30213+ asm volatile("lidt %0" :: "m" (idt_descr));
30214+}
30215+#endif
30216+
30217+void pda_init(int cpu)
30218+{
30219+ struct x8664_pda *pda = cpu_pda(cpu);
30220+
30221+	/* Set up data that may be needed in __get_free_pages early */
30222+ asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
30223+#ifndef CONFIG_XEN
30224+ wrmsrl(MSR_GS_BASE, pda);
30225+#else
30226+ if (HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL,
30227+ (unsigned long)pda))
30228+ BUG();
30229+#endif
30230+ pda->cpunumber = cpu;
30231+ pda->irqcount = -1;
30232+ pda->kernelstack =
30233+ (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
30234+ pda->active_mm = &init_mm;
30235+ pda->mmu_state = 0;
30236+
30237+ if (cpu == 0) {
30238+#ifdef CONFIG_XEN
30239+ xen_init_pt();
30240+#endif
30241+ /* others are initialized in smpboot.c */
30242+ pda->pcurrent = &init_task;
30243+ pda->irqstackptr = boot_cpu_stack;
30244+ } else {
30245+ pda->irqstackptr = (char *)
30246+ __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
30247+ if (!pda->irqstackptr)
30248+ panic("cannot allocate irqstack for cpu %d", cpu);
30249+ }
30250+
30251+ switch_pt();
30252+
30253+ pda->irqstackptr += IRQSTACKSIZE-64;
30254+}
30255+
30256+#ifndef CONFIG_X86_NO_TSS
30257+char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
30258+__attribute__((section(".bss.page_aligned")));
30259+#endif
30260+
30261+/* May not be marked __init: used by software suspend */
30262+void syscall_init(void)
30263+{
30264+#ifndef CONFIG_XEN
30265+ /*
30266+ * LSTAR and STAR live in a bit strange symbiosis.
30267+	 * They both write to the same internal register. STAR allows setting CS/DS,
30268+	 * but only with a 32bit target. LSTAR sets the 64bit rip.
30269+ */
30270+ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
30271+ wrmsrl(MSR_LSTAR, system_call);
30272+
30273+ /* Flags to clear on syscall */
30274+ wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
30275+#endif
30276+#ifdef CONFIG_IA32_EMULATION
30277+ syscall32_cpu_init ();
30278+#endif
30279+}
30280+
30281+void __cpuinit check_efer(void)
30282+{
30283+ unsigned long efer;
30284+
30285+ rdmsrl(MSR_EFER, efer);
30286+ if (!(efer & EFER_NX) || do_not_nx) {
30287+ __supported_pte_mask &= ~_PAGE_NX;
30288+ }
30289+}
30290+
30291+unsigned long kernel_eflags;
30292+
30293+/*
30294+ * cpu_init() initializes state that is per-CPU. Some data is already
30295+ * initialized (naturally) in the bootstrap process, such as the GDT
30296+ * and IDT. We reload them nevertheless, this function acts as a
30297+ * 'CPU state barrier', nothing should get across.
30298+ * A lot of state is already set up in PDA init.
30299+ */
30300+void __cpuinit cpu_init (void)
30301+{
30302+ int cpu = stack_smp_processor_id();
30303+#ifndef CONFIG_X86_NO_TSS
30304+ struct tss_struct *t = &per_cpu(init_tss, cpu);
30305+ struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
30306+ unsigned long v;
30307+ char *estacks = NULL;
30308+ unsigned i;
30309+#endif
30310+ struct task_struct *me;
30311+
30312+ /* CPU 0 is initialised in head64.c */
30313+ if (cpu != 0) {
30314+ pda_init(cpu);
30315+ zap_low_mappings(cpu);
30316+ }
30317+#ifndef CONFIG_X86_NO_TSS
30318+ else
30319+ estacks = boot_exception_stacks;
30320+#endif
30321+
30322+ me = current;
30323+
30324+ if (cpu_test_and_set(cpu, cpu_initialized))
30325+ panic("CPU#%d already initialized!\n", cpu);
30326+
30327+ printk("Initializing CPU#%d\n", cpu);
30328+
30329+ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
30330+
30331+ /*
30332+ * Initialize the per-CPU GDT with the boot GDT,
30333+ * and set up the GDT descriptor:
30334+ */
30335+#ifndef CONFIG_XEN
30336+ if (cpu)
30337+ memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
30338+#endif
30339+
30340+ cpu_gdt_descr[cpu].size = GDT_SIZE;
30341+ cpu_gdt_init(&cpu_gdt_descr[cpu]);
30342+
30343+ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
30344+ syscall_init();
30345+
30346+ wrmsrl(MSR_FS_BASE, 0);
30347+ wrmsrl(MSR_KERNEL_GS_BASE, 0);
30348+ barrier();
30349+
30350+ check_efer();
30351+
30352+#ifndef CONFIG_X86_NO_TSS
30353+ /*
30354+ * set up and load the per-CPU TSS
30355+ */
30356+ for (v = 0; v < N_EXCEPTION_STACKS; v++) {
30357+ if (cpu) {
30358+ static const unsigned int order[N_EXCEPTION_STACKS] = {
30359+ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
30360+ [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
30361+ };
30362+
30363+ estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
30364+ if (!estacks)
30365+ panic("Cannot allocate exception stack %ld %d\n",
30366+ v, cpu);
30367+ }
30368+ switch (v + 1) {
30369+#if DEBUG_STKSZ > EXCEPTION_STKSZ
30370+ case DEBUG_STACK:
30371+ cpu_pda(cpu)->debugstack = (unsigned long)estacks;
30372+ estacks += DEBUG_STKSZ;
30373+ break;
30374+#endif
30375+ default:
30376+ estacks += EXCEPTION_STKSZ;
30377+ break;
30378+ }
30379+ orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
30380+ }
30381+
30382+ t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
30383+ /*
30384+ * <= is required because the CPU will access up to
30385+ * 8 bits beyond the end of the IO permission bitmap.
30386+ */
30387+ for (i = 0; i <= IO_BITMAP_LONGS; i++)
30388+ t->io_bitmap[i] = ~0UL;
30389+#endif
30390+
30391+ atomic_inc(&init_mm.mm_count);
30392+ me->active_mm = &init_mm;
30393+ if (me->mm)
30394+ BUG();
30395+ enter_lazy_tlb(&init_mm, me);
30396+
30397+#ifndef CONFIG_X86_NO_TSS
30398+ set_tss_desc(cpu, t);
30399+#endif
30400+#ifndef CONFIG_XEN
30401+ load_TR_desc();
30402+#endif
30403+ load_LDT(&init_mm.context);
30404+
30405+ /*
30406+ * Clear all 6 debug registers:
30407+ */
30408+
30409+ set_debugreg(0UL, 0);
30410+ set_debugreg(0UL, 1);
30411+ set_debugreg(0UL, 2);
30412+ set_debugreg(0UL, 3);
30413+ set_debugreg(0UL, 6);
30414+ set_debugreg(0UL, 7);
30415+
30416+ fpu_init();
30417+
30418+ raw_local_save_flags(kernel_eflags);
30419+}
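
The noexec= handling above only flips _PAGE_NX in __supported_pte_mask via strncmp()
matching, and check_efer() clears the bit again when the CPU does not advertise EFER.NX
(or noexec=off was given). A minimal userspace sketch of that option parsing, illustrative
only; PAGE_NX below merely stands in for the kernel's _PAGE_NX (bit 63 on x86-64):

#include <stdio.h>
#include <string.h>

#define PAGE_NX (1UL << 63)	/* stand-in for the kernel's _PAGE_NX */

static unsigned long supported_pte_mask = ~0UL;
static int do_not_nx;

/* Mirrors the strncmp()-based parsing in nonx_setup() above. */
static int nonx_setup(const char *str)
{
	if (!strncmp(str, "on", 2)) {
		supported_pte_mask |= PAGE_NX;
		do_not_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		do_not_nx = 1;
		supported_pte_mask &= ~PAGE_NX;
	}
	return 1;
}

int main(void)
{
	nonx_setup("off");
	printf("noexec=off -> do_not_nx=%d, NX bit %s\n", do_not_nx,
	       (supported_pte_mask & PAGE_NX) ? "kept" : "cleared");
	nonx_setup("on");
	printf("noexec=on  -> do_not_nx=%d, NX bit %s\n", do_not_nx,
	       (supported_pte_mask & PAGE_NX) ? "kept" : "cleared");
	return 0;
}

In practice this means booting with noexec=off disables NX for 64bit processes, while
noexec32=on/off controls whether PROT_READ implies PROT_EXEC for 32bit tasks, as the
comments above document.
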
30420Index: head-2008-11-25/arch/x86/kernel/smp_64-xen.c
30421===================================================================
30422--- /dev/null 1970-01-01 00:00:00.000000000 +0000
30423+++ head-2008-11-25/arch/x86/kernel/smp_64-xen.c 2008-04-02 12:34:02.000000000 +0200
30424@@ -0,0 +1,575 @@
30425+/*
30426+ * Intel SMP support routines.
30427+ *
30428+ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
30429+ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
30430+ * (c) 2002,2003 Andi Kleen, SuSE Labs.
30431+ *
30432+ * This code is released under the GNU General Public License version 2 or
30433+ * later.
30434+ */
30435+
30436+#include <linux/init.h>
30437+
30438+#include <linux/mm.h>
30439+#include <linux/delay.h>
30440+#include <linux/spinlock.h>
30441+#include <linux/smp_lock.h>
30442+#include <linux/smp.h>
30443+#include <linux/kernel_stat.h>
30444+#include <linux/mc146818rtc.h>
30445+#include <linux/interrupt.h>
30446+
30447+#include <asm/mtrr.h>
30448+#include <asm/pgalloc.h>
30449+#include <asm/tlbflush.h>
30450+#include <asm/mach_apic.h>
30451+#include <asm/mmu_context.h>
30452+#include <asm/proto.h>
30453+#include <asm/apicdef.h>
30454+#include <asm/idle.h>
30455+#ifdef CONFIG_XEN
30456+#include <xen/evtchn.h>
30457+#endif
30458+
30459+#ifndef CONFIG_XEN
30460+/*
30461+ * Smarter SMP flushing macros.
30462+ * c/o Linus Torvalds.
30463+ *
30464+ * These mean you can really definitely utterly forget about
30465+ * writing to user space from interrupts. (It's not allowed anyway).
30466+ *
30467+ * Optimizations Manfred Spraul <manfred@colorfullife.com>
30468+ *
30469+ * More scalable flush, from Andi Kleen
30470+ *
30471+ * To avoid global state use 8 different call vectors.
30472+ * Each CPU uses a specific vector to trigger flushes on other
30473+ * CPUs. Depending on the received vector the target CPUs look into
30474+ * the right per cpu variable for the flush data.
30475+ *
30476+ * With more than 8 CPUs they are hashed to the 8 available
30477+ * vectors. The limited global vector space forces us to this right now.
30478+ * In future when interrupts are split into per CPU domains this could be
30479+ * fixed, at the cost of triggering multiple IPIs in some cases.
30480+ */
30481+
30482+union smp_flush_state {
30483+ struct {
30484+ cpumask_t flush_cpumask;
30485+ struct mm_struct *flush_mm;
30486+ unsigned long flush_va;
30487+#define FLUSH_ALL -1ULL
30488+ spinlock_t tlbstate_lock;
30489+ };
30490+ char pad[SMP_CACHE_BYTES];
30491+} ____cacheline_aligned;
30492+
30493+/* State is put into the per CPU data section, but padded
30494+ to a full cache line because other CPUs can access it and we don't
30495+ want false sharing in the per cpu data segment. */
30496+static DEFINE_PER_CPU(union smp_flush_state, flush_state);
30497+
30498+/*
30499+ * We cannot call mmdrop() because we are in interrupt context,
30500+ * instead update mm->cpu_vm_mask.
30501+ */
30502+static inline void leave_mm(unsigned long cpu)
30503+{
30504+ if (read_pda(mmu_state) == TLBSTATE_OK)
30505+ BUG();
30506+ cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
30507+ load_cr3(swapper_pg_dir);
30508+}
30509+
30510+/*
30511+ *
30512+ * The flush IPI assumes that a thread switch happens in this order:
30513+ * [cpu0: the cpu that switches]
30514+ * 1) switch_mm() either 1a) or 1b)
30515+ * 1a) thread switch to a different mm
30516+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
30517+ * Stop ipi delivery for the old mm. This is not synchronized with
30518+ * 	the other cpus, but smp_invalidate_interrupt ignores flush ipis
30519+ * for the wrong mm, and in the worst case we perform a superfluous
30520+ * tlb flush.
30521+ * 1a2) set cpu mmu_state to TLBSTATE_OK
30522+ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
30523+ * was in lazy tlb mode.
30524+ * 1a3) update cpu active_mm
30525+ * Now cpu0 accepts tlb flushes for the new mm.
30526+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
30527+ * Now the other cpus will send tlb flush ipis.
30528+ * 1a4) change cr3.
30529+ * 1b) thread switch without mm change
30530+ * cpu active_mm is correct, cpu0 already handles
30531+ * flush ipis.
30532+ * 1b1) set cpu mmu_state to TLBSTATE_OK
30533+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
30534+ * Atomically set the bit [other cpus will start sending flush ipis],
30535+ * and test the bit.
30536+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
30537+ * 2) switch %%esp, ie current
30538+ *
30539+ * The interrupt must handle 2 special cases:
30540+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
30541+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
30542+ * runs in kernel space, the cpu could load tlb entries for user space
30543+ * pages.
30544+ *
30545+ * The good news is that cpu mmu_state is local to each cpu, no
30546+ * write/read ordering problems.
30547+ */
30548+
30549+/*
30550+ * TLB flush IPI:
30551+ *
30552+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
30553+ * 2) Leave the mm if we are in the lazy tlb mode.
30554+ *
30555+ * Interrupts are disabled.
30556+ */
30557+
30558+asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
30559+{
30560+ int cpu;
30561+ int sender;
30562+ union smp_flush_state *f;
30563+
30564+ cpu = smp_processor_id();
30565+ /*
30566+ * orig_rax contains the negated interrupt vector.
30567+ * Use that to determine where the sender put the data.
30568+ */
30569+ sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
30570+ f = &per_cpu(flush_state, sender);
30571+
30572+ if (!cpu_isset(cpu, f->flush_cpumask))
30573+ goto out;
30574+ /*
30575+ * This was a BUG() but until someone can quote me the
30576+ * line from the intel manual that guarantees an IPI to
30577+ * multiple CPUs is retried _only_ on the erroring CPUs
30578+	 * it's staying as a return
30579+ *
30580+ * BUG();
30581+ */
30582+
30583+ if (f->flush_mm == read_pda(active_mm)) {
30584+ if (read_pda(mmu_state) == TLBSTATE_OK) {
30585+ if (f->flush_va == FLUSH_ALL)
30586+ local_flush_tlb();
30587+ else
30588+ __flush_tlb_one(f->flush_va);
30589+ } else
30590+ leave_mm(cpu);
30591+ }
30592+out:
30593+ ack_APIC_irq();
30594+ cpu_clear(cpu, f->flush_cpumask);
30595+}
30596+
30597+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
30598+ unsigned long va)
30599+{
30600+ int sender;
30601+ union smp_flush_state *f;
30602+
30603+ /* Caller has disabled preemption */
30604+ sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
30605+ f = &per_cpu(flush_state, sender);
30606+
30607+ /* Could avoid this lock when
30608+ num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
30609+ probably not worth checking this for a cache-hot lock. */
30610+ spin_lock(&f->tlbstate_lock);
30611+
30612+ f->flush_mm = mm;
30613+ f->flush_va = va;
30614+ cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
30615+
30616+ /*
30617+ * We have to send the IPI only to
30618+ * CPUs affected.
30619+ */
30620+ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
30621+
30622+ while (!cpus_empty(f->flush_cpumask))
30623+ cpu_relax();
30624+
30625+ f->flush_mm = NULL;
30626+ f->flush_va = 0;
30627+ spin_unlock(&f->tlbstate_lock);
30628+}
30629+
30630+int __cpuinit init_smp_flush(void)
30631+{
30632+ int i;
30633+ for_each_cpu_mask(i, cpu_possible_map) {
30634+ spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
30635+ }
30636+ return 0;
30637+}
30638+
30639+core_initcall(init_smp_flush);
30640+
30641+void flush_tlb_current_task(void)
30642+{
30643+ struct mm_struct *mm = current->mm;
30644+ cpumask_t cpu_mask;
30645+
30646+ preempt_disable();
30647+ cpu_mask = mm->cpu_vm_mask;
30648+ cpu_clear(smp_processor_id(), cpu_mask);
30649+
30650+ local_flush_tlb();
30651+ if (!cpus_empty(cpu_mask))
30652+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
30653+ preempt_enable();
30654+}
30655+EXPORT_SYMBOL(flush_tlb_current_task);
30656+
30657+void flush_tlb_mm (struct mm_struct * mm)
30658+{
30659+ cpumask_t cpu_mask;
30660+
30661+ preempt_disable();
30662+ cpu_mask = mm->cpu_vm_mask;
30663+ cpu_clear(smp_processor_id(), cpu_mask);
30664+
30665+ if (current->active_mm == mm) {
30666+ if (current->mm)
30667+ local_flush_tlb();
30668+ else
30669+ leave_mm(smp_processor_id());
30670+ }
30671+ if (!cpus_empty(cpu_mask))
30672+ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
30673+
30674+ preempt_enable();
30675+}
30676+EXPORT_SYMBOL(flush_tlb_mm);
30677+
30678+void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
30679+{
30680+ struct mm_struct *mm = vma->vm_mm;
30681+ cpumask_t cpu_mask;
30682+
30683+ preempt_disable();
30684+ cpu_mask = mm->cpu_vm_mask;
30685+ cpu_clear(smp_processor_id(), cpu_mask);
30686+
30687+ if (current->active_mm == mm) {
30688+ if(current->mm)
30689+ __flush_tlb_one(va);
30690+ else
30691+ leave_mm(smp_processor_id());
30692+ }
30693+
30694+ if (!cpus_empty(cpu_mask))
30695+ flush_tlb_others(cpu_mask, mm, va);
30696+
30697+ preempt_enable();
30698+}
30699+EXPORT_SYMBOL(flush_tlb_page);
30700+
30701+static void do_flush_tlb_all(void* info)
30702+{
30703+ unsigned long cpu = smp_processor_id();
30704+
30705+ __flush_tlb_all();
30706+ if (read_pda(mmu_state) == TLBSTATE_LAZY)
30707+ leave_mm(cpu);
30708+}
30709+
30710+void flush_tlb_all(void)
30711+{
30712+ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
30713+}
30714+#endif /* Xen */
30715+
30716+/*
30717+ * this function sends a 'reschedule' IPI to another CPU.
30718+ * it goes straight through and wastes no time serializing
30719+ * anything. Worst case is that we lose a reschedule ...
30720+ */
30721+
30722+void smp_send_reschedule(int cpu)
30723+{
30724+ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
30725+}
30726+
30727+/*
30728+ * Structure and data for smp_call_function(). This is designed to minimise
30729+ * static memory requirements. It also looks cleaner.
30730+ */
30731+static DEFINE_SPINLOCK(call_lock);
30732+
30733+struct call_data_struct {
30734+ void (*func) (void *info);
30735+ void *info;
30736+ atomic_t started;
30737+ atomic_t finished;
30738+ int wait;
30739+};
30740+
30741+static struct call_data_struct * call_data;
30742+
30743+void lock_ipi_call_lock(void)
30744+{
30745+ spin_lock_irq(&call_lock);
30746+}
30747+
30748+void unlock_ipi_call_lock(void)
30749+{
30750+ spin_unlock_irq(&call_lock);
30751+}
30752+
30753+/*
30754+ * this function sends a 'generic call function' IPI to one other CPU
30755+ * in the system.
30756+ *
30757+ * cpu is a standard Linux logical CPU number.
30758+ */
30759+static void
30760+__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
30761+ int nonatomic, int wait)
30762+{
30763+ struct call_data_struct data;
30764+ int cpus = 1;
30765+
30766+ data.func = func;
30767+ data.info = info;
30768+ atomic_set(&data.started, 0);
30769+ data.wait = wait;
30770+ if (wait)
30771+ atomic_set(&data.finished, 0);
30772+
30773+ call_data = &data;
30774+ wmb();
30775+ /* Send a message to all other CPUs and wait for them to respond */
30776+ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
30777+
30778+ /* Wait for response */
30779+ while (atomic_read(&data.started) != cpus)
30780+ cpu_relax();
30781+
30782+ if (!wait)
30783+ return;
30784+
30785+ while (atomic_read(&data.finished) != cpus)
30786+ cpu_relax();
30787+}
30788+
30789+/*
30790+ * smp_call_function_single - Run a function on another CPU
30791+ * @func: The function to run. This must be fast and non-blocking.
30792+ * @info: An arbitrary pointer to pass to the function.
30793+ * @nonatomic: Currently unused.
30794+ * @wait: If true, wait until function has completed on other CPUs.
30795+ *
30796+ * Returns 0 on success, else a negative status code.
30797+ *
30798+ * Does not return until the remote CPU is nearly ready to execute <func>,
30799+ * is executing it, or has already executed it.
30800+ */
30801+
30802+int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
30803+ int nonatomic, int wait)
30804+{
30805+ /* prevent preemption and reschedule on another processor */
30806+ int me = get_cpu();
30807+ if (cpu == me) {
30808+ WARN_ON(1);
30809+ put_cpu();
30810+ return -EBUSY;
30811+ }
30812+ spin_lock_bh(&call_lock);
30813+ __smp_call_function_single(cpu, func, info, nonatomic, wait);
30814+ spin_unlock_bh(&call_lock);
30815+ put_cpu();
30816+ return 0;
30817+}
30818+
30819+/*
30820+ * this function sends a 'generic call function' IPI to all other CPUs
30821+ * in the system.
30822+ */
30823+static void __smp_call_function (void (*func) (void *info), void *info,
30824+ int nonatomic, int wait)
30825+{
30826+ struct call_data_struct data;
30827+ int cpus = num_online_cpus()-1;
30828+
30829+ if (!cpus)
30830+ return;
30831+
30832+ data.func = func;
30833+ data.info = info;
30834+ atomic_set(&data.started, 0);
30835+ data.wait = wait;
30836+ if (wait)
30837+ atomic_set(&data.finished, 0);
30838+
30839+ call_data = &data;
30840+ wmb();
30841+ /* Send a message to all other CPUs and wait for them to respond */
30842+ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
30843+
30844+ /* Wait for response */
30845+ while (atomic_read(&data.started) != cpus)
30846+ cpu_relax();
30847+
30848+ if (!wait)
30849+ return;
30850+
30851+ while (atomic_read(&data.finished) != cpus)
30852+ cpu_relax();
30853+}
30854+
30855+/*
30856+ * smp_call_function - run a function on all other CPUs.
30857+ * @func: The function to run. This must be fast and non-blocking.
30858+ * @info: An arbitrary pointer to pass to the function.
30859+ * @nonatomic: currently unused.
30860+ * @wait: If true, wait (atomically) until function has completed on other
30861+ * CPUs.
30862+ *
30863+ * Returns 0 on success, else a negative status code. Does not return until
30864+ * remote CPUs are nearly ready to execute func, are executing it, or have executed it.
30865+ *
30866+ * You must not call this function with disabled interrupts or from a
30867+ * hardware interrupt handler or from a bottom half handler.
30868+ * Actually there are a few legal cases, like panic.
30869+ */
30870+int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
30871+ int wait)
30872+{
30873+ spin_lock(&call_lock);
30874+ __smp_call_function(func,info,nonatomic,wait);
30875+ spin_unlock(&call_lock);
30876+ return 0;
30877+}
30878+EXPORT_SYMBOL(smp_call_function);
30879+
30880+void smp_stop_cpu(void)
30881+{
30882+ unsigned long flags;
30883+ /*
30884+ * Remove this CPU:
30885+ */
30886+ cpu_clear(smp_processor_id(), cpu_online_map);
30887+ local_irq_save(flags);
30888+ disable_all_local_evtchn();
30889+ local_irq_restore(flags);
30890+}
30891+
30892+static void smp_really_stop_cpu(void *dummy)
30893+{
30894+ smp_stop_cpu();
30895+ for (;;)
30896+ halt();
30897+}
30898+
30899+void smp_send_stop(void)
30900+{
30901+ int nolock = 0;
30902+#ifndef CONFIG_XEN
30903+ if (reboot_force)
30904+ return;
30905+#endif
30906+ /* Don't deadlock on the call lock in panic */
30907+ if (!spin_trylock(&call_lock)) {
30908+		/* ignore locking because we have panicked anyway */
30909+ nolock = 1;
30910+ }
30911+ __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
30912+ if (!nolock)
30913+ spin_unlock(&call_lock);
30914+
30915+ local_irq_disable();
30916+ disable_all_local_evtchn();
30917+ local_irq_enable();
30918+}
30919+
30920+/*
30921+ * Reschedule call back. Nothing to do,
30922+ * all the work is done automatically when
30923+ * we return from the interrupt.
30924+ */
30925+#ifndef CONFIG_XEN
30926+asmlinkage void smp_reschedule_interrupt(void)
30927+#else
30928+asmlinkage irqreturn_t smp_reschedule_interrupt(void)
30929+#endif
30930+{
30931+#ifndef CONFIG_XEN
30932+ ack_APIC_irq();
30933+#else
30934+ return IRQ_HANDLED;
30935+#endif
30936+}
30937+
30938+#ifndef CONFIG_XEN
30939+asmlinkage void smp_call_function_interrupt(void)
30940+#else
30941+asmlinkage irqreturn_t smp_call_function_interrupt(void)
30942+#endif
30943+{
30944+ void (*func) (void *info) = call_data->func;
30945+ void *info = call_data->info;
30946+ int wait = call_data->wait;
30947+
30948+#ifndef CONFIG_XEN
30949+ ack_APIC_irq();
30950+#endif
30951+ /*
30952+ * Notify initiating CPU that I've grabbed the data and am
30953+ * about to execute the function
30954+ */
30955+ mb();
30956+ atomic_inc(&call_data->started);
30957+ /*
30958+ * At this point the info structure may be out of scope unless wait==1
30959+ */
30960+ exit_idle();
30961+ irq_enter();
30962+ (*func)(info);
30963+ irq_exit();
30964+ if (wait) {
30965+ mb();
30966+ atomic_inc(&call_data->finished);
30967+ }
30968+#ifdef CONFIG_XEN
30969+ return IRQ_HANDLED;
30970+#endif
30971+}
30972+
30973+int safe_smp_processor_id(void)
30974+{
30975+#ifdef CONFIG_XEN
30976+ return smp_processor_id();
30977+#else
30978+ unsigned apicid, i;
30979+
30980+ if (disable_apic)
30981+ return 0;
30982+
30983+ apicid = hard_smp_processor_id();
30984+ if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
30985+ return apicid;
30986+
30987+ for (i = 0; i < NR_CPUS; ++i) {
30988+ if (x86_cpu_to_apicid[i] == apicid)
30989+ return i;
30990+ }
30991+
30992+ /* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
30993+ * or called too early. Either way, we must be CPU 0. */
30994+ if (x86_cpu_to_apicid[0] == BAD_APICID)
30995+ return 0;
30996+
30997+ return 0; /* Should not happen */
30998+#endif
30999+}
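
The smp_call_function() paths above all follow the same handshake: the initiator fills
call_data, publishes it with wmb(), sends the CALL_FUNCTION IPI, then spins until started
(and, when wait is set, finished) reaches the number of target CPUs; each receiver grabs
the data, increments started, runs the function and increments finished. Below is a rough
userspace analogue of that protocol using C11 atomics, with a pthread standing in for the
remote CPU (illustrative sketch only; ipi_pending, worker() and say_hello() are invented
names, not kernel interfaces):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Userspace analogue of struct call_data_struct above. */
struct call_data {
	void (*func)(void *info);
	void *info;
	atomic_int started;
	atomic_int finished;
	int wait;
};

static struct call_data *call_data;	/* published by the initiator */
static atomic_int ipi_pending;		/* stand-in for the CALL_FUNCTION IPI */

static void say_hello(void *info)
{
	printf("worker ran func with info=%s\n", (const char *)info);
}

/* Plays the role of smp_call_function_interrupt() on a remote CPU. */
static void *worker(void *arg)
{
	while (!atomic_load(&ipi_pending))	/* wait for the "IPI" */
		;
	struct call_data *d = call_data;
	void (*func)(void *) = d->func;
	void *info = d->info;
	int wait = d->wait;

	atomic_fetch_add(&d->started, 1);	/* tell initiator: data grabbed */
	func(info);
	if (wait)
		atomic_fetch_add(&d->finished, 1);
	return NULL;
}

int main(void)
{
	struct call_data data = {
		.func = say_hello, .info = (void *)"hello",
		.started = 0, .finished = 0, .wait = 1,
	};
	pthread_t tid;
	int cpus = 1;				/* one "other CPU" */

	pthread_create(&tid, NULL, worker, NULL);

	call_data = &data;			/* publish, then "send the IPI" */
	atomic_store(&ipi_pending, 1);

	while (atomic_load(&data.started) != cpus)	/* wait for response */
		;
	while (data.wait && atomic_load(&data.finished) != cpus)
		;
	pthread_join(tid, NULL);
	return 0;
}

Here the seq_cst store to ipi_pending plays the role of the wmb()/send_IPI pair, so the
worker is guaranteed to observe call_data once it sees the flag; build with something like
cc -pthread.
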
31000Index: head-2008-11-25/arch/x86/kernel/traps_64-xen.c
31001===================================================================
31002--- /dev/null 1970-01-01 00:00:00.000000000 +0000
31003+++ head-2008-11-25/arch/x86/kernel/traps_64-xen.c 2008-04-02 12:34:02.000000000 +0200
31004@@ -0,0 +1,1173 @@
31005+/*
31006+ * linux/arch/x86-64/traps.c
31007+ *
31008+ * Copyright (C) 1991, 1992 Linus Torvalds
31009+ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
31010+ *
31011+ * Pentium III FXSR, SSE support
31012+ * Gareth Hughes <gareth@valinux.com>, May 2000
31013+ */
31014+
31015+/*
31016+ * 'Traps.c' handles hardware traps and faults after we have saved some
31017+ * state in 'entry.S'.
31018+ */
31019+#include <linux/sched.h>
31020+#include <linux/kernel.h>
31021+#include <linux/string.h>
31022+#include <linux/errno.h>
31023+#include <linux/ptrace.h>
31024+#include <linux/timer.h>
31025+#include <linux/mm.h>
31026+#include <linux/init.h>
31027+#include <linux/delay.h>
31028+#include <linux/spinlock.h>
31029+#include <linux/interrupt.h>
31030+#include <linux/module.h>
31031+#include <linux/moduleparam.h>
31032+#include <linux/nmi.h>
31033+#include <linux/kprobes.h>
31034+#include <linux/kexec.h>
31035+#include <linux/unwind.h>
31036+
31037+#include <asm/system.h>
31038+#include <asm/uaccess.h>
31039+#include <asm/io.h>
31040+#include <asm/atomic.h>
31041+#include <asm/debugreg.h>
31042+#include <asm/desc.h>
31043+#include <asm/i387.h>
31044+#include <asm/kdebug.h>
31045+#include <asm/processor.h>
31046+#include <asm/unwind.h>
31047+#include <asm/smp.h>
31048+#include <asm/pgalloc.h>
31049+#include <asm/pda.h>
31050+#include <asm/proto.h>
31051+#include <asm/nmi.h>
31052+
31053+asmlinkage void divide_error(void);
31054+asmlinkage void debug(void);
31055+asmlinkage void nmi(void);
31056+asmlinkage void int3(void);
31057+asmlinkage void overflow(void);
31058+asmlinkage void bounds(void);
31059+asmlinkage void invalid_op(void);
31060+asmlinkage void device_not_available(void);
31061+asmlinkage void double_fault(void);
31062+asmlinkage void coprocessor_segment_overrun(void);
31063+asmlinkage void invalid_TSS(void);
31064+asmlinkage void segment_not_present(void);
31065+asmlinkage void stack_segment(void);
31066+asmlinkage void general_protection(void);
31067+asmlinkage void page_fault(void);
31068+asmlinkage void coprocessor_error(void);
31069+asmlinkage void simd_coprocessor_error(void);
31070+asmlinkage void reserved(void);
31071+asmlinkage void alignment_check(void);
31072+asmlinkage void machine_check(void);
31073+asmlinkage void spurious_interrupt_bug(void);
31074+
31075+ATOMIC_NOTIFIER_HEAD(die_chain);
31076+EXPORT_SYMBOL(die_chain);
31077+
31078+int register_die_notifier(struct notifier_block *nb)
31079+{
31080+ vmalloc_sync_all();
31081+ return atomic_notifier_chain_register(&die_chain, nb);
31082+}
31083+EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
31084+
31085+int unregister_die_notifier(struct notifier_block *nb)
31086+{
31087+ return atomic_notifier_chain_unregister(&die_chain, nb);
31088+}
31089+EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
31090+
31091+static inline void conditional_sti(struct pt_regs *regs)
31092+{
31093+ if (regs->eflags & X86_EFLAGS_IF)
31094+ local_irq_enable();
31095+}
31096+
31097+static inline void preempt_conditional_sti(struct pt_regs *regs)
31098+{
31099+ preempt_disable();
31100+ if (regs->eflags & X86_EFLAGS_IF)
31101+ local_irq_enable();
31102+}
31103+
31104+static inline void preempt_conditional_cli(struct pt_regs *regs)
31105+{
31106+ if (regs->eflags & X86_EFLAGS_IF)
31107+ local_irq_disable();
31108+ /* Make sure to not schedule here because we could be running
31109+ on an exception stack. */
31110+ preempt_enable_no_resched();
31111+}
31112+
31113+static int kstack_depth_to_print = 12;
31114+#ifdef CONFIG_STACK_UNWIND
31115+static int call_trace = 1;
31116+#else
31117+#define call_trace (-1)
31118+#endif
31119+
31120+#ifdef CONFIG_KALLSYMS
31121+# include <linux/kallsyms.h>
31122+void printk_address(unsigned long address)
31123+{
31124+ unsigned long offset = 0, symsize;
31125+ const char *symname;
31126+ char *modname;
31127+ char *delim = ":";
31128+ char namebuf[128];
31129+
31130+ symname = kallsyms_lookup(address, &symsize, &offset,
31131+ &modname, namebuf);
31132+ if (!symname) {
31133+ printk(" [<%016lx>]\n", address);
31134+ return;
31135+ }
31136+ if (!modname)
31137+ modname = delim = "";
31138+ printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
31139+ address, delim, modname, delim, symname, offset, symsize);
31140+}
31141+#else
31142+void printk_address(unsigned long address)
31143+{
31144+ printk(" [<%016lx>]\n", address);
31145+}
31146+#endif
31147+
31148+static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
31149+ unsigned *usedp, const char **idp)
31150+{
31151+#ifndef CONFIG_X86_NO_TSS
31152+ static char ids[][8] = {
31153+ [DEBUG_STACK - 1] = "#DB",
31154+ [NMI_STACK - 1] = "NMI",
31155+ [DOUBLEFAULT_STACK - 1] = "#DF",
31156+ [STACKFAULT_STACK - 1] = "#SS",
31157+ [MCE_STACK - 1] = "#MC",
31158+#if DEBUG_STKSZ > EXCEPTION_STKSZ
31159+ [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
31160+#endif
31161+ };
31162+ unsigned k;
31163+
31164+ /*
31165+ * Iterate over all exception stacks, and figure out whether
31166+ * 'stack' is in one of them:
31167+ */
31168+ for (k = 0; k < N_EXCEPTION_STACKS; k++) {
31169+ unsigned long end;
31170+
31171+ /*
31172+ * set 'end' to the end of the exception stack.
31173+ */
31174+ switch (k + 1) {
31175+ /*
31176+		 * TODO: this block is not needed, I think, because
31177+ * setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
31178+ * properly too.
31179+ */
31180+#if DEBUG_STKSZ > EXCEPTION_STKSZ
31181+ case DEBUG_STACK:
31182+ end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
31183+ break;
31184+#endif
31185+ default:
31186+ end = per_cpu(orig_ist, cpu).ist[k];
31187+ break;
31188+ }
31189+ /*
31190+ * Is 'stack' above this exception frame's end?
31191+ * If yes then skip to the next frame.
31192+ */
31193+ if (stack >= end)
31194+ continue;
31195+ /*
31196+ * Is 'stack' above this exception frame's start address?
31197+ * If yes then we found the right frame.
31198+ */
31199+ if (stack >= end - EXCEPTION_STKSZ) {
31200+ /*
31201+ * Make sure we only iterate through an exception
31202+ * stack once. If it comes up for the second time
31203+ * then there's something wrong going on - just
31204+ * break out and return NULL:
31205+ */
31206+ if (*usedp & (1U << k))
31207+ break;
31208+ *usedp |= 1U << k;
31209+ *idp = ids[k];
31210+ return (unsigned long *)end;
31211+ }
31212+ /*
31213+ * If this is a debug stack, and if it has a larger size than
31214+ * the usual exception stacks, then 'stack' might still
31215+ * be within the lower portion of the debug stack:
31216+ */
31217+#if DEBUG_STKSZ > EXCEPTION_STKSZ
31218+ if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
31219+ unsigned j = N_EXCEPTION_STACKS - 1;
31220+
31221+ /*
31222+ * Black magic. A large debug stack is composed of
31223+ * multiple exception stack entries, which we
31224+			 * iterate through now. Don't look:
31225+ */
31226+ do {
31227+ ++j;
31228+ end -= EXCEPTION_STKSZ;
31229+ ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
31230+ } while (stack < end - EXCEPTION_STKSZ);
31231+ if (*usedp & (1U << j))
31232+ break;
31233+ *usedp |= 1U << j;
31234+ *idp = ids[j];
31235+ return (unsigned long *)end;
31236+ }
31237+#endif
31238+ }
31239+#endif
31240+ return NULL;
31241+}
31242+
31243+static int show_trace_unwind(struct unwind_frame_info *info, void *context)
31244+{
31245+ int n = 0;
31246+
31247+ while (unwind(info) == 0 && UNW_PC(info)) {
31248+ n++;
31249+ printk_address(UNW_PC(info));
31250+ if (arch_unw_user_mode(info))
31251+ break;
31252+ }
31253+ return n;
31254+}
31255+
31256+/*
31257+ * x86-64 can have up to three kernel stacks:
31258+ * process stack
31259+ * interrupt stack
31260+ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
31261+ */
31262+
31263+void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
31264+{
31265+ const unsigned cpu = safe_smp_processor_id();
31266+ unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
31267+ unsigned used = 0;
31268+
31269+ printk("\nCall Trace:\n");
31270+
31271+ if (!tsk)
31272+ tsk = current;
31273+
31274+ if (call_trace >= 0) {
31275+ int unw_ret = 0;
31276+ struct unwind_frame_info info;
31277+
31278+ if (regs) {
31279+ if (unwind_init_frame_info(&info, tsk, regs) == 0)
31280+ unw_ret = show_trace_unwind(&info, NULL);
31281+ } else if (tsk == current)
31282+ unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
31283+ else {
31284+ if (unwind_init_blocked(&info, tsk) == 0)
31285+ unw_ret = show_trace_unwind(&info, NULL);
31286+ }
31287+ if (unw_ret > 0) {
31288+ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
31289+ print_symbol("DWARF2 unwinder stuck at %s\n",
31290+ UNW_PC(&info));
31291+ if ((long)UNW_SP(&info) < 0) {
31292+ printk("Leftover inexact backtrace:\n");
31293+ stack = (unsigned long *)UNW_SP(&info);
31294+ } else
31295+ printk("Full inexact backtrace again:\n");
31296+ } else if (call_trace >= 1)
31297+ return;
31298+ else
31299+ printk("Full inexact backtrace again:\n");
31300+ } else
31301+ printk("Inexact backtrace:\n");
31302+ }
31303+
31304+ /*
31305+ * Print function call entries within a stack. 'cond' is the
31306+ * "end of stackframe" condition, that the 'stack++'
31307+ * iteration will eventually trigger.
31308+ */
31309+#define HANDLE_STACK(cond) \
31310+ do while (cond) { \
31311+ unsigned long addr = *stack++; \
31312+ if (kernel_text_address(addr)) { \
31313+ /* \
31314+ * If the address is either in the text segment of the \
31315+ * kernel, or in the region which contains vmalloc'ed \
31316+ * memory, it *may* be the address of a calling \
31317+ * routine; if so, print it so that someone tracing \
31318+ * down the cause of the crash will be able to figure \
31319+ * out the call path that was taken. \
31320+ */ \
31321+ printk_address(addr); \
31322+ } \
31323+ } while (0)
31324+
31325+ /*
31326+ * Print function call entries in all stacks, starting at the
31327+ * current stack address. If the stacks consist of nested
31328+ * exceptions
31329+ */
31330+ for ( ; ; ) {
31331+ const char *id;
31332+ unsigned long *estack_end;
31333+ estack_end = in_exception_stack(cpu, (unsigned long)stack,
31334+ &used, &id);
31335+
31336+ if (estack_end) {
31337+ printk(" <%s>", id);
31338+ HANDLE_STACK (stack < estack_end);
31339+ printk(" <EOE>");
31340+ /*
31341+ * We link to the next stack via the
31342+ * second-to-last pointer (index -2 to end) in the
31343+ * exception stack:
31344+ */
31345+ stack = (unsigned long *) estack_end[-2];
31346+ continue;
31347+ }
31348+ if (irqstack_end) {
31349+ unsigned long *irqstack;
31350+ irqstack = irqstack_end -
31351+ (IRQSTACKSIZE - 64) / sizeof(*irqstack);
31352+
31353+ if (stack >= irqstack && stack < irqstack_end) {
31354+ printk(" <IRQ>");
31355+ HANDLE_STACK (stack < irqstack_end);
31356+ /*
31357+ * We link to the next stack (which would be
31358+ * the process stack normally) the last
31359+ * pointer (index -1 to end) in the IRQ stack:
31360+ */
31361+ stack = (unsigned long *) (irqstack_end[-1]);
31362+ irqstack_end = NULL;
31363+ printk(" <EOI>");
31364+ continue;
31365+ }
31366+ }
31367+ break;
31368+ }
31369+
31370+ /*
31371+ * This prints the process stack:
31372+ */
31373+ HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
31374+#undef HANDLE_STACK
31375+
31376+ printk("\n");
31377+}
31378+
31379+static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
31380+{
31381+ unsigned long *stack;
31382+ int i;
31383+ const int cpu = safe_smp_processor_id();
31384+ unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
31385+ unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
31386+
31387+ // debugging aid: "show_stack(NULL, NULL);" prints the
31388+ // back trace for this cpu.
31389+
31390+ if (rsp == NULL) {
31391+ if (tsk)
31392+ rsp = (unsigned long *)tsk->thread.rsp;
31393+ else
31394+ rsp = (unsigned long *)&rsp;
31395+ }
31396+
31397+ stack = rsp;
31398+ for(i=0; i < kstack_depth_to_print; i++) {
31399+ if (stack >= irqstack && stack <= irqstack_end) {
31400+ if (stack == irqstack_end) {
31401+ stack = (unsigned long *) (irqstack_end[-1]);
31402+ printk(" <EOI> ");
31403+ }
31404+ } else {
31405+ if (((long) stack & (THREAD_SIZE-1)) == 0)
31406+ break;
31407+ }
31408+ if (i && ((i % 4) == 0))
31409+ printk("\n");
31410+ printk(" %016lx", *stack++);
31411+ touch_nmi_watchdog();
31412+ }
31413+ show_trace(tsk, regs, rsp);
31414+}
31415+
31416+void show_stack(struct task_struct *tsk, unsigned long * rsp)
31417+{
31418+ _show_stack(tsk, NULL, rsp);
31419+}
31420+
31421+/*
31422+ * The architecture-independent dump_stack generator
31423+ */
31424+void dump_stack(void)
31425+{
31426+ unsigned long dummy;
31427+ show_trace(NULL, NULL, &dummy);
31428+}
31429+
31430+EXPORT_SYMBOL(dump_stack);
31431+
31432+void show_registers(struct pt_regs *regs)
31433+{
31434+ int i;
31435+ int in_kernel = !user_mode(regs);
31436+ unsigned long rsp;
31437+ const int cpu = safe_smp_processor_id();
31438+ struct task_struct *cur = cpu_pda(cpu)->pcurrent;
31439+
31440+ rsp = regs->rsp;
31441+
31442+ printk("CPU %d ", cpu);
31443+ __show_regs(regs);
31444+ printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
31445+ cur->comm, cur->pid, task_thread_info(cur), cur);
31446+
31447+ /*
31448+ * When in-kernel, we also print out the stack and code at the
31449+ * time of the fault..
31450+ */
31451+ if (in_kernel) {
31452+
31453+ printk("Stack: ");
31454+ _show_stack(NULL, regs, (unsigned long*)rsp);
31455+
31456+ printk("\nCode: ");
31457+ if (regs->rip < PAGE_OFFSET)
31458+ goto bad;
31459+
31460+ for (i=0; i<20; i++) {
31461+ unsigned char c;
31462+ if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
31463+bad:
31464+ printk(" Bad RIP value.");
31465+ break;
31466+ }
31467+ printk("%02x ", c);
31468+ }
31469+ }
31470+ printk("\n");
31471+}
31472+
31473+void handle_BUG(struct pt_regs *regs)
31474+{
31475+ struct bug_frame f;
31476+ long len;
31477+ const char *prefix = "";
31478+
31479+ if (user_mode(regs))
31480+ return;
31481+ if (__copy_from_user(&f, (const void __user *) regs->rip,
31482+ sizeof(struct bug_frame)))
31483+ return;
31484+ if (f.filename >= 0 ||
31485+ f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
31486+ return;
31487+ len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
31488+ if (len < 0 || len >= PATH_MAX)
31489+ f.filename = (int)(long)"unmapped filename";
31490+ else if (len > 50) {
31491+ f.filename += len - 50;
31492+ prefix = "...";
31493+ }
31494+ printk("----------- [cut here ] --------- [please bite here ] ---------\n");
31495+ printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
31496+}
31497+
31498+#ifdef CONFIG_BUG
31499+void out_of_line_bug(void)
31500+{
31501+ BUG();
31502+}
31503+EXPORT_SYMBOL(out_of_line_bug);
31504+#endif
31505+
31506+static DEFINE_SPINLOCK(die_lock);
31507+static int die_owner = -1;
31508+static unsigned int die_nest_count;
31509+
31510+unsigned __kprobes long oops_begin(void)
31511+{
31512+ int cpu = safe_smp_processor_id();
31513+ unsigned long flags;
31514+
31515+ /* racy, but better than risking deadlock. */
31516+ local_irq_save(flags);
31517+ if (!spin_trylock(&die_lock)) {
31518+ if (cpu == die_owner)
31519+ /* nested oops. should stop eventually */;
31520+ else
31521+ spin_lock(&die_lock);
31522+ }
31523+ die_nest_count++;
31524+ die_owner = cpu;
31525+ console_verbose();
31526+ bust_spinlocks(1);
31527+ return flags;
31528+}
31529+
31530+void __kprobes oops_end(unsigned long flags)
31531+{
31532+ die_owner = -1;
31533+ bust_spinlocks(0);
31534+ die_nest_count--;
31535+ if (die_nest_count)
31536+ /* We still own the lock */
31537+ local_irq_restore(flags);
31538+ else
31539+ /* Nest count reaches zero, release the lock. */
31540+ spin_unlock_irqrestore(&die_lock, flags);
31541+ if (panic_on_oops)
31542+ panic("Fatal exception");
31543+}
31544+
31545+void __kprobes __die(const char * str, struct pt_regs * regs, long err)
31546+{
31547+ static int die_counter;
31548+	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
31549+#ifdef CONFIG_PREEMPT
31550+ printk("PREEMPT ");
31551+#endif
31552+#ifdef CONFIG_SMP
31553+ printk("SMP ");
31554+#endif
31555+#ifdef CONFIG_DEBUG_PAGEALLOC
31556+ printk("DEBUG_PAGEALLOC");
31557+#endif
31558+ printk("\n");
31559+ notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
31560+ show_registers(regs);
31561+ /* Executive summary in case the oops scrolled away */
31562+ printk(KERN_ALERT "RIP ");
31563+ printk_address(regs->rip);
31564+ printk(" RSP <%016lx>\n", regs->rsp);
31565+ if (kexec_should_crash(current))
31566+ crash_kexec(regs);
31567+}
31568+
31569+void die(const char * str, struct pt_regs * regs, long err)
31570+{
31571+ unsigned long flags = oops_begin();
31572+
31573+ handle_BUG(regs);
31574+ __die(str, regs, err);
31575+ oops_end(flags);
31576+ do_exit(SIGSEGV);
31577+}
31578+
31579+#ifdef CONFIG_X86_LOCAL_APIC
31580+void __kprobes die_nmi(char *str, struct pt_regs *regs)
31581+{
31582+ unsigned long flags = oops_begin();
31583+
31584+ /*
31585+	 * We are in trouble anyway, let's at least try
31586+ * to get a message out.
31587+ */
31588+ printk(str, safe_smp_processor_id());
31589+ show_registers(regs);
31590+ if (kexec_should_crash(current))
31591+ crash_kexec(regs);
31592+ if (panic_on_timeout || panic_on_oops)
31593+ panic("nmi watchdog");
31594+ printk("console shuts up ...\n");
31595+ oops_end(flags);
31596+ nmi_exit();
31597+ local_irq_enable();
31598+ do_exit(SIGSEGV);
31599+}
31600+#endif
31601+
31602+static void __kprobes do_trap(int trapnr, int signr, char *str,
31603+ struct pt_regs * regs, long error_code,
31604+ siginfo_t *info)
31605+{
31606+ struct task_struct *tsk = current;
31607+
31608+ tsk->thread.error_code = error_code;
31609+ tsk->thread.trap_no = trapnr;
31610+
31611+ if (user_mode(regs)) {
31612+ if (exception_trace && unhandled_signal(tsk, signr))
31613+ printk(KERN_INFO
31614+ "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
31615+ tsk->comm, tsk->pid, str,
31616+ regs->rip, regs->rsp, error_code);
31617+
31618+ if (info)
31619+ force_sig_info(signr, info, tsk);
31620+ else
31621+ force_sig(signr, tsk);
31622+ return;
31623+ }
31624+
31625+
31626+ /* kernel trap */
31627+ {
31628+ const struct exception_table_entry *fixup;
31629+ fixup = search_exception_tables(regs->rip);
31630+ if (fixup)
31631+ regs->rip = fixup->fixup;
31632+ else
31633+ die(str, regs, error_code);
31634+ return;
31635+ }
31636+}
31637+
31638+#define DO_ERROR(trapnr, signr, str, name) \
31639+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
31640+{ \
31641+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
31642+ == NOTIFY_STOP) \
31643+ return; \
31644+ conditional_sti(regs); \
31645+ do_trap(trapnr, signr, str, regs, error_code, NULL); \
31646+}
31647+
31648+#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
31649+asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
31650+{ \
31651+ siginfo_t info; \
31652+ info.si_signo = signr; \
31653+ info.si_errno = 0; \
31654+ info.si_code = sicode; \
31655+ info.si_addr = (void __user *)siaddr; \
31656+ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
31657+ == NOTIFY_STOP) \
31658+ return; \
31659+ conditional_sti(regs); \
31660+ do_trap(trapnr, signr, str, regs, error_code, &info); \
31661+}
31662+
31663+DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
31664+DO_ERROR( 4, SIGSEGV, "overflow", overflow)
31665+DO_ERROR( 5, SIGSEGV, "bounds", bounds)
31666+DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
31667+DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
31668+DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
31669+DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
31670+DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
31671+DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
31672+DO_ERROR(18, SIGSEGV, "reserved", reserved)
31673+
31674+/* Runs on IST stack */
31675+asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
31676+{
31677+ if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
31678+ 12, SIGBUS) == NOTIFY_STOP)
31679+ return;
31680+ preempt_conditional_sti(regs);
31681+ do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
31682+ preempt_conditional_cli(regs);
31683+}
31684+
31685+asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
31686+{
31687+ static const char str[] = "double fault";
31688+ struct task_struct *tsk = current;
31689+
31690+	/* Return not checked because a double fault cannot be ignored */
31691+ notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
31692+
31693+ tsk->thread.error_code = error_code;
31694+ tsk->thread.trap_no = 8;
31695+
31696+ /* This is always a kernel trap and never fixable (and thus must
31697+ never return). */
31698+ for (;;)
31699+ die(str, regs, error_code);
31700+}
31701+
31702+asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
31703+ long error_code)
31704+{
31705+ struct task_struct *tsk = current;
31706+
31707+ conditional_sti(regs);
31708+
31709+ tsk->thread.error_code = error_code;
31710+ tsk->thread.trap_no = 13;
31711+
31712+ if (user_mode(regs)) {
31713+ if (exception_trace && unhandled_signal(tsk, SIGSEGV))
31714+ printk(KERN_INFO
31715+ "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
31716+ tsk->comm, tsk->pid,
31717+ regs->rip, regs->rsp, error_code);
31718+
31719+ force_sig(SIGSEGV, tsk);
31720+ return;
31721+ }
31722+
31723+ /* kernel gp */
31724+ {
31725+ const struct exception_table_entry *fixup;
31726+ fixup = search_exception_tables(regs->rip);
31727+ if (fixup) {
31728+ regs->rip = fixup->fixup;
31729+ return;
31730+ }
31731+ if (notify_die(DIE_GPF, "general protection fault", regs,
31732+ error_code, 13, SIGSEGV) == NOTIFY_STOP)
31733+ return;
31734+ die("general protection fault", regs, error_code);
31735+ }
31736+}
31737+
31738+static __kprobes void
31739+mem_parity_error(unsigned char reason, struct pt_regs * regs)
31740+{
31741+ printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
31742+ printk("You probably have a hardware problem with your RAM chips\n");
31743+
31744+#if 0 /* XEN */
31745+ /* Clear and disable the memory parity error line. */
31746+ reason = (reason & 0xf) | 4;
31747+ outb(reason, 0x61);
31748+#endif /* XEN */
31749+}
31750+
31751+static __kprobes void
31752+io_check_error(unsigned char reason, struct pt_regs * regs)
31753+{
31754+ printk("NMI: IOCK error (debug interrupt?)\n");
31755+ show_registers(regs);
31756+
31757+#if 0 /* XEN */
31758+ /* Re-enable the IOCK line, wait for a few seconds */
31759+ reason = (reason & 0xf) | 8;
31760+ outb(reason, 0x61);
31761+ mdelay(2000);
31762+ reason &= ~8;
31763+ outb(reason, 0x61);
31764+#endif /* XEN */
31765+}
31766+
31767+static __kprobes void
31768+unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
31769+{ printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
31770+ printk("Dazed and confused, but trying to continue\n");
31771+ printk("Do you have a strange power saving mode enabled?\n");
31772+}
31773+
31774+/* Runs on IST stack. This code must keep interrupts off all the time.
31775+ Nested NMIs are prevented by the CPU. */
31776+asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
31777+{
31778+ unsigned char reason = 0;
31779+ int cpu;
31780+
31781+ cpu = smp_processor_id();
31782+
31783+ /* Only the BSP gets external NMIs from the system. */
31784+ if (!cpu)
31785+ reason = get_nmi_reason();
31786+
31787+ if (!(reason & 0xc0)) {
31788+ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
31789+ == NOTIFY_STOP)
31790+ return;
31791+#ifdef CONFIG_X86_LOCAL_APIC
31792+ /*
31793+ * Ok, so this is none of the documented NMI sources,
31794+ * so it must be the NMI watchdog.
31795+ */
31796+ if (nmi_watchdog > 0) {
31797+ nmi_watchdog_tick(regs,reason);
31798+ return;
31799+ }
31800+#endif
31801+ unknown_nmi_error(reason, regs);
31802+ return;
31803+ }
31804+ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
31805+ return;
31806+
31807+ /* AK: following checks seem to be broken on modern chipsets. FIXME */
31808+
31809+ if (reason & 0x80)
31810+ mem_parity_error(reason, regs);
31811+ if (reason & 0x40)
31812+ io_check_error(reason, regs);
31813+}
31814+
31815+/* runs on IST stack. */
31816+asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
31817+{
31818+ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
31819+ return;
31820+ }
31821+ preempt_conditional_sti(regs);
31822+ do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
31823+ preempt_conditional_cli(regs);
31824+}
31825+
31826+/* Help handler running on IST stack to switch back to user stack
31827+ for scheduling or signal handling. The actual stack switch is done in
31828+ entry.S */
31829+asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
31830+{
31831+ struct pt_regs *regs = eregs;
31832+ /* Did already sync */
31833+ if (eregs == (struct pt_regs *)eregs->rsp)
31834+ ;
31835+ /* Exception from user space */
31836+ else if (user_mode(eregs))
31837+ regs = task_pt_regs(current);
31838+ /* Exception from kernel and interrupts are enabled. Move to
31839+ kernel process stack. */
31840+ else if (eregs->eflags & X86_EFLAGS_IF)
31841+ regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
31842+ if (eregs != regs)
31843+ *regs = *eregs;
31844+ return regs;
31845+}
31846+
31847+/* runs on IST stack. */
31848+asmlinkage void __kprobes do_debug(struct pt_regs * regs,
31849+ unsigned long error_code)
31850+{
31851+ unsigned long condition;
31852+ struct task_struct *tsk = current;
31853+ siginfo_t info;
31854+
31855+ get_debugreg(condition, 6);
31856+
31857+ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
31858+ SIGTRAP) == NOTIFY_STOP)
31859+ return;
31860+
31861+ preempt_conditional_sti(regs);
31862+
31863+ /* Mask out spurious debug traps due to lazy DR7 setting */
31864+ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
31865+ if (!tsk->thread.debugreg7) {
31866+ goto clear_dr7;
31867+ }
31868+ }
31869+
31870+ tsk->thread.debugreg6 = condition;
31871+
31872+ /* Mask out spurious TF errors due to lazy TF clearing */
31873+ if (condition & DR_STEP) {
31874+ /*
31875+ * The TF error should be masked out only if the current
31876+ * process is not traced and if the TRAP flag has been set
31877+ * previously by a tracing process (condition detected by
31878+ * the PT_DTRACE flag); remember that the i386 TRAP flag
31879+ * can be modified by the process itself in user mode,
31880+ * allowing programs to debug themselves without the ptrace()
31881+ * interface.
31882+ */
31883+ if (!user_mode(regs))
31884+ goto clear_TF_reenable;
31885+ /*
31886+ * Was the TF flag set by a debugger? If so, clear it now,
31887+ * so that register information is correct.
31888+ */
31889+ if (tsk->ptrace & PT_DTRACE) {
31890+ regs->eflags &= ~TF_MASK;
31891+ tsk->ptrace &= ~PT_DTRACE;
31892+ }
31893+ }
31894+
31895+ /* Ok, finally something we can handle */
31896+ tsk->thread.trap_no = 1;
31897+ tsk->thread.error_code = error_code;
31898+ info.si_signo = SIGTRAP;
31899+ info.si_errno = 0;
31900+ info.si_code = TRAP_BRKPT;
31901+ info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
31902+ force_sig_info(SIGTRAP, &info, tsk);
31903+
31904+clear_dr7:
31905+ set_debugreg(0UL, 7);
31906+ preempt_conditional_cli(regs);
31907+ return;
31908+
31909+clear_TF_reenable:
31910+ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
31911+ regs->eflags &= ~TF_MASK;
31912+ preempt_conditional_cli(regs);
31913+}
31914+
31915+static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
31916+{
31917+ const struct exception_table_entry *fixup;
31918+ fixup = search_exception_tables(regs->rip);
31919+ if (fixup) {
31920+ regs->rip = fixup->fixup;
31921+ return 1;
31922+ }
31923+ notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
31924+ /* Illegal floating point operation in the kernel */
31925+ current->thread.trap_no = trapnr;
31926+ die(str, regs, 0);
31927+ return 0;
31928+}
31929+
31930+/*
31931+ * Note that we play around with the 'TS' bit in an attempt to get
31932+ * the correct behaviour even in the presence of the asynchronous
31933+ * IRQ13 behaviour
31934+ */
31935+asmlinkage void do_coprocessor_error(struct pt_regs *regs)
31936+{
31937+ void __user *rip = (void __user *)(regs->rip);
31938+ struct task_struct * task;
31939+ siginfo_t info;
31940+ unsigned short cwd, swd;
31941+
31942+ conditional_sti(regs);
31943+ if (!user_mode(regs) &&
31944+ kernel_math_error(regs, "kernel x87 math error", 16))
31945+ return;
31946+
31947+ /*
31948+ * Save the info for the exception handler and clear the error.
31949+ */
31950+ task = current;
31951+ save_init_fpu(task);
31952+ task->thread.trap_no = 16;
31953+ task->thread.error_code = 0;
31954+ info.si_signo = SIGFPE;
31955+ info.si_errno = 0;
31956+ info.si_code = __SI_FAULT;
31957+ info.si_addr = rip;
31958+ /*
31959+ * (~cwd & swd) will mask out exceptions that are not set to unmasked
31960+	 * status.  0x3f are the exception bits in these regs, 0x200 is the
31961+	 * C1 bit you need in case of a stack fault, 0x040 is the stack
31962+ * fault bit. We should only be taking one exception at a time,
31963+ * so if this combination doesn't produce any single exception,
31964+ * then we have a bad program that isn't synchronizing its FPU usage
31965+ * and it will suffer the consequences since we won't be able to
31966+ * fully reproduce the context of the exception
31967+ */
31968+ cwd = get_fpu_cwd(task);
31969+ swd = get_fpu_swd(task);
31970+ switch (swd & ~cwd & 0x3f) {
31971+ case 0x000:
31972+ default:
31973+ break;
31974+ case 0x001: /* Invalid Op */
31975+ /*
31976+ * swd & 0x240 == 0x040: Stack Underflow
31977+ * swd & 0x240 == 0x240: Stack Overflow
31978+ * User must clear the SF bit (0x40) if set
31979+ */
31980+ info.si_code = FPE_FLTINV;
31981+ break;
31982+ case 0x002: /* Denormalize */
31983+ case 0x010: /* Underflow */
31984+ info.si_code = FPE_FLTUND;
31985+ break;
31986+ case 0x004: /* Zero Divide */
31987+ info.si_code = FPE_FLTDIV;
31988+ break;
31989+ case 0x008: /* Overflow */
31990+ info.si_code = FPE_FLTOVF;
31991+ break;
31992+ case 0x020: /* Precision */
31993+ info.si_code = FPE_FLTRES;
31994+ break;
31995+ }
31996+ force_sig_info(SIGFPE, &info, task);
31997+}
31998+
31999+asmlinkage void bad_intr(void)
32000+{
32001+ printk("bad interrupt");
32002+}
32003+
32004+asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
32005+{
32006+ void __user *rip = (void __user *)(regs->rip);
32007+ struct task_struct * task;
32008+ siginfo_t info;
32009+ unsigned short mxcsr;
32010+
32011+ conditional_sti(regs);
32012+ if (!user_mode(regs) &&
32013+ kernel_math_error(regs, "kernel simd math error", 19))
32014+ return;
32015+
32016+ /*
32017+ * Save the info for the exception handler and clear the error.
32018+ */
32019+ task = current;
32020+ save_init_fpu(task);
32021+ task->thread.trap_no = 19;
32022+ task->thread.error_code = 0;
32023+ info.si_signo = SIGFPE;
32024+ info.si_errno = 0;
32025+ info.si_code = __SI_FAULT;
32026+ info.si_addr = rip;
32027+ /*
32028+ * The SIMD FPU exceptions are handled a little differently, as there
32029+ * is only a single status/control register. Thus, to determine which
32030+ * unmasked exception was caught we must mask the exception mask bits
32031+ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
32032+ */
32033+ mxcsr = get_fpu_mxcsr(task);
32034+ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
32035+ case 0x000:
32036+ default:
32037+ break;
32038+ case 0x001: /* Invalid Op */
32039+ info.si_code = FPE_FLTINV;
32040+ break;
32041+ case 0x002: /* Denormalize */
32042+ case 0x010: /* Underflow */
32043+ info.si_code = FPE_FLTUND;
32044+ break;
32045+ case 0x004: /* Zero Divide */
32046+ info.si_code = FPE_FLTDIV;
32047+ break;
32048+ case 0x008: /* Overflow */
32049+ info.si_code = FPE_FLTOVF;
32050+ break;
32051+ case 0x020: /* Precision */
32052+ info.si_code = FPE_FLTRES;
32053+ break;
32054+ }
32055+ force_sig_info(SIGFPE, &info, task);
32056+}
32057+
32058+asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
32059+{
32060+}
32061+
32062+#if 0
32063+asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
32064+{
32065+}
32066+#endif
32067+
32068+asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
32069+{
32070+}
32071+
32072+/*
32073+ * 'math_state_restore()' saves the current math information in the
32074+ * old math state array, and gets the new ones from the current task
32075+ *
32076+ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
32077+ * Don't touch unless you *really* know how it works.
32078+ */
32079+asmlinkage void math_state_restore(void)
32080+{
32081+ struct task_struct *me = current;
32082+ /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
32083+
32084+ if (!used_math())
32085+ init_fpu(me);
32086+ restore_fpu_checking(&me->thread.i387.fxsave);
32087+ task_thread_info(me)->status |= TS_USEDFPU;
32088+}
32089+
32090+
32091+/*
32092+ * NB. All these are "interrupt gates" (i.e. events_mask is set) because we
32093+ * specify <dpl>|4 in the second field.
32094+ */
32095+static trap_info_t __cpuinitdata trap_table[] = {
32096+ { 0, 0|4, __KERNEL_CS, (unsigned long)divide_error },
32097+ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
32098+ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
32099+ { 4, 3|4, __KERNEL_CS, (unsigned long)overflow },
32100+ { 5, 0|4, __KERNEL_CS, (unsigned long)bounds },
32101+ { 6, 0|4, __KERNEL_CS, (unsigned long)invalid_op },
32102+ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
32103+ { 9, 0|4, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun},
32104+ { 10, 0|4, __KERNEL_CS, (unsigned long)invalid_TSS },
32105+ { 11, 0|4, __KERNEL_CS, (unsigned long)segment_not_present },
32106+ { 12, 0|4, __KERNEL_CS, (unsigned long)stack_segment },
32107+ { 13, 0|4, __KERNEL_CS, (unsigned long)general_protection },
32108+ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
32109+ { 15, 0|4, __KERNEL_CS, (unsigned long)spurious_interrupt_bug },
32110+ { 16, 0|4, __KERNEL_CS, (unsigned long)coprocessor_error },
32111+ { 17, 0|4, __KERNEL_CS, (unsigned long)alignment_check },
32112+#ifdef CONFIG_X86_MCE
32113+ { 18, 0|4, __KERNEL_CS, (unsigned long)machine_check },
32114+#endif
32115+ { 19, 0|4, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
32116+#ifdef CONFIG_IA32_EMULATION
32117+ { IA32_SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)ia32_syscall},
32118+#endif
32119+ { 0, 0, 0, 0 }
32120+};
32121+
32122+void __init trap_init(void)
32123+{
32124+ int ret;
32125+
32126+ ret = HYPERVISOR_set_trap_table(trap_table);
32127+ if (ret)
32128+ printk("HYPERVISOR_set_trap_table failed: error %d\n", ret);
32129+
32130+ /*
32131+ * Should be a barrier for any external CPU state.
32132+ */
32133+ cpu_init();
32134+}
32135+
32136+void __cpuinit smp_trap_init(trap_info_t *trap_ctxt)
32137+{
32138+ const trap_info_t *t = trap_table;
32139+
32140+ for (t = trap_table; t->address; t++) {
32141+ trap_ctxt[t->vector].flags = t->flags;
32142+ trap_ctxt[t->vector].cs = t->cs;
32143+ trap_ctxt[t->vector].address = t->address;
32144+ }
32145+}
32146+
32147+
32148+/* Actual parsing is done early in setup.c. */
32149+static int __init oops_dummy(char *s)
32150+{
32151+ panic_on_oops = 1;
32152+ return 1;
32153+}
32154+__setup("oops=", oops_dummy);
32155+
32156+static int __init kstack_setup(char *s)
32157+{
32158+ kstack_depth_to_print = simple_strtoul(s,NULL,0);
32159+ return 1;
32160+}
32161+__setup("kstack=", kstack_setup);
32162+
32163+#ifdef CONFIG_STACK_UNWIND
32164+static int __init call_trace_setup(char *s)
32165+{
32166+ if (strcmp(s, "old") == 0)
32167+ call_trace = -1;
32168+ else if (strcmp(s, "both") == 0)
32169+ call_trace = 0;
32170+ else if (strcmp(s, "newfallback") == 0)
32171+ call_trace = 1;
32172+ else if (strcmp(s, "new") == 0)
32173+ call_trace = 2;
32174+ return 1;
32175+}
32176+__setup("call_trace=", call_trace_setup);
32177+#endif
32178Index: head-2008-11-25/arch/x86/kernel/vsyscall_64-xen.c
32179===================================================================
32180--- /dev/null 1970-01-01 00:00:00.000000000 +0000
32181+++ head-2008-11-25/arch/x86/kernel/vsyscall_64-xen.c 2007-06-18 08:38:13.000000000 +0200
32182@@ -0,0 +1,227 @@
32183+/*
32184+ * linux/arch/x86_64/kernel/vsyscall.c
32185+ *
32186+ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
32187+ * Copyright 2003 Andi Kleen, SuSE Labs.
32188+ *
32189+ * Thanks to hpa@transmeta.com for some useful hint.
32190+ * Special thanks to Ingo Molnar for his early experience with
32191+ * a different vsyscall implementation for Linux/IA32 and for the name.
32192+ *
32193+ * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
32194+ * at virtual address -10Mbyte+1024bytes etc... There are at max 4
32195+ * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
32196+ * jumping out of line if necessary. We cannot add more with this
32197+ * mechanism because older kernels won't return -ENOSYS.
32198+ * If we want more than four we need a vDSO.
32199+ *
32200+ * Note: the concept clashes with user mode linux. If you use UML and
32201+ * want per guest time just set the kernel.vsyscall64 sysctl to 0.
32202+ */
32203+
32204+#include <linux/time.h>
32205+#include <linux/init.h>
32206+#include <linux/kernel.h>
32207+#include <linux/timer.h>
32208+#include <linux/seqlock.h>
32209+#include <linux/jiffies.h>
32210+#include <linux/sysctl.h>
32211+
32212+#include <asm/vsyscall.h>
32213+#include <asm/pgtable.h>
32214+#include <asm/page.h>
32215+#include <asm/fixmap.h>
32216+#include <asm/errno.h>
32217+#include <asm/io.h>
32218+
32219+#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
32220+
32221+int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
32222+seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
32223+
32224+#include <asm/unistd.h>
32225+
32226+static __always_inline void timeval_normalize(struct timeval * tv)
32227+{
32228+ time_t __sec;
32229+
32230+ __sec = tv->tv_usec / 1000000;
32231+ if (__sec) {
32232+ tv->tv_usec %= 1000000;
32233+ tv->tv_sec += __sec;
32234+ }
32235+}
32236+
32237+static __always_inline void do_vgettimeofday(struct timeval * tv)
32238+{
32239+ long sequence, t;
32240+ unsigned long sec, usec;
32241+
32242+ do {
32243+ sequence = read_seqbegin(&__xtime_lock);
32244+
32245+ sec = __xtime.tv_sec;
32246+ usec = (__xtime.tv_nsec / 1000) +
32247+ (__jiffies - __wall_jiffies) * (1000000 / HZ);
32248+
32249+ if (__vxtime.mode != VXTIME_HPET) {
32250+ t = get_cycles_sync();
32251+ if (t < __vxtime.last_tsc)
32252+ t = __vxtime.last_tsc;
32253+ usec += ((t - __vxtime.last_tsc) *
32254+ __vxtime.tsc_quot) >> 32;
32255+ /* See comment in x86_64 do_gettimeofday. */
32256+ } else {
32257+ usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
32258+ __vxtime.last) * __vxtime.quot) >> 32;
32259+ }
32260+ } while (read_seqretry(&__xtime_lock, sequence));
32261+
32262+ tv->tv_sec = sec + usec / 1000000;
32263+ tv->tv_usec = usec % 1000000;
32264+}
32265+
32266+/* RED-PEN may want to re-add seq locking, but then the variable should be write-once. */
32267+static __always_inline void do_get_tz(struct timezone * tz)
32268+{
32269+ *tz = __sys_tz;
32270+}
32271+
32272+static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
32273+{
32274+ int ret;
32275+ asm volatile("vsysc2: syscall"
32276+ : "=a" (ret)
32277+ : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
32278+ return ret;
32279+}
32280+
32281+static __always_inline long time_syscall(long *t)
32282+{
32283+ long secs;
32284+ asm volatile("vsysc1: syscall"
32285+ : "=a" (secs)
32286+ : "0" (__NR_time),"D" (t) : __syscall_clobber);
32287+ return secs;
32288+}
32289+
32290+int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
32291+{
32292+ if (!__sysctl_vsyscall)
32293+ return gettimeofday(tv,tz);
32294+ if (tv)
32295+ do_vgettimeofday(tv);
32296+ if (tz)
32297+ do_get_tz(tz);
32298+ return 0;
32299+}
32300+
32301+/* This will break when the xtime seconds get inaccurate, but that is
32302+ * unlikely */
32303+time_t __vsyscall(1) vtime(time_t *t)
32304+{
32305+ if (!__sysctl_vsyscall)
32306+ return time_syscall(t);
32307+ else if (t)
32308+ *t = __xtime.tv_sec;
32309+ return __xtime.tv_sec;
32310+}
32311+
32312+long __vsyscall(2) venosys_0(void)
32313+{
32314+ return -ENOSYS;
32315+}
32316+
32317+long __vsyscall(3) venosys_1(void)
32318+{
32319+ return -ENOSYS;
32320+}
32321+
32322+#ifdef CONFIG_SYSCTL
32323+
32324+#define SYSCALL 0x050f
32325+#define NOP2 0x9090
32326+
32327+/*
32328+ * NOP out syscall in vsyscall page when not needed.
32329+ */
32330+static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
32331+ void __user *buffer, size_t *lenp, loff_t *ppos)
32332+{
32333+ extern u16 vsysc1, vsysc2;
32334+ u16 *map1, *map2;
32335+ int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
32336+ if (!write)
32337+ return ret;
32338+ /* gcc has some trouble with __va(__pa()), so just do it this
32339+ way. */
32340+ map1 = ioremap(__pa_symbol(&vsysc1), 2);
32341+ if (!map1)
32342+ return -ENOMEM;
32343+ map2 = ioremap(__pa_symbol(&vsysc2), 2);
32344+ if (!map2) {
32345+ ret = -ENOMEM;
32346+ goto out;
32347+ }
32348+ if (!sysctl_vsyscall) {
32349+ *map1 = SYSCALL;
32350+ *map2 = SYSCALL;
32351+ } else {
32352+ *map1 = NOP2;
32353+ *map2 = NOP2;
32354+ }
32355+ iounmap(map2);
32356+out:
32357+ iounmap(map1);
32358+ return ret;
32359+}
32360+
32361+static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
32362+ void __user *oldval, size_t __user *oldlenp,
32363+ void __user *newval, size_t newlen,
32364+ void **context)
32365+{
32366+ return -ENOSYS;
32367+}
32368+
32369+static ctl_table kernel_table2[] = {
32370+ { .ctl_name = 99, .procname = "vsyscall64",
32371+ .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
32372+ .strategy = vsyscall_sysctl_nostrat,
32373+ .proc_handler = vsyscall_sysctl_change },
32374+ { 0, }
32375+};
32376+
32377+static ctl_table kernel_root_table2[] = {
32378+ { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
32379+ .child = kernel_table2 },
32380+ { 0 },
32381+};
32382+
32383+#endif
32384+
32385+static void __init map_vsyscall(void)
32386+{
32387+ extern char __vsyscall_0;
32388+ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
32389+
32390+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
32391+}
32392+
32393+static int __init vsyscall_init(void)
32394+{
32395+ BUG_ON(((unsigned long) &vgettimeofday !=
32396+ VSYSCALL_ADDR(__NR_vgettimeofday)));
32397+ BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
32398+ BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
32399+ map_vsyscall();
32400+#ifdef CONFIG_XEN
32401+	sysctl_vsyscall = 0; /* disable vgettimeofday() */
32402+#endif
32403+#ifdef CONFIG_SYSCTL
32404+ register_sysctl_table(kernel_root_table2, 0);
32405+#endif
32406+ return 0;
32407+}
32408+
32409+__initcall(vsyscall_init);
32410Index: head-2008-11-25/arch/x86/kernel/xen_entry_64.S
32411===================================================================
32412--- /dev/null 1970-01-01 00:00:00.000000000 +0000
32413+++ head-2008-11-25/arch/x86/kernel/xen_entry_64.S 2008-04-02 12:34:02.000000000 +0200
32414@@ -0,0 +1,36 @@
32415+/*
32416+ * Copied from arch/xen/i386/kernel/entry.S
32417+ */
32418+/* Offsets into shared_info_t. */
32419+#define evtchn_upcall_pending /* 0 */
32420+#define evtchn_upcall_mask 1
32421+
32422+#define sizeof_vcpu_shift 6
32423+
32424+#ifdef CONFIG_SMP
32425+//#define preempt_disable(reg) incl threadinfo_preempt_count(reg)
32426+//#define preempt_enable(reg) decl threadinfo_preempt_count(reg)
32427+#define preempt_disable(reg)
32428+#define preempt_enable(reg)
32429+#define XEN_GET_VCPU_INFO(reg) preempt_disable(%rbp) ; \
32430+ movq %gs:pda_cpunumber,reg ; \
32431+ shl $32, reg ; \
32432+ shr $32-sizeof_vcpu_shift,reg ; \
32433+ addq HYPERVISOR_shared_info,reg
32434+#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%rbp) ; \
32435+#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
32436+#else
32437+#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
32438+#define XEN_PUT_VCPU_INFO(reg)
32439+#define XEN_PUT_VCPU_INFO_fixup
32440+#endif
32441+
32442+#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
32443+#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
32444+#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
32445+ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
32446+ XEN_PUT_VCPU_INFO(reg)
32447+#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
32448+ XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
32449+ XEN_PUT_VCPU_INFO(reg)
32450+#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
32451Index: head-2008-11-25/arch/x86/mm/fault_64-xen.c
32452===================================================================
32453--- /dev/null 1970-01-01 00:00:00.000000000 +0000
32454+++ head-2008-11-25/arch/x86/mm/fault_64-xen.c 2007-11-02 17:34:23.000000000 +0100
32455@@ -0,0 +1,724 @@
32456+/*
32457+ * linux/arch/x86-64/mm/fault.c
32458+ *
32459+ * Copyright (C) 1995 Linus Torvalds
32460+ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
32461+ */
32462+
32463+#include <linux/signal.h>
32464+#include <linux/sched.h>
32465+#include <linux/kernel.h>
32466+#include <linux/errno.h>
32467+#include <linux/string.h>
32468+#include <linux/types.h>
32469+#include <linux/ptrace.h>
32470+#include <linux/mman.h>
32471+#include <linux/mm.h>
32472+#include <linux/smp.h>
32473+#include <linux/smp_lock.h>
32474+#include <linux/interrupt.h>
32475+#include <linux/init.h>
32476+#include <linux/tty.h>
32477+#include <linux/vt_kern.h> /* For unblank_screen() */
32478+#include <linux/compiler.h>
32479+#include <linux/module.h>
32480+#include <linux/kprobes.h>
32481+
32482+#include <asm/system.h>
32483+#include <asm/uaccess.h>
32484+#include <asm/pgalloc.h>
32485+#include <asm/smp.h>
32486+#include <asm/tlbflush.h>
32487+#include <asm/proto.h>
32488+#include <asm/kdebug.h>
32489+#include <asm-generic/sections.h>
32490+
32491+/* Page fault error code bits */
32492+#define PF_PROT (1<<0) /* or no page found */
32493+#define PF_WRITE (1<<1)
32494+#define PF_USER (1<<2)
32495+#define PF_RSVD (1<<3)
32496+#define PF_INSTR (1<<4)
32497+
32498+#ifdef CONFIG_KPROBES
32499+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
32500+
32501+/* Hook to register for page fault notifications */
32502+int register_page_fault_notifier(struct notifier_block *nb)
32503+{
32504+ vmalloc_sync_all();
32505+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
32506+}
32507+
32508+int unregister_page_fault_notifier(struct notifier_block *nb)
32509+{
32510+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
32511+}
32512+
32513+static inline int notify_page_fault(enum die_val val, const char *str,
32514+ struct pt_regs *regs, long err, int trap, int sig)
32515+{
32516+ struct die_args args = {
32517+ .regs = regs,
32518+ .str = str,
32519+ .err = err,
32520+ .trapnr = trap,
32521+ .signr = sig
32522+ };
32523+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
32524+}
32525+#else
32526+static inline int notify_page_fault(enum die_val val, const char *str,
32527+ struct pt_regs *regs, long err, int trap, int sig)
32528+{
32529+ return NOTIFY_DONE;
32530+}
32531+#endif
32532+
32533+void bust_spinlocks(int yes)
32534+{
32535+ int loglevel_save = console_loglevel;
32536+ if (yes) {
32537+ oops_in_progress = 1;
32538+ } else {
32539+#ifdef CONFIG_VT
32540+ unblank_screen();
32541+#endif
32542+ oops_in_progress = 0;
32543+ /*
32544+ * OK, the message is on the console. Now we call printk()
32545+ * without oops_in_progress set so that printk will give klogd
32546+ * a poke. Hold onto your hats...
32547+ */
32548+ console_loglevel = 15; /* NMI oopser may have shut the console up */
32549+ printk(" ");
32550+ console_loglevel = loglevel_save;
32551+ }
32552+}
32553+
32554+/* Sometimes the CPU reports invalid exceptions on prefetch.
32555+ Check that here and ignore.
32556+ Opcode checker based on code by Richard Brunner */
32557+static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
32558+ unsigned long error_code)
32559+{
32560+ unsigned char *instr;
32561+ int scan_more = 1;
32562+ int prefetch = 0;
32563+ unsigned char *max_instr;
32564+
32565+	/* If it was an exec fault, ignore it */
32566+ if (error_code & PF_INSTR)
32567+ return 0;
32568+
32569+ instr = (unsigned char *)convert_rip_to_linear(current, regs);
32570+ max_instr = instr + 15;
32571+
32572+ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
32573+ return 0;
32574+
32575+ while (scan_more && instr < max_instr) {
32576+ unsigned char opcode;
32577+ unsigned char instr_hi;
32578+ unsigned char instr_lo;
32579+
32580+ if (__get_user(opcode, instr))
32581+ break;
32582+
32583+ instr_hi = opcode & 0xf0;
32584+ instr_lo = opcode & 0x0f;
32585+ instr++;
32586+
32587+ switch (instr_hi) {
32588+ case 0x20:
32589+ case 0x30:
32590+ /* Values 0x26,0x2E,0x36,0x3E are valid x86
32591+ prefixes. In long mode, the CPU will signal
32592+ invalid opcode if some of these prefixes are
32593+ present so we will never get here anyway */
32594+ scan_more = ((instr_lo & 7) == 0x6);
32595+ break;
32596+
32597+ case 0x40:
32598+ /* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
32599+ Need to figure out under what instruction mode the
32600+ instruction was issued ... */
32601+ /* Could check the LDT for lm, but for now it's good
32602+ enough to assume that long mode only uses well known
32603+ segments or kernel. */
32604+ scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
32605+ break;
32606+
32607+ case 0x60:
32608+ /* 0x64 thru 0x67 are valid prefixes in all modes. */
32609+ scan_more = (instr_lo & 0xC) == 0x4;
32610+ break;
32611+ case 0xF0:
32612+ /* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
32613+ scan_more = !instr_lo || (instr_lo>>1) == 1;
32614+ break;
32615+ case 0x00:
32616+ /* Prefetch instruction is 0x0F0D or 0x0F18 */
32617+ scan_more = 0;
32618+ if (__get_user(opcode, instr))
32619+ break;
32620+ prefetch = (instr_lo == 0xF) &&
32621+ (opcode == 0x0D || opcode == 0x18);
32622+ break;
32623+ default:
32624+ scan_more = 0;
32625+ break;
32626+ }
32627+ }
32628+ return prefetch;
32629+}
32630+
32631+static int bad_address(void *p)
32632+{
32633+ unsigned long dummy;
32634+ return __get_user(dummy, (unsigned long *)p);
32635+}
32636+
32637+void dump_pagetable(unsigned long address)
32638+{
32639+ pgd_t *pgd;
32640+ pud_t *pud;
32641+ pmd_t *pmd;
32642+ pte_t *pte;
32643+
32644+ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
32645+ pgd += pgd_index(address);
32646+ if (bad_address(pgd)) goto bad;
32647+ printk("PGD %lx ", pgd_val(*pgd));
32648+ if (!pgd_present(*pgd)) goto ret;
32649+
32650+ pud = pud_offset(pgd, address);
32651+ if (bad_address(pud)) goto bad;
32652+ printk("PUD %lx ", pud_val(*pud));
32653+ if (!pud_present(*pud)) goto ret;
32654+
32655+ pmd = pmd_offset(pud, address);
32656+ if (bad_address(pmd)) goto bad;
32657+ printk("PMD %lx ", pmd_val(*pmd));
32658+ if (!pmd_present(*pmd)) goto ret;
32659+
32660+ pte = pte_offset_kernel(pmd, address);
32661+ if (bad_address(pte)) goto bad;
32662+ printk("PTE %lx", pte_val(*pte));
32663+ret:
32664+ printk("\n");
32665+ return;
32666+bad:
32667+ printk("BAD\n");
32668+}
32669+
32670+static const char errata93_warning[] =
32671+KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
32672+KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
32673+KERN_ERR "******* Please consider a BIOS update.\n"
32674+KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
32675+
32676+/* Workaround for K8 erratum #93 & buggy BIOS.
32677+ BIOS SMM functions are required to use a specific workaround
32678+ to avoid corruption of the 64bit RIP register on C stepping K8.
32679+ A lot of BIOS that didn't get tested properly miss this.
32680+ The OS sees this as a page fault with the upper 32bits of RIP cleared.
32681+ Try to work around it here.
32682+ Note we only handle faults in kernel here. */
32683+
32684+static int is_errata93(struct pt_regs *regs, unsigned long address)
32685+{
32686+ static int warned;
32687+ if (address != regs->rip)
32688+ return 0;
32689+ if ((address >> 32) != 0)
32690+ return 0;
32691+ address |= 0xffffffffUL << 32;
32692+ if ((address >= (u64)_stext && address <= (u64)_etext) ||
32693+ (address >= MODULES_VADDR && address <= MODULES_END)) {
32694+ if (!warned) {
32695+ printk(errata93_warning);
32696+ warned = 1;
32697+ }
32698+ regs->rip = address;
32699+ return 1;
32700+ }
32701+ return 0;
32702+}
32703+
32704+int unhandled_signal(struct task_struct *tsk, int sig)
32705+{
32706+ if (tsk->pid == 1)
32707+ return 1;
32708+ if (tsk->ptrace & PT_PTRACED)
32709+ return 0;
32710+ return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
32711+ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
32712+}
32713+
32714+static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
32715+ unsigned long error_code)
32716+{
32717+ unsigned long flags = oops_begin();
32718+ struct task_struct *tsk;
32719+
32720+ printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
32721+ current->comm, address);
32722+ dump_pagetable(address);
32723+ tsk = current;
32724+ tsk->thread.cr2 = address;
32725+ tsk->thread.trap_no = 14;
32726+ tsk->thread.error_code = error_code;
32727+ __die("Bad pagetable", regs, error_code);
32728+ oops_end(flags);
32729+ do_exit(SIGKILL);
32730+}
32731+
32732+/*
32733+ * Handle a fault on the vmalloc area
32734+ *
32735+ * This assumes no large pages in there.
32736+ */
32737+static int vmalloc_fault(unsigned long address)
32738+{
32739+ pgd_t *pgd, *pgd_ref;
32740+ pud_t *pud, *pud_ref;
32741+ pmd_t *pmd, *pmd_ref;
32742+ pte_t *pte, *pte_ref;
32743+
32744+ /* Copy kernel mappings over when needed. This can also
32745+	   happen within a race in page table update. In the latter
32746+ case just flush. */
32747+
32748+ /* On Xen the line below does not always work. Needs investigating! */
32749+ /*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
32750+ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
32751+ pgd += pgd_index(address);
32752+ pgd_ref = pgd_offset_k(address);
32753+ if (pgd_none(*pgd_ref))
32754+ return -1;
32755+ if (pgd_none(*pgd))
32756+ set_pgd(pgd, *pgd_ref);
32757+ else
32758+ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
32759+
32760+ /* Below here mismatches are bugs because these lower tables
32761+ are shared */
32762+
32763+ pud = pud_offset(pgd, address);
32764+ pud_ref = pud_offset(pgd_ref, address);
32765+ if (pud_none(*pud_ref))
32766+ return -1;
32767+ if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
32768+ BUG();
32769+ pmd = pmd_offset(pud, address);
32770+ pmd_ref = pmd_offset(pud_ref, address);
32771+ if (pmd_none(*pmd_ref))
32772+ return -1;
32773+ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
32774+ BUG();
32775+ pte_ref = pte_offset_kernel(pmd_ref, address);
32776+ if (!pte_present(*pte_ref))
32777+ return -1;
32778+ pte = pte_offset_kernel(pmd, address);
32779+ /* Don't use pte_page here, because the mappings can point
32780+ outside mem_map, and the NUMA hash lookup cannot handle
32781+ that. */
32782+ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
32783+ BUG();
32784+ return 0;
32785+}
32786+
32787+int page_fault_trace = 0;
32788+int exception_trace = 1;
32789+
32790+
32791+#define MEM_VERBOSE 1
32792+
32793+#ifdef MEM_VERBOSE
32794+#define MEM_LOG(_f, _a...) \
32795+ printk("fault.c:[%d]-> " _f "\n", \
32796+ __LINE__ , ## _a )
32797+#else
32798+#define MEM_LOG(_f, _a...) ((void)0)
32799+#endif
32800+
32801+static int spurious_fault(struct pt_regs *regs,
32802+ unsigned long address,
32803+ unsigned long error_code)
32804+{
32805+ pgd_t *pgd;
32806+ pud_t *pud;
32807+ pmd_t *pmd;
32808+ pte_t *pte;
32809+
32810+#ifdef CONFIG_XEN
32811+ /* Faults in hypervisor area are never spurious. */
32812+ if ((address >= HYPERVISOR_VIRT_START) &&
32813+ (address < HYPERVISOR_VIRT_END))
32814+ return 0;
32815+#endif
32816+
32817+ /* Reserved-bit violation or user access to kernel space? */
32818+ if (error_code & (PF_RSVD|PF_USER))
32819+ return 0;
32820+
32821+ pgd = init_mm.pgd + pgd_index(address);
32822+ if (!pgd_present(*pgd))
32823+ return 0;
32824+
32825+ pud = pud_offset(pgd, address);
32826+ if (!pud_present(*pud))
32827+ return 0;
32828+
32829+ pmd = pmd_offset(pud, address);
32830+ if (!pmd_present(*pmd))
32831+ return 0;
32832+
32833+ pte = pte_offset_kernel(pmd, address);
32834+ if (!pte_present(*pte))
32835+ return 0;
32836+ if ((error_code & PF_WRITE) && !pte_write(*pte))
32837+ return 0;
32838+ if ((error_code & PF_INSTR) && (__pte_val(*pte) & _PAGE_NX))
32839+ return 0;
32840+
32841+ return 1;
32842+}
32843+
32844+/*
32845+ * This routine handles page faults. It determines the address,
32846+ * and the problem, and then passes it off to one of the appropriate
32847+ * routines.
32848+ */
32849+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
32850+ unsigned long error_code)
32851+{
32852+ struct task_struct *tsk;
32853+ struct mm_struct *mm;
32854+ struct vm_area_struct * vma;
32855+ unsigned long address;
32856+ const struct exception_table_entry *fixup;
32857+ int write;
32858+ unsigned long flags;
32859+ siginfo_t info;
32860+
32861+ if (!user_mode(regs))
32862+ error_code &= ~PF_USER; /* means kernel */
32863+
32864+ tsk = current;
32865+ mm = tsk->mm;
32866+ prefetchw(&mm->mmap_sem);
32867+
32868+ /* get the address */
32869+ address = current_vcpu_info()->arch.cr2;
32870+
32871+ info.si_code = SEGV_MAPERR;
32872+
32873+
32874+ /*
32875+ * We fault-in kernel-space virtual memory on-demand. The
32876+ * 'reference' page table is init_mm.pgd.
32877+ *
32878+ * NOTE! We MUST NOT take any locks for this case. We may
32879+ * be in an interrupt or a critical region, and should
32880+ * only copy the information from the master page table,
32881+ * nothing more.
32882+ *
32883+ * This verifies that the fault happens in kernel space
32884+ * (error_code & 4) == 0, and that the fault was not a
32885+ * protection error (error_code & 9) == 0.
32886+ */
32887+ if (unlikely(address >= TASK_SIZE64)) {
32888+ /*
32889+ * Don't check for the module range here: its PML4
32890+ * is always initialized because it's shared with the main
32891+ * kernel text. Only vmalloc may need PML4 syncups.
32892+ */
32893+ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
32894+ ((address >= VMALLOC_START && address < VMALLOC_END))) {
32895+ if (vmalloc_fault(address) >= 0)
32896+ return;
32897+ }
32898+ /* Can take a spurious fault if mapping changes R/O -> R/W. */
32899+ if (spurious_fault(regs, address, error_code))
32900+ return;
32901+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
32902+ SIGSEGV) == NOTIFY_STOP)
32903+ return;
32904+ /*
32905+ * Don't take the mm semaphore here. If we fixup a prefetch
32906+ * fault we could otherwise deadlock.
32907+ */
32908+ goto bad_area_nosemaphore;
32909+ }
32910+
32911+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
32912+ SIGSEGV) == NOTIFY_STOP)
32913+ return;
32914+
32915+ if (likely(regs->eflags & X86_EFLAGS_IF))
32916+ local_irq_enable();
32917+
32918+ if (unlikely(page_fault_trace))
32919+ printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
32920+ regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
32921+
32922+ if (unlikely(error_code & PF_RSVD))
32923+ pgtable_bad(address, regs, error_code);
32924+
32925+ /*
32926+ * If we're in an interrupt or have no user
32927+ * context, we must not take the fault..
32928+ */
32929+ if (unlikely(in_atomic() || !mm))
32930+ goto bad_area_nosemaphore;
32931+
32932+ again:
32933+ /* When running in the kernel we expect faults to occur only to
32934+ * addresses in user space. All other faults represent errors in the
32935+	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
32936+ * erroneous fault occurring in a code path which already holds mmap_sem
32937+ * we will deadlock attempting to validate the fault against the
32938+ * address space. Luckily the kernel only validly references user
32939+ * space from well defined areas of code, which are listed in the
32940+ * exceptions table.
32941+ *
32942+ * As the vast majority of faults will be valid we will only perform
32943+	 * the source reference check when there is a possibility of a deadlock.
32944+ * Attempt to lock the address space, if we cannot we then validate the
32945+ * source. If this is invalid we can skip the address space check,
32946+ * thus avoiding the deadlock.
32947+ */
32948+ if (!down_read_trylock(&mm->mmap_sem)) {
32949+ if ((error_code & PF_USER) == 0 &&
32950+ !search_exception_tables(regs->rip))
32951+ goto bad_area_nosemaphore;
32952+ down_read(&mm->mmap_sem);
32953+ }
32954+
32955+ vma = find_vma(mm, address);
32956+ if (!vma)
32957+ goto bad_area;
32958+ if (likely(vma->vm_start <= address))
32959+ goto good_area;
32960+ if (!(vma->vm_flags & VM_GROWSDOWN))
32961+ goto bad_area;
32962+ if (error_code & 4) {
32963+ /* Allow userspace just enough access below the stack pointer
32964+ * to let the 'enter' instruction work.
32965+ */
32966+ if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
32967+ goto bad_area;
32968+ }
32969+ if (expand_stack(vma, address))
32970+ goto bad_area;
32971+/*
32972+ * Ok, we have a good vm_area for this memory access, so
32973+ * we can handle it..
32974+ */
32975+good_area:
32976+ info.si_code = SEGV_ACCERR;
32977+ write = 0;
32978+ switch (error_code & (PF_PROT|PF_WRITE)) {
32979+ default: /* 3: write, present */
32980+ /* fall through */
32981+ case PF_WRITE: /* write, not present */
32982+ if (!(vma->vm_flags & VM_WRITE))
32983+ goto bad_area;
32984+ write++;
32985+ break;
32986+ case PF_PROT: /* read, present */
32987+ goto bad_area;
32988+ case 0: /* read, not present */
32989+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
32990+ goto bad_area;
32991+ }
32992+
32993+ /*
32994+ * If for any reason at all we couldn't handle the fault,
32995+ * make sure we exit gracefully rather than endlessly redo
32996+ * the fault.
32997+ */
32998+ switch (handle_mm_fault(mm, vma, address, write)) {
32999+ case VM_FAULT_MINOR:
33000+ tsk->min_flt++;
33001+ break;
33002+ case VM_FAULT_MAJOR:
33003+ tsk->maj_flt++;
33004+ break;
33005+ case VM_FAULT_SIGBUS:
33006+ goto do_sigbus;
33007+ default:
33008+ goto out_of_memory;
33009+ }
33010+
33011+ up_read(&mm->mmap_sem);
33012+ return;
33013+
33014+/*
33015+ * Something tried to access memory that isn't in our memory map..
33016+ * Fix it, but check if it's kernel or user first..
33017+ */
33018+bad_area:
33019+ up_read(&mm->mmap_sem);
33020+
33021+bad_area_nosemaphore:
33022+ /* User mode accesses just cause a SIGSEGV */
33023+ if (error_code & PF_USER) {
33024+ if (is_prefetch(regs, address, error_code))
33025+ return;
33026+
33027+		/* Work around K8 erratum #100: K8 in compat mode
33028+ occasionally jumps to illegal addresses >4GB. We
33029+ catch this here in the page fault handler because
33030+ these addresses are not reachable. Just detect this
33031+ case and return. Any code segment in LDT is
33032+ compatibility mode. */
33033+ if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
33034+ (address >> 32))
33035+ return;
33036+
33037+ if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
33038+ printk(
33039+ "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
33040+ tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
33041+ tsk->comm, tsk->pid, address, regs->rip,
33042+ regs->rsp, error_code);
33043+ }
33044+
33045+ tsk->thread.cr2 = address;
33046+ /* Kernel addresses are always protection faults */
33047+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
33048+ tsk->thread.trap_no = 14;
33049+ info.si_signo = SIGSEGV;
33050+ info.si_errno = 0;
33051+ /* info.si_code has been set above */
33052+ info.si_addr = (void __user *)address;
33053+ force_sig_info(SIGSEGV, &info, tsk);
33054+ return;
33055+ }
33056+
33057+no_context:
33058+
33059+ /* Are we prepared to handle this kernel fault? */
33060+ fixup = search_exception_tables(regs->rip);
33061+ if (fixup) {
33062+ regs->rip = fixup->fixup;
33063+ return;
33064+ }
33065+
33066+ /*
33067+ * Hall of shame of CPU/BIOS bugs.
33068+ */
33069+
33070+ if (is_prefetch(regs, address, error_code))
33071+ return;
33072+
33073+ if (is_errata93(regs, address))
33074+ return;
33075+
33076+/*
33077+ * Oops. The kernel tried to access some bad page. We'll have to
33078+ * terminate things with extreme prejudice.
33079+ */
33080+
33081+ flags = oops_begin();
33082+
33083+ if (address < PAGE_SIZE)
33084+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
33085+ else
33086+ printk(KERN_ALERT "Unable to handle kernel paging request");
33087+ printk(" at %016lx RIP: \n" KERN_ALERT,address);
33088+ printk_address(regs->rip);
33089+ dump_pagetable(address);
33090+ tsk->thread.cr2 = address;
33091+ tsk->thread.trap_no = 14;
33092+ tsk->thread.error_code = error_code;
33093+ __die("Oops", regs, error_code);
33094+ /* Executive summary in case the body of the oops scrolled away */
33095+ printk(KERN_EMERG "CR2: %016lx\n", address);
33096+ oops_end(flags);
33097+ do_exit(SIGKILL);
33098+
33099+/*
33100+ * We ran out of memory, or some other thing happened to us that made
33101+ * us unable to handle the page fault gracefully.
33102+ */
33103+out_of_memory:
33104+ up_read(&mm->mmap_sem);
33105+ if (current->pid == 1) {
33106+ yield();
33107+ goto again;
33108+ }
33109+ printk("VM: killing process %s\n", tsk->comm);
33110+ if (error_code & 4)
33111+ do_exit(SIGKILL);
33112+ goto no_context;
33113+
33114+do_sigbus:
33115+ up_read(&mm->mmap_sem);
33116+
33117+ /* Kernel mode? Handle exceptions or die */
33118+ if (!(error_code & PF_USER))
33119+ goto no_context;
33120+
33121+ tsk->thread.cr2 = address;
33122+ tsk->thread.error_code = error_code;
33123+ tsk->thread.trap_no = 14;
33124+ info.si_signo = SIGBUS;
33125+ info.si_errno = 0;
33126+ info.si_code = BUS_ADRERR;
33127+ info.si_addr = (void __user *)address;
33128+ force_sig_info(SIGBUS, &info, tsk);
33129+ return;
33130+}
33131+
33132+DEFINE_SPINLOCK(pgd_lock);
33133+struct page *pgd_list;
33134+
33135+void vmalloc_sync_all(void)
33136+{
33137+ /* Note that races in the updates of insync and start aren't
33138+ problematic:
33139+ insync can only get set bits added, and updates to start are only
33140+ improving performance (without affecting correctness if undone). */
33141+ static DECLARE_BITMAP(insync, PTRS_PER_PGD);
33142+ static unsigned long start = VMALLOC_START & PGDIR_MASK;
33143+ unsigned long address;
33144+
33145+ for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
33146+ if (!test_bit(pgd_index(address), insync)) {
33147+ const pgd_t *pgd_ref = pgd_offset_k(address);
33148+ struct page *page;
33149+
33150+ if (pgd_none(*pgd_ref))
33151+ continue;
33152+ spin_lock(&pgd_lock);
33153+ for (page = pgd_list; page;
33154+ page = (struct page *)page->index) {
33155+ pgd_t *pgd;
33156+ pgd = (pgd_t *)page_address(page) + pgd_index(address);
33157+ if (pgd_none(*pgd))
33158+ set_pgd(pgd, *pgd_ref);
33159+ else
33160+ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
33161+ }
33162+ spin_unlock(&pgd_lock);
33163+ set_bit(pgd_index(address), insync);
33164+ }
33165+ if (address == start)
33166+ start = address + PGDIR_SIZE;
33167+ }
33168+ /* Check that there is no need to do the same for the modules area. */
33169+ BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
33170+ BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
33171+ (__START_KERNEL & PGDIR_MASK)));
33172+}
33173+
33174+static int __init enable_pagefaulttrace(char *str)
33175+{
33176+ page_fault_trace = 1;
33177+ return 1;
33178+}
33179+__setup("pagefaulttrace", enable_pagefaulttrace);
33180Index: head-2008-11-25/arch/x86/mm/init_64-xen.c
33181===================================================================
33182--- /dev/null 1970-01-01 00:00:00.000000000 +0000
33183+++ head-2008-11-25/arch/x86/mm/init_64-xen.c 2008-10-29 09:55:56.000000000 +0100
33184@@ -0,0 +1,1206 @@
33185+/*
33186+ * linux/arch/x86_64/mm/init.c
33187+ *
33188+ * Copyright (C) 1995 Linus Torvalds
33189+ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
33190+ * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
33191+ *
33192+ * Jun Nakajima <jun.nakajima@intel.com>
33193+ * Modified for Xen.
33194+ */
33195+
33196+#include <linux/signal.h>
33197+#include <linux/sched.h>
33198+#include <linux/kernel.h>
33199+#include <linux/errno.h>
33200+#include <linux/string.h>
33201+#include <linux/types.h>
33202+#include <linux/ptrace.h>
33203+#include <linux/mman.h>
33204+#include <linux/mm.h>
33205+#include <linux/swap.h>
33206+#include <linux/smp.h>
33207+#include <linux/init.h>
33208+#include <linux/pagemap.h>
33209+#include <linux/bootmem.h>
33210+#include <linux/proc_fs.h>
33211+#include <linux/pci.h>
33212+#include <linux/poison.h>
33213+#include <linux/dma-mapping.h>
33214+#include <linux/module.h>
33215+#include <linux/memory_hotplug.h>
33216+
33217+#include <asm/processor.h>
33218+#include <asm/system.h>
33219+#include <asm/uaccess.h>
33220+#include <asm/pgtable.h>
33221+#include <asm/pgalloc.h>
33222+#include <asm/dma.h>
33223+#include <asm/fixmap.h>
33224+#include <asm/e820.h>
33225+#include <asm/apic.h>
33226+#include <asm/tlb.h>
33227+#include <asm/mmu_context.h>
33228+#include <asm/proto.h>
33229+#include <asm/smp.h>
33230+#include <asm/sections.h>
33231+
33232+#include <xen/features.h>
33233+
33234+#ifndef Dprintk
33235+#define Dprintk(x...)
33236+#endif
33237+
33238+struct dma_mapping_ops* dma_ops;
33239+EXPORT_SYMBOL(dma_ops);
33240+
33241+#if CONFIG_XEN_COMPAT <= 0x030002
33242+unsigned int __kernel_page_user;
33243+EXPORT_SYMBOL(__kernel_page_user);
33244+#endif
33245+
33246+int after_bootmem;
33247+
33248+static unsigned long dma_reserve __initdata;
33249+
33250+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
33251+extern unsigned long start_pfn;
33252+
33253+/*
33254+ * Use this until direct mapping is established, i.e. before __va() is
33255+ * available in init_memory_mapping().
33256+ */
33257+
33258+#define addr_to_page(addr, page) \
33259+ (addr) &= PHYSICAL_PAGE_MASK; \
33260+ (page) = ((unsigned long *) ((unsigned long) \
33261+ (((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + \
33262+ __START_KERNEL_map)))
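+
+/*
+ * Usage sketch (illustrative): while the direct mapping is still being built,
+ * each page-table level is reached by taking the machine address stored in
+ * the previous level's entry, translating MFN->PFN, and adding the fixed
+ * __START_KERNEL_map offset, e.g.
+ *
+ *	addr = page[pgd_index(va)];	// entry holds a machine address
+ *	addr_to_page(addr, page);	// 'page' now points at the next level
+ *
+ * Note the macro expands to two statements, so it is only ever used as a
+ * plain statement of its own.
+ */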
33263+
33264+static void __meminit early_make_page_readonly(void *va, unsigned int feature)
33265+{
33266+ unsigned long addr, _va = (unsigned long)va;
33267+ pte_t pte, *ptep;
33268+ unsigned long *page = (unsigned long *) init_level4_pgt;
33269+
33270+ BUG_ON(after_bootmem);
33271+
33272+ if (xen_feature(feature))
33273+ return;
33274+
33275+ addr = (unsigned long) page[pgd_index(_va)];
33276+ addr_to_page(addr, page);
33277+
33278+ addr = page[pud_index(_va)];
33279+ addr_to_page(addr, page);
33280+
33281+ addr = page[pmd_index(_va)];
33282+ addr_to_page(addr, page);
33283+
33284+ ptep = (pte_t *) &page[pte_index(_va)];
33285+
33286+ pte.pte = ptep->pte & ~_PAGE_RW;
33287+ if (HYPERVISOR_update_va_mapping(_va, pte, 0))
33288+ BUG();
33289+}
33290+
33291+static void __make_page_readonly(void *va)
33292+{
33293+ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
33294+ unsigned long addr = (unsigned long) va;
33295+
33296+ pgd = pgd_offset_k(addr);
33297+ pud = pud_offset(pgd, addr);
33298+ pmd = pmd_offset(pud, addr);
33299+ ptep = pte_offset_kernel(pmd, addr);
33300+
33301+ pte.pte = ptep->pte & ~_PAGE_RW;
33302+ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
33303+ xen_l1_entry_update(ptep, pte); /* fallback */
33304+
33305+ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
33306+ __make_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT));
33307+}
33308+
33309+static void __make_page_writable(void *va)
33310+{
33311+ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
33312+ unsigned long addr = (unsigned long) va;
33313+
33314+ pgd = pgd_offset_k(addr);
33315+ pud = pud_offset(pgd, addr);
33316+ pmd = pmd_offset(pud, addr);
33317+ ptep = pte_offset_kernel(pmd, addr);
33318+
33319+ pte.pte = ptep->pte | _PAGE_RW;
33320+ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
33321+ xen_l1_entry_update(ptep, pte); /* fallback */
33322+
33323+ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
33324+ __make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT));
33325+}
33326+
33327+void make_page_readonly(void *va, unsigned int feature)
33328+{
33329+ if (!xen_feature(feature))
33330+ __make_page_readonly(va);
33331+}
33332+
33333+void make_page_writable(void *va, unsigned int feature)
33334+{
33335+ if (!xen_feature(feature))
33336+ __make_page_writable(va);
33337+}
33338+
33339+void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
33340+{
33341+ if (xen_feature(feature))
33342+ return;
33343+
33344+ while (nr-- != 0) {
33345+ __make_page_readonly(va);
33346+ va = (void*)((unsigned long)va + PAGE_SIZE);
33347+ }
33348+}
33349+
33350+void make_pages_writable(void *va, unsigned nr, unsigned int feature)
33351+{
33352+ if (xen_feature(feature))
33353+ return;
33354+
33355+ while (nr-- != 0) {
33356+ __make_page_writable(va);
33357+ va = (void*)((unsigned long)va + PAGE_SIZE);
33358+ }
33359+}
33360+
33361+/*
33362+ * NOTE: pagetable_init allocates all the fixmap pagetables contiguously in
33363+ * physical space so we can cache the location of the first one and move
33364+ * around without checking the pgd every time.
33365+ */
33366+
33367+void show_mem(void)
33368+{
33369+ long i, total = 0, reserved = 0;
33370+ long shared = 0, cached = 0;
33371+ pg_data_t *pgdat;
33372+ struct page *page;
33373+
33374+ printk(KERN_INFO "Mem-info:\n");
33375+ show_free_areas();
33376+ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
33377+
33378+ for_each_online_pgdat(pgdat) {
33379+ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
33380+ page = pfn_to_page(pgdat->node_start_pfn + i);
33381+ total++;
33382+ if (PageReserved(page))
33383+ reserved++;
33384+ else if (PageSwapCache(page))
33385+ cached++;
33386+ else if (page_count(page))
33387+ shared += page_count(page) - 1;
33388+ }
33389+ }
33390+ printk(KERN_INFO "%lu pages of RAM\n", total);
33391+	printk(KERN_INFO "%lu reserved pages\n", reserved);
33392+	printk(KERN_INFO "%lu pages shared\n", shared);
33393+	printk(KERN_INFO "%lu pages swap cached\n", cached);
33394+}
33395+
33396+
33397+static __init void *spp_getpage(void)
33398+{
33399+ void *ptr;
33400+ if (after_bootmem)
33401+ ptr = (void *) get_zeroed_page(GFP_ATOMIC);
33402+ else if (start_pfn < table_end) {
33403+ ptr = __va(start_pfn << PAGE_SHIFT);
33404+ start_pfn++;
33405+ memset(ptr, 0, PAGE_SIZE);
33406+ } else
33407+ ptr = alloc_bootmem_pages(PAGE_SIZE);
33408+ if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
33409+ panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
33410+
33411+ Dprintk("spp_getpage %p\n", ptr);
33412+ return ptr;
33413+}
33414+
33415+#define pgd_offset_u(address) (__user_pgd(init_level4_pgt) + pgd_index(address))
33416+#define pud_offset_u(address) (level3_user_pgt + pud_index(address))
33417+
33418+static __init void set_pte_phys(unsigned long vaddr,
33419+ unsigned long phys, pgprot_t prot, int user_mode)
33420+{
33421+ pgd_t *pgd;
33422+ pud_t *pud;
33423+ pmd_t *pmd;
33424+ pte_t *pte, new_pte;
33425+
33426+ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
33427+
33428+ pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
33429+ if (pgd_none(*pgd)) {
33430+		printk("PGD FIXMAP MISSING, it should be set up in head.S!\n");
33431+ return;
33432+ }
33433+ pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
33434+ if (pud_none(*pud)) {
33435+ pmd = (pmd_t *) spp_getpage();
33436+ make_page_readonly(pmd, XENFEAT_writable_page_tables);
33437+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
33438+ if (pmd != pmd_offset(pud, 0)) {
33439+ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
33440+ return;
33441+ }
33442+ }
33443+ pmd = pmd_offset(pud, vaddr);
33444+ if (pmd_none(*pmd)) {
33445+ pte = (pte_t *) spp_getpage();
33446+ make_page_readonly(pte, XENFEAT_writable_page_tables);
33447+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
33448+ if (pte != pte_offset_kernel(pmd, 0)) {
33449+ printk("PAGETABLE BUG #02!\n");
33450+ return;
33451+ }
33452+ }
33453+ if (pgprot_val(prot))
33454+ new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
33455+ else
33456+ new_pte = __pte(0);
33457+
33458+ pte = pte_offset_kernel(pmd, vaddr);
33459+ if (!pte_none(*pte) && __pte_val(new_pte) &&
33460+ __pte_val(*pte) != (__pte_val(new_pte) & __supported_pte_mask))
33461+ pte_ERROR(*pte);
33462+ set_pte(pte, new_pte);
33463+
33464+ /*
33465+ * It's enough to flush this one mapping.
33466+ * (PGE mappings get flushed as well)
33467+ */
33468+ __flush_tlb_one(vaddr);
33469+}
33470+
33471+static __init void set_pte_phys_ma(unsigned long vaddr,
33472+ unsigned long phys, pgprot_t prot)
33473+{
33474+ pgd_t *pgd;
33475+ pud_t *pud;
33476+ pmd_t *pmd;
33477+ pte_t *pte, new_pte;
33478+
33479+ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
33480+
33481+ pgd = pgd_offset_k(vaddr);
33482+ if (pgd_none(*pgd)) {
33483+		printk("PGD FIXMAP MISSING, it should be set up in head.S!\n");
33484+ return;
33485+ }
33486+ pud = pud_offset(pgd, vaddr);
33487+ if (pud_none(*pud)) {
33488+
33489+ pmd = (pmd_t *) spp_getpage();
33490+ make_page_readonly(pmd, XENFEAT_writable_page_tables);
33491+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
33492+ if (pmd != pmd_offset(pud, 0)) {
33493+ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
33494+ return;
33495+ }
33496+ }
33497+ pmd = pmd_offset(pud, vaddr);
33498+ if (pmd_none(*pmd)) {
33499+ pte = (pte_t *) spp_getpage();
33500+ make_page_readonly(pte, XENFEAT_writable_page_tables);
33501+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
33502+ if (pte != pte_offset_kernel(pmd, 0)) {
33503+ printk("PAGETABLE BUG #02!\n");
33504+ return;
33505+ }
33506+ }
33507+ new_pte = pfn_pte_ma(phys >> PAGE_SHIFT, prot);
33508+
33509+ pte = pte_offset_kernel(pmd, vaddr);
33510+ if (!pte_none(*pte) && __pte_val(new_pte) &&
33511+#ifdef CONFIG_ACPI
33512+ /* __acpi_map_table() fails to properly call clear_fixmap() */
33513+ (vaddr < __fix_to_virt(FIX_ACPI_END) ||
33514+ vaddr > __fix_to_virt(FIX_ACPI_BEGIN)) &&
33515+#endif
33516+ __pte_val(*pte) != (__pte_val(new_pte) & __supported_pte_mask))
33517+ pte_ERROR(*pte);
33518+ set_pte(pte, new_pte);
33519+
33520+ /*
33521+ * It's enough to flush this one mapping.
33522+ * (PGE mappings get flushed as well)
33523+ */
33524+ __flush_tlb_one(vaddr);
33525+}
33526+
33527+/* NOTE: this is meant to be run only at boot */
33528+void __init
33529+__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
33530+{
33531+ unsigned long address = __fix_to_virt(idx);
33532+
33533+ if (idx >= __end_of_fixed_addresses) {
33534+ printk("Invalid __set_fixmap\n");
33535+ return;
33536+ }
33537+ switch (idx) {
33538+ case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
33539+ set_pte_phys(address, phys, prot, 0);
33540+ set_pte_phys(address, phys, prot, 1);
33541+ break;
33542+ default:
33543+ set_pte_phys_ma(address, phys, prot);
33544+ break;
33545+ }
33546+}
33547+
33548+unsigned long __initdata table_start, table_end;
33549+
33550+static __meminit void *alloc_static_page(unsigned long *phys)
33551+{
33552+ unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
33553+
33554+ if (after_bootmem) {
33555+ void *adr = (void *)get_zeroed_page(GFP_ATOMIC);
33556+
33557+ *phys = __pa(adr);
33558+ return adr;
33559+ }
33560+
33561+ *phys = start_pfn << PAGE_SHIFT;
33562+ start_pfn++;
33563+ memset((void *)va, 0, PAGE_SIZE);
33564+ return (void *)va;
33565+}
33566+
33567+#define PTE_SIZE PAGE_SIZE
33568+
33569+static inline int make_readonly(unsigned long paddr)
33570+{
33571+ extern char __vsyscall_0;
33572+ int readonly = 0;
33573+
33574+ /* Make new page tables read-only. */
33575+ if (!xen_feature(XENFEAT_writable_page_tables)
33576+ && (paddr >= (table_start << PAGE_SHIFT))
33577+ && (paddr < (table_end << PAGE_SHIFT)))
33578+ readonly = 1;
33579+ /* Make old page tables read-only. */
33580+ if (!xen_feature(XENFEAT_writable_page_tables)
33581+ && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
33582+ && (paddr < (start_pfn << PAGE_SHIFT)))
33583+ readonly = 1;
33584+
33585+ /*
33586+ * No need for writable mapping of kernel image. This also ensures that
33587+ * page and descriptor tables embedded inside don't have writable
33588+ * mappings. Exclude the vsyscall area here, allowing alternative
33589+ * instruction patching to work.
33590+ */
33591+ if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end))
33592+ && !(paddr >= __pa_symbol(&__vsyscall_0)
33593+ && paddr < __pa_symbol(&__vsyscall_0) + PAGE_SIZE))
33594+ readonly = 1;
33595+
33596+ return readonly;
33597+}
33598+
33599+#ifndef CONFIG_XEN
33600+/* Must run before zap_low_mappings */
33601+__init void *early_ioremap(unsigned long addr, unsigned long size)
33602+{
33603+ unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
33604+
33605+ /* actually usually some more */
33606+ if (size >= LARGE_PAGE_SIZE) {
33607+ printk("SMBIOS area too long %lu\n", size);
33608+ return NULL;
33609+ }
33610+ set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
33611+ map += LARGE_PAGE_SIZE;
33612+ set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
33613+ __flush_tlb();
33614+ return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
33615+}
33616+
33617+/* To avoid virtual aliases later */
33618+__init void early_iounmap(void *addr, unsigned long size)
33619+{
33620+ if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
33621+ printk("early_iounmap: bad address %p\n", addr);
33622+ set_pmd(temp_mappings[0].pmd, __pmd(0));
33623+ set_pmd(temp_mappings[1].pmd, __pmd(0));
33624+ __flush_tlb();
33625+}
33626+#endif
33627+
33628+static void __meminit
33629+phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
33630+{
33631+ int i, k;
33632+
33633+ for (i = 0; i < PTRS_PER_PMD; pmd++, i++) {
33634+ unsigned long pte_phys;
33635+ pte_t *pte, *pte_save;
33636+
33637+ if (address >= end)
33638+ break;
33639+ pte = alloc_static_page(&pte_phys);
33640+ pte_save = pte;
33641+ for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
33642+ unsigned long pteval = address | _PAGE_NX | _KERNPG_TABLE;
33643+
33644+ if (address >= (after_bootmem
33645+ ? end
33646+ : xen_start_info->nr_pages << PAGE_SHIFT))
33647+ pteval = 0;
33648+ else if (make_readonly(address))
33649+ pteval &= ~_PAGE_RW;
33650+ set_pte(pte, __pte(pteval & __supported_pte_mask));
33651+ }
33652+ if (!after_bootmem) {
33653+ early_make_page_readonly(pte_save, XENFEAT_writable_page_tables);
33654+ *pmd = __pmd(pte_phys | _KERNPG_TABLE);
33655+ } else {
33656+ make_page_readonly(pte_save, XENFEAT_writable_page_tables);
33657+ set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
33658+ }
33659+ }
33660+}
33661+
33662+static void __meminit
33663+phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
33664+{
33665+ pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
33666+
33667+ if (pmd_none(*pmd)) {
33668+ spin_lock(&init_mm.page_table_lock);
33669+ phys_pmd_init(pmd, address, end);
33670+ spin_unlock(&init_mm.page_table_lock);
33671+ __flush_tlb_all();
33672+ }
33673+}
33674+
33675+static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
33676+{
33677+ long i = pud_index(address);
33678+
33679+ pud = pud + i;
33680+
33681+ if (after_bootmem && pud_val(*pud)) {
33682+ phys_pmd_update(pud, address, end);
33683+ return;
33684+ }
33685+
33686+ for (; i < PTRS_PER_PUD; pud++, i++) {
33687+ unsigned long paddr, pmd_phys;
33688+ pmd_t *pmd;
33689+
33690+ paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
33691+ if (paddr >= end)
33692+ break;
33693+
33694+ pmd = alloc_static_page(&pmd_phys);
33695+
33696+ spin_lock(&init_mm.page_table_lock);
33697+ *pud = __pud(pmd_phys | _KERNPG_TABLE);
33698+ phys_pmd_init(pmd, paddr, end);
33699+ spin_unlock(&init_mm.page_table_lock);
33700+
33701+ early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
33702+ }
33703+ __flush_tlb();
33704+}
33705+
33706+void __init xen_init_pt(void)
33707+{
33708+ unsigned long addr, *page;
33709+
33710+ /* Find the initial pte page that was built for us. */
33711+ page = (unsigned long *)xen_start_info->pt_base;
33712+ addr = page[pgd_index(__START_KERNEL_map)];
33713+ addr_to_page(addr, page);
33714+ addr = page[pud_index(__START_KERNEL_map)];
33715+ addr_to_page(addr, page);
33716+
33717+#if CONFIG_XEN_COMPAT <= 0x030002
33718+ /* On Xen 3.0.2 and older we may need to explicitly specify _PAGE_USER
33719+ in kernel PTEs. We check that here. */
33720+ if (HYPERVISOR_xen_version(XENVER_version, NULL) <= 0x30000) {
33721+ unsigned long *pg;
33722+ pte_t pte;
33723+
33724+ /* Mess with the initial mapping of page 0. It's not needed. */
33725+ BUILD_BUG_ON(__START_KERNEL <= __START_KERNEL_map);
33726+ addr = page[pmd_index(__START_KERNEL_map)];
33727+ addr_to_page(addr, pg);
33728+ pte.pte = pg[pte_index(__START_KERNEL_map)];
33729+ BUG_ON(!(pte.pte & _PAGE_PRESENT));
33730+
33731+ /* If _PAGE_USER isn't set, we obviously do not need it. */
33732+ if (pte.pte & _PAGE_USER) {
33733+ /* _PAGE_USER is needed, but is it set implicitly? */
33734+ pte.pte &= ~_PAGE_USER;
33735+ if ((HYPERVISOR_update_va_mapping(__START_KERNEL_map,
33736+ pte, 0) != 0) ||
33737+ !(pg[pte_index(__START_KERNEL_map)] & _PAGE_USER))
33738+ /* We need to explicitly specify _PAGE_USER. */
33739+ __kernel_page_user = _PAGE_USER;
33740+ }
33741+ }
33742+#endif
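+	/*
+	 * What the probe above establishes (illustration): if the initial
+	 * kernel PTE did not have _PAGE_USER set, the bit is simply not
+	 * needed. If it was set, we try rewriting the PTE with the bit
+	 * cleared: when that write fails, or succeeds with the bit staying
+	 * clear, we fall back to ORing _PAGE_USER into kernel PTEs ourselves
+	 * via __kernel_page_user; only if the bit silently reappears do we
+	 * rely on the hypervisor to add it implicitly.
+	 */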
33743+
33744+ /* Construct mapping of initial pte page in our own directories. */
33745+ init_level4_pgt[pgd_index(__START_KERNEL_map)] =
33746+ __pgd(__pa_symbol(level3_kernel_pgt) | _PAGE_TABLE);
33747+ level3_kernel_pgt[pud_index(__START_KERNEL_map)] =
33748+ __pud(__pa_symbol(level2_kernel_pgt) | _PAGE_TABLE);
33749+ memcpy(level2_kernel_pgt, page, PAGE_SIZE);
33750+
33751+ __user_pgd(init_level4_pgt)[pgd_index(VSYSCALL_START)] =
33752+ __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
33753+
33754+ early_make_page_readonly(init_level4_pgt,
33755+ XENFEAT_writable_page_tables);
33756+ early_make_page_readonly(__user_pgd(init_level4_pgt),
33757+ XENFEAT_writable_page_tables);
33758+ early_make_page_readonly(level3_kernel_pgt,
33759+ XENFEAT_writable_page_tables);
33760+ early_make_page_readonly(level3_user_pgt,
33761+ XENFEAT_writable_page_tables);
33762+ early_make_page_readonly(level2_kernel_pgt,
33763+ XENFEAT_writable_page_tables);
33764+
33765+ if (!xen_feature(XENFEAT_writable_page_tables)) {
33766+ xen_pgd_pin(__pa_symbol(init_level4_pgt));
33767+ xen_pgd_pin(__pa_symbol(__user_pgd(init_level4_pgt)));
33768+ }
33769+}
33770+
33771+static void __init extend_init_mapping(unsigned long tables_space)
33772+{
33773+ unsigned long va = __START_KERNEL_map;
33774+ unsigned long phys, addr, *pte_page;
33775+ pmd_t *pmd;
33776+ pte_t *pte, new_pte;
33777+ unsigned long *page = (unsigned long *)init_level4_pgt;
33778+
33779+ addr = page[pgd_index(va)];
33780+ addr_to_page(addr, page);
33781+ addr = page[pud_index(va)];
33782+ addr_to_page(addr, page);
33783+
33784+ /* Kill mapping of low 1MB. */
33785+ while (va < (unsigned long)&_text) {
33786+ if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
33787+ BUG();
33788+ va += PAGE_SIZE;
33789+ }
33790+
33791+ /* Ensure init mappings cover kernel text/data and initial tables. */
33792+ while (va < (__START_KERNEL_map
33793+ + (start_pfn << PAGE_SHIFT)
33794+ + tables_space)) {
33795+ pmd = (pmd_t *)&page[pmd_index(va)];
33796+ if (pmd_none(*pmd)) {
33797+ pte_page = alloc_static_page(&phys);
33798+ early_make_page_readonly(
33799+ pte_page, XENFEAT_writable_page_tables);
33800+ set_pmd(pmd, __pmd(phys | _KERNPG_TABLE));
33801+ } else {
33802+ addr = page[pmd_index(va)];
33803+ addr_to_page(addr, pte_page);
33804+ }
33805+ pte = (pte_t *)&pte_page[pte_index(va)];
33806+ if (pte_none(*pte)) {
33807+ new_pte = pfn_pte(
33808+ (va - __START_KERNEL_map) >> PAGE_SHIFT,
33809+ __pgprot(_KERNPG_TABLE));
33810+ xen_l1_entry_update(pte, new_pte);
33811+ }
33812+ va += PAGE_SIZE;
33813+ }
33814+
33815+ /* Finally, blow away any spurious initial mappings. */
33816+ while (1) {
33817+ pmd = (pmd_t *)&page[pmd_index(va)];
33818+ if (pmd_none(*pmd))
33819+ break;
33820+ if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
33821+ BUG();
33822+ va += PAGE_SIZE;
33823+ }
33824+}
33825+
33826+static void __init find_early_table_space(unsigned long end)
33827+{
33828+ unsigned long puds, pmds, ptes, tables;
33829+
33830+ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
33831+ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
33832+ ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
33833+
33834+ tables = round_up(puds * 8, PAGE_SIZE) +
33835+ round_up(pmds * 8, PAGE_SIZE) +
33836+ round_up(ptes * 8, PAGE_SIZE);
33837+
33838+ extend_init_mapping(tables);
33839+
33840+ table_start = start_pfn;
33841+ table_end = table_start + (tables>>PAGE_SHIFT);
33842+
33843+ early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
33844+ end, table_start << PAGE_SHIFT,
33845+ (table_start << PAGE_SHIFT) + tables);
33846+}
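+
+/*
+ * Example (illustrative numbers): for end = 4GB the estimate works out to
+ * puds = 4, pmds = 2048 and ptes = 1048576, i.e. roughly 4k + 16k + 8MB of
+ * page tables, since the Xen direct mapping is built entirely from 4k PTEs
+ * (PTE_SIZE == PAGE_SIZE above).
+ */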
33847+
33848+static void xen_finish_init_mapping(void)
33849+{
33850+ unsigned long i, start, end;
33851+
33852+ /* Re-vector virtual addresses pointing into the initial
33853+ mapping to the just-established permanent ones. */
33854+ xen_start_info = __va(__pa(xen_start_info));
33855+ xen_start_info->pt_base = (unsigned long)
33856+ __va(__pa(xen_start_info->pt_base));
33857+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
33858+ phys_to_machine_mapping =
33859+ __va(__pa(xen_start_info->mfn_list));
33860+ xen_start_info->mfn_list = (unsigned long)
33861+ phys_to_machine_mapping;
33862+ }
33863+ if (xen_start_info->mod_start)
33864+ xen_start_info->mod_start = (unsigned long)
33865+ __va(__pa(xen_start_info->mod_start));
33866+
33867+ /* Destroy the Xen-created mappings beyond the kernel image as
33868+ * well as the temporary mappings created above. Prevents
33869+ * overlap with modules area (if init mapping is very big).
33870+ */
33871+ start = PAGE_ALIGN((unsigned long)_end);
33872+ end = __START_KERNEL_map + (table_end << PAGE_SHIFT);
33873+ for (; start < end; start += PAGE_SIZE)
33874+ if (HYPERVISOR_update_va_mapping(start, __pte_ma(0), 0))
33875+ BUG();
33876+
33877+ /* Allocate pte's for initial fixmaps from 'start_pfn' allocator. */
33878+ table_end = ~0UL;
33879+
33880+ /*
33881+ * Prefetch pte's for the bt_ioremap() area. It gets used before the
33882+ * boot-time allocator is online, so allocate-on-demand would fail.
33883+ */
33884+ for (i = FIX_BTMAP_END; i <= FIX_BTMAP_BEGIN; i++)
33885+ __set_fixmap(i, 0, __pgprot(0));
33886+
33887+ /* Switch to the real shared_info page, and clear the dummy page. */
33888+ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
33889+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
33890+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
33891+
33892+ /* Set up mapping of lowest 1MB of physical memory. */
33893+ for (i = 0; i < NR_FIX_ISAMAPS; i++)
33894+ if (is_initial_xendomain())
33895+ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
33896+ else
33897+ __set_fixmap(FIX_ISAMAP_BEGIN - i,
33898+ virt_to_mfn(empty_zero_page)
33899+ << PAGE_SHIFT,
33900+ PAGE_KERNEL_RO);
33901+
33902+ /* Disable the 'start_pfn' allocator. */
33903+ table_end = start_pfn;
33904+}
33905+
33906+/* Set up the direct mapping of the physical memory at PAGE_OFFSET.
33907+   This runs before bootmem is initialized and gets pages directly from
33908+   physical memory. To access them they are temporarily mapped. */
33909+void __meminit init_memory_mapping(unsigned long start, unsigned long end)
33910+{
33911+ unsigned long next;
33912+
33913+ Dprintk("init_memory_mapping\n");
33914+
33915+ /*
33916+ * Find space for the kernel direct mapping tables.
33917+	 * Later we should allocate these tables on the local node of the memory
33918+	 * being mapped. Unfortunately this is currently done before the nodes are
33919+	 * discovered.
33920+ */
33921+ if (!after_bootmem)
33922+ find_early_table_space(end);
33923+
33924+ start = (unsigned long)__va(start);
33925+ end = (unsigned long)__va(end);
33926+
33927+ for (; start < end; start = next) {
33928+ unsigned long pud_phys;
33929+ pgd_t *pgd = pgd_offset_k(start);
33930+ pud_t *pud;
33931+
33932+ if (after_bootmem)
33933+ pud = pud_offset(pgd, start & PGDIR_MASK);
33934+ else
33935+ pud = alloc_static_page(&pud_phys);
33936+ next = start + PGDIR_SIZE;
33937+ if (next > end)
33938+ next = end;
33939+ phys_pud_init(pud, __pa(start), __pa(next));
33940+ if (!after_bootmem) {
33941+ early_make_page_readonly(pud, XENFEAT_writable_page_tables);
33942+ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
33943+ }
33944+ }
33945+
33946+ if (!after_bootmem) {
33947+ BUG_ON(start_pfn != table_end);
33948+ xen_finish_init_mapping();
33949+ }
33950+
33951+ __flush_tlb_all();
33952+}
33953+
33954+void __cpuinit zap_low_mappings(int cpu)
33955+{
33956+ /* this is not required for Xen */
33957+#if 0
33958+ swap_low_mappings();
33959+#endif
33960+}
33961+
33962+/* Compute zone sizes for the DMA and DMA32 zones in a node. */
33963+__init void
33964+size_zones(unsigned long *z, unsigned long *h,
33965+ unsigned long start_pfn, unsigned long end_pfn)
33966+{
33967+ int i;
33968+ unsigned long w;
33969+
33970+ for (i = 0; i < MAX_NR_ZONES; i++)
33971+ z[i] = 0;
33972+
33973+ if (start_pfn < MAX_DMA_PFN)
33974+ z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
33975+ if (start_pfn < MAX_DMA32_PFN) {
33976+ unsigned long dma32_pfn = MAX_DMA32_PFN;
33977+ if (dma32_pfn > end_pfn)
33978+ dma32_pfn = end_pfn;
33979+ z[ZONE_DMA32] = dma32_pfn - start_pfn;
33980+ }
33981+ z[ZONE_NORMAL] = end_pfn - start_pfn;
33982+
33983+ /* Remove lower zones from higher ones. */
33984+ w = 0;
33985+ for (i = 0; i < MAX_NR_ZONES; i++) {
33986+ if (z[i])
33987+ z[i] -= w;
33988+ w += z[i];
33989+ }
33990+
33991+ /* Compute holes */
33992+ w = start_pfn;
33993+ for (i = 0; i < MAX_NR_ZONES; i++) {
33994+ unsigned long s = w;
33995+ w += z[i];
33996+ h[i] = e820_hole_size(s, w);
33997+ }
33998+
33999+	/* Add the space needed for mem_map to the holes too. */
34000+ for (i = 0; i < MAX_NR_ZONES; i++)
34001+ h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
34002+
34003+ /* The 16MB DMA zone has the kernel and other misc mappings.
34004+ Account them too */
34005+ if (h[ZONE_DMA]) {
34006+ h[ZONE_DMA] += dma_reserve;
34007+ if (h[ZONE_DMA] >= z[ZONE_DMA]) {
34008+ printk(KERN_WARNING
34009+ "Kernel too large and filling up ZONE_DMA?\n");
34010+ h[ZONE_DMA] = z[ZONE_DMA];
34011+ }
34012+ }
34013+}
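+
+/*
+ * Worked example (illustrative): with start_pfn = 0 and end_pfn = 0x400000
+ * (16GB of 4k pages), the zone sizes start out as z[ZONE_DMA] = 4096,
+ * z[ZONE_DMA32] = 1048576 and z[ZONE_NORMAL] = 0x400000, all measured from
+ * pfn 0; after the "remove lower zones" loop they become 4096, 1044480 and
+ * 3145728 pages respectively, while h[] additionally accounts for e820
+ * holes, the mem_map estimate and dma_reserve.
+ */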
34014+
34015+#ifndef CONFIG_NUMA
34016+void __init paging_init(void)
34017+{
34018+ unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
34019+
34020+ memory_present(0, 0, end_pfn);
34021+ sparse_init();
34022+ size_zones(zones, holes, 0, end_pfn);
34023+ free_area_init_node(0, NODE_DATA(0), zones,
34024+ __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
34025+
34026+ init_mm.context.pinned = 1;
34027+}
34028+#endif
34029+
34030+/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
34031+ from the CPU leading to inconsistent cache lines. address and size
34032+ must be aligned to 2MB boundaries.
34033+ Does nothing when the mapping doesn't exist. */
34034+void __init clear_kernel_mapping(unsigned long address, unsigned long size)
34035+{
34036+ unsigned long end = address + size;
34037+
34038+ BUG_ON(address & ~LARGE_PAGE_MASK);
34039+ BUG_ON(size & ~LARGE_PAGE_MASK);
34040+
34041+ for (; address < end; address += LARGE_PAGE_SIZE) {
34042+ pgd_t *pgd = pgd_offset_k(address);
34043+ pud_t *pud;
34044+ pmd_t *pmd;
34045+ if (pgd_none(*pgd))
34046+ continue;
34047+ pud = pud_offset(pgd, address);
34048+ if (pud_none(*pud))
34049+ continue;
34050+ pmd = pmd_offset(pud, address);
34051+ if (!pmd || pmd_none(*pmd))
34052+ continue;
34053+ if (0 == (__pmd_val(*pmd) & _PAGE_PSE)) {
34054+ /* Could handle this, but it should not happen currently. */
34055+ printk(KERN_ERR
34056+ "clear_kernel_mapping: mapping has been split. will leak memory\n");
34057+ pmd_ERROR(*pmd);
34058+ }
34059+ set_pmd(pmd, __pmd(0));
34060+ }
34061+ __flush_tlb_all();
34062+}
34063+
34064+/*
34065+ * Memory hotplug specific functions
34066+ */
34067+void online_page(struct page *page)
34068+{
34069+ ClearPageReserved(page);
34070+ init_page_count(page);
34071+ __free_page(page);
34072+ totalram_pages++;
34073+ num_physpages++;
34074+}
34075+
34076+#ifdef CONFIG_MEMORY_HOTPLUG
34077+/*
34078+ * XXX: memory_add_physaddr_to_nid() is meant to find the node id for a
34079+ * physical address given via the sysfs probe interface. If ACPI notifies a
34080+ * hot-add event, the node id can be found by searching the DSDT, but the
34081+ * probe interface carries no node id, so return node 0 for now.
34082+ */
34083+#ifdef CONFIG_NUMA
34084+int memory_add_physaddr_to_nid(u64 start)
34085+{
34086+ return 0;
34087+}
34088+#endif
34089+
34090+/*
34091+ * Memory is always added to the NORMAL zone. This means you will never get
34092+ * additional DMA/DMA32 memory.
34093+ */
34094+int arch_add_memory(int nid, u64 start, u64 size)
34095+{
34096+ struct pglist_data *pgdat = NODE_DATA(nid);
34097+ struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
34098+ unsigned long start_pfn = start >> PAGE_SHIFT;
34099+ unsigned long nr_pages = size >> PAGE_SHIFT;
34100+ int ret;
34101+
34102+ ret = __add_pages(zone, start_pfn, nr_pages);
34103+ if (ret)
34104+ goto error;
34105+
34106+ init_memory_mapping(start, (start + size -1));
34107+
34108+ return ret;
34109+error:
34110+ printk("%s: Problem encountered in __add_pages!\n", __func__);
34111+ return ret;
34112+}
34113+EXPORT_SYMBOL_GPL(arch_add_memory);
34114+
34115+int remove_memory(u64 start, u64 size)
34116+{
34117+ return -EINVAL;
34118+}
34119+EXPORT_SYMBOL_GPL(remove_memory);
34120+
34121+#else /* CONFIG_MEMORY_HOTPLUG */
34122+/*
34123+ * Memory hot-add without sparsemem. The mem_maps have been allocated in
34124+ * advance; just online the pages.
34125+ */
34126+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
34127+{
34128+ int err = -EIO;
34129+ unsigned long pfn;
34130+ unsigned long total = 0, mem = 0;
34131+ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
34132+ if (pfn_valid(pfn)) {
34133+ online_page(pfn_to_page(pfn));
34134+ err = 0;
34135+ mem++;
34136+ }
34137+ total++;
34138+ }
34139+ if (!err) {
34140+ z->spanned_pages += total;
34141+ z->present_pages += mem;
34142+ z->zone_pgdat->node_spanned_pages += total;
34143+ z->zone_pgdat->node_present_pages += mem;
34144+ }
34145+ return err;
34146+}
34147+#endif /* CONFIG_MEMORY_HOTPLUG */
34148+
34149+static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
34150+ kcore_vsyscall;
34151+
34152+void __init mem_init(void)
34153+{
34154+ long codesize, reservedpages, datasize, initsize;
34155+ unsigned long pfn;
34156+
34157+ pci_iommu_alloc();
34158+
34159+ /* How many end-of-memory variables you have, grandma! */
34160+ max_low_pfn = end_pfn;
34161+ max_pfn = end_pfn;
34162+ num_physpages = end_pfn;
34163+ high_memory = (void *) __va(end_pfn * PAGE_SIZE);
34164+
34165+ /* clear the zero-page */
34166+ memset(empty_zero_page, 0, PAGE_SIZE);
34167+
34168+ reservedpages = 0;
34169+
34170+ /* this will put all low memory onto the freelists */
34171+#ifdef CONFIG_NUMA
34172+ totalram_pages = numa_free_all_bootmem();
34173+#else
34174+ totalram_pages = free_all_bootmem();
34175+#endif
34176+ /* XEN: init and count pages outside initial allocation. */
34177+ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
34178+ ClearPageReserved(pfn_to_page(pfn));
34179+ init_page_count(pfn_to_page(pfn));
34180+ totalram_pages++;
34181+ }
34182+ reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
34183+
34184+ after_bootmem = 1;
34185+
34186+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
34187+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
34188+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
34189+
34190+ /* Register memory areas for /proc/kcore */
34191+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
34192+ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
34193+ VMALLOC_END-VMALLOC_START);
34194+ kclist_add(&kcore_kernel, &_stext, _end - _stext);
34195+ kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
34196+ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
34197+ VSYSCALL_END - VSYSCALL_START);
34198+
34199+ printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
34200+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
34201+ end_pfn << (PAGE_SHIFT-10),
34202+ codesize >> 10,
34203+ reservedpages << (PAGE_SHIFT-10),
34204+ datasize >> 10,
34205+ initsize >> 10);
34206+
34207+#ifndef CONFIG_XEN
34208+#ifdef CONFIG_SMP
34209+ /*
34210+ * Sync boot_level4_pgt mappings with the init_level4_pgt
34211+ * except for the low identity mappings which are already zapped
34212+	 * in init_level4_pgt. This sync-up is essential for AP bringup
34213+ */
34214+ memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
34215+#endif
34216+#endif
34217+}
34218+
34219+void free_init_pages(char *what, unsigned long begin, unsigned long end)
34220+{
34221+ unsigned long addr;
34222+
34223+ if (begin >= end)
34224+ return;
34225+
34226+ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
34227+ for (addr = begin; addr < end; addr += PAGE_SIZE) {
34228+ ClearPageReserved(virt_to_page(addr));
34229+ init_page_count(virt_to_page(addr));
34230+ memset((void *)(addr & ~(PAGE_SIZE-1)),
34231+ POISON_FREE_INITMEM, PAGE_SIZE);
34232+ if (addr >= __START_KERNEL_map) {
34233+ /* make_readonly() reports all kernel addresses. */
34234+ __make_page_writable(__va(__pa(addr)));
34235+ if (HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
34236+ pgd_t *pgd = pgd_offset_k(addr);
34237+ pud_t *pud = pud_offset(pgd, addr);
34238+ pmd_t *pmd = pmd_offset(pud, addr);
34239+ pte_t *pte = pte_offset_kernel(pmd, addr);
34240+
34241+ xen_l1_entry_update(pte, __pte(0)); /* fallback */
34242+ }
34243+ }
34244+ free_page(addr);
34245+ totalram_pages++;
34246+ }
34247+}
34248+
34249+void free_initmem(void)
34250+{
34251+ memset(__initdata_begin, POISON_FREE_INITDATA,
34252+ __initdata_end - __initdata_begin);
34253+ free_init_pages("unused kernel memory",
34254+ (unsigned long)(&__init_begin),
34255+ (unsigned long)(&__init_end));
34256+}
34257+
34258+#ifdef CONFIG_DEBUG_RODATA
34259+
34260+void mark_rodata_ro(void)
34261+{
34262+ unsigned long addr = (unsigned long)__start_rodata;
34263+
34264+ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
34265+ change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
34266+
34267+ printk ("Write protecting the kernel read-only data: %luk\n",
34268+ (__end_rodata - __start_rodata) >> 10);
34269+
34270+ /*
34271+ * change_page_attr_addr() requires a global_flush_tlb() call after it.
34272+ * We do this after the printk so that if something went wrong in the
34273+ * change, the printk gets out at least to give a better debug hint
34274+ * of who is the culprit.
34275+ */
34276+ global_flush_tlb();
34277+}
34278+#endif
34279+
34280+#ifdef CONFIG_BLK_DEV_INITRD
34281+void free_initrd_mem(unsigned long start, unsigned long end)
34282+{
34283+ free_init_pages("initrd memory", start, end);
34284+}
34285+#endif
34286+
34287+void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
34288+{
34289+ /* Should check here against the e820 map to avoid double free */
34290+#ifdef CONFIG_NUMA
34291+ int nid = phys_to_nid(phys);
34292+ reserve_bootmem_node(NODE_DATA(nid), phys, len);
34293+#else
34294+ reserve_bootmem(phys, len);
34295+#endif
34296+ if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
34297+ dma_reserve += len / PAGE_SIZE;
34298+}
34299+
34300+int kern_addr_valid(unsigned long addr)
34301+{
34302+ unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
34303+ pgd_t *pgd;
34304+ pud_t *pud;
34305+ pmd_t *pmd;
34306+ pte_t *pte;
34307+
34308+ if (above != 0 && above != -1UL)
34309+ return 0;
34310+
34311+ pgd = pgd_offset_k(addr);
34312+ if (pgd_none(*pgd))
34313+ return 0;
34314+
34315+ pud = pud_offset(pgd, addr);
34316+ if (pud_none(*pud))
34317+ return 0;
34318+
34319+ pmd = pmd_offset(pud, addr);
34320+ if (pmd_none(*pmd))
34321+ return 0;
34322+ if (pmd_large(*pmd))
34323+ return pfn_valid(pmd_pfn(*pmd));
34324+
34325+ pte = pte_offset_kernel(pmd, addr);
34326+ if (pte_none(*pte))
34327+ return 0;
34328+ return pfn_valid(pte_pfn(*pte));
34329+}
34330+
34331+#ifdef CONFIG_SYSCTL
34332+#include <linux/sysctl.h>
34333+
34334+extern int exception_trace, page_fault_trace;
34335+
34336+static ctl_table debug_table2[] = {
34337+ { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
34338+ proc_dointvec },
34339+ { 0, }
34340+};
34341+
34342+static ctl_table debug_root_table2[] = {
34343+ { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
34344+ .child = debug_table2 },
34345+ { 0 },
34346+};
34347+
34348+static __init int x8664_sysctl_init(void)
34349+{
34350+ register_sysctl_table(debug_root_table2, 1);
34351+ return 0;
34352+}
34353+__initcall(x8664_sysctl_init);
34354+#endif
34355+
34356+/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
34357+ covers the 64bit vsyscall page now. 32bit has a real VMA now and does
34358+ not need special handling anymore. */
34359+
34360+static struct vm_area_struct gate_vma = {
34361+ .vm_start = VSYSCALL_START,
34362+ .vm_end = VSYSCALL_END,
34363+ .vm_page_prot = PAGE_READONLY
34364+};
34365+
34366+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
34367+{
34368+#ifdef CONFIG_IA32_EMULATION
34369+ if (test_tsk_thread_flag(tsk, TIF_IA32))
34370+ return NULL;
34371+#endif
34372+ return &gate_vma;
34373+}
34374+
34375+int in_gate_area(struct task_struct *task, unsigned long addr)
34376+{
34377+ struct vm_area_struct *vma = get_gate_vma(task);
34378+ if (!vma)
34379+ return 0;
34380+ return (addr >= vma->vm_start) && (addr < vma->vm_end);
34381+}
34382+
34383+/* Use this when you have no reliable task/vma, typically from interrupt
34384+ * context. It is less reliable than using the task's vma and may give
34385+ * false positives.
34386+ */
34387+int in_gate_area_no_task(unsigned long addr)
34388+{
34389+ return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
34390+}
34391Index: head-2008-11-25/arch/x86/mm/pageattr_64-xen.c
34392===================================================================
34393--- /dev/null 1970-01-01 00:00:00.000000000 +0000
34394+++ head-2008-11-25/arch/x86/mm/pageattr_64-xen.c 2008-07-21 11:00:32.000000000 +0200
34395@@ -0,0 +1,502 @@
34396+/*
34397+ * Copyright 2002 Andi Kleen, SuSE Labs.
34398+ * Thanks to Ben LaHaise for precious feedback.
34399+ */
34400+
34401+#include <linux/mm.h>
34402+#include <linux/sched.h>
34403+#include <linux/highmem.h>
34404+#include <linux/module.h>
34405+#include <linux/slab.h>
34406+#include <asm/uaccess.h>
34407+#include <asm/processor.h>
34408+#include <asm/tlbflush.h>
34409+#include <asm/io.h>
34410+
34411+#ifdef CONFIG_XEN
34412+#include <asm/pgalloc.h>
34413+#include <asm/mmu_context.h>
34414+
34415+LIST_HEAD(mm_unpinned);
34416+DEFINE_SPINLOCK(mm_unpinned_lock);
34417+
34418+static void _pin_lock(struct mm_struct *mm, int lock) {
34419+ if (lock)
34420+ spin_lock(&mm->page_table_lock);
34421+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
34422+ /* While mm->page_table_lock protects us against insertions and
34423+ * removals of higher level page table pages, it doesn't protect
34424+ * against updates of pte-s. Such updates, however, require the
34425+ * pte pages to be in consistent state (unpinned+writable or
34426+ * pinned+readonly). The pinning and attribute changes, however
34427+ * cannot be done atomically, which is why such updates must be
34428+ * prevented from happening concurrently.
34429+ * Note that no pte lock can ever elsewhere be acquired nesting
34430+ * with an already acquired one in the same mm, or with the mm's
34431+ * page_table_lock already acquired, as that would break in the
34432+ * non-split case (where all these are actually resolving to the
34433+ * one page_table_lock). Thus acquiring all of them here is not
34434+	 * going to result in deadlocks, and the order of acquires
34435+ * doesn't matter.
34436+ */
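+	/*
+	 * Concretely (illustration): the walk below takes (lock != 0) or
+	 * releases (lock == 0) the pte lock of every pte page in the mm, so
+	 * a pin_lock(mm)/pin_unlock(mm) pair holds mm->page_table_lock plus
+	 * all of those pte locks for the duration of a pin or unpin.
+	 */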
34437+ {
34438+ pgd_t *pgd = mm->pgd;
34439+ unsigned g;
34440+
34441+ for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
34442+ pud_t *pud;
34443+ unsigned u;
34444+
34445+ if (pgd_none(*pgd))
34446+ continue;
34447+ pud = pud_offset(pgd, 0);
34448+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
34449+ pmd_t *pmd;
34450+ unsigned m;
34451+
34452+ if (pud_none(*pud))
34453+ continue;
34454+ pmd = pmd_offset(pud, 0);
34455+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
34456+ spinlock_t *ptl;
34457+
34458+ if (pmd_none(*pmd))
34459+ continue;
34460+ ptl = pte_lockptr(0, pmd);
34461+ if (lock)
34462+ spin_lock(ptl);
34463+ else
34464+ spin_unlock(ptl);
34465+ }
34466+ }
34467+ }
34468+ }
34469+#endif
34470+ if (!lock)
34471+ spin_unlock(&mm->page_table_lock);
34472+}
34473+#define pin_lock(mm) _pin_lock(mm, 1)
34474+#define pin_unlock(mm) _pin_lock(mm, 0)
34475+
34476+#define PIN_BATCH 8
34477+static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
34478+
34479+static inline unsigned int mm_walk_set_prot(void *pt, pgprot_t flags,
34480+ unsigned int cpu, unsigned int seq)
34481+{
34482+ struct page *page = virt_to_page(pt);
34483+ unsigned long pfn = page_to_pfn(page);
34484+
34485+ MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
34486+ (unsigned long)__va(pfn << PAGE_SHIFT),
34487+ pfn_pte(pfn, flags), 0);
34488+ if (unlikely(++seq == PIN_BATCH)) {
34489+ if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
34490+ PIN_BATCH, NULL)))
34491+ BUG();
34492+ seq = 0;
34493+ }
34494+
34495+ return seq;
34496+}
34497+
34498+static void mm_walk(struct mm_struct *mm, pgprot_t flags)
34499+{
34500+ pgd_t *pgd;
34501+ pud_t *pud;
34502+ pmd_t *pmd;
34503+ pte_t *pte;
34504+ int g,u,m;
34505+ unsigned int cpu, seq;
34506+ multicall_entry_t *mcl;
34507+
34508+ pgd = mm->pgd;
34509+ cpu = get_cpu();
34510+
34511+ /*
34512+ * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
34513+ * be the 'current' task's pagetables (e.g., current may be 32-bit,
34514+ * but the pagetables may be for a 64-bit task).
34515+ * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
34516+ * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
34517+ */
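+	/*
+	 * Example (illustrative): with TASK_SIZE64 = 2^47 - PAGE_SIZE and
+	 * PGDIR_SIZE = 2^39 this gives (TASK_SIZE64-1)/PGDIR_SIZE = 255, so g
+	 * covers the 256 user pgd slots 0..255; without the -1, a TASK_SIZE64
+	 * that is an exact multiple of PGDIR_SIZE would walk one slot too many.
+	 */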
34518+ for (g = 0, seq = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
34519+ if (pgd_none(*pgd))
34520+ continue;
34521+ pud = pud_offset(pgd, 0);
34522+ if (PTRS_PER_PUD > 1) /* not folded */
34523+ seq = mm_walk_set_prot(pud,flags,cpu,seq);
34524+ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
34525+ if (pud_none(*pud))
34526+ continue;
34527+ pmd = pmd_offset(pud, 0);
34528+ if (PTRS_PER_PMD > 1) /* not folded */
34529+ seq = mm_walk_set_prot(pmd,flags,cpu,seq);
34530+ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
34531+ if (pmd_none(*pmd))
34532+ continue;
34533+ pte = pte_offset_kernel(pmd,0);
34534+ seq = mm_walk_set_prot(pte,flags,cpu,seq);
34535+ }
34536+ }
34537+ }
34538+
34539+ mcl = per_cpu(pb_mcl, cpu);
34540+ if (unlikely(seq > PIN_BATCH - 2)) {
34541+ if (unlikely(HYPERVISOR_multicall_check(mcl, seq, NULL)))
34542+ BUG();
34543+ seq = 0;
34544+ }
34545+ MULTI_update_va_mapping(mcl + seq,
34546+ (unsigned long)__user_pgd(mm->pgd),
34547+ pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, flags),
34548+ 0);
34549+ MULTI_update_va_mapping(mcl + seq + 1,
34550+ (unsigned long)mm->pgd,
34551+ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, flags),
34552+ UVMF_TLB_FLUSH);
34553+ if (unlikely(HYPERVISOR_multicall_check(mcl, seq + 2, NULL)))
34554+ BUG();
34555+
34556+ put_cpu();
34557+}
34558+
34559+void mm_pin(struct mm_struct *mm)
34560+{
34561+ if (xen_feature(XENFEAT_writable_page_tables))
34562+ return;
34563+
34564+ pin_lock(mm);
34565+
34566+ mm_walk(mm, PAGE_KERNEL_RO);
34567+ xen_pgd_pin(__pa(mm->pgd)); /* kernel */
34568+ xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
34569+ mm->context.pinned = 1;
34570+ spin_lock(&mm_unpinned_lock);
34571+ list_del(&mm->context.unpinned);
34572+ spin_unlock(&mm_unpinned_lock);
34573+
34574+ pin_unlock(mm);
34575+}
34576+
34577+void mm_unpin(struct mm_struct *mm)
34578+{
34579+ if (xen_feature(XENFEAT_writable_page_tables))
34580+ return;
34581+
34582+ pin_lock(mm);
34583+
34584+ xen_pgd_unpin(__pa(mm->pgd));
34585+ xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
34586+ mm_walk(mm, PAGE_KERNEL);
34587+ mm->context.pinned = 0;
34588+ spin_lock(&mm_unpinned_lock);
34589+ list_add(&mm->context.unpinned, &mm_unpinned);
34590+ spin_unlock(&mm_unpinned_lock);
34591+
34592+ pin_unlock(mm);
34593+}
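+
+/*
+ * Note the ordering implemented above: mm_pin() makes all page-table pages
+ * read-only before issuing xen_pgd_pin(), while mm_unpin() issues
+ * xen_pgd_unpin() before making them writable again, since the hypervisor
+ * refuses to pin page tables that are still mapped writable.
+ */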
34594+
34595+void mm_pin_all(void)
34596+{
34597+ if (xen_feature(XENFEAT_writable_page_tables))
34598+ return;
34599+
34600+ /*
34601+ * Allow uninterrupted access to the mm_unpinned list. We don't
34602+ * actually take the mm_unpinned_lock as it is taken inside mm_pin().
34603+ * All other CPUs must be at a safe point (e.g., in stop_machine
34604+ * or offlined entirely).
34605+ */
34606+ preempt_disable();
34607+ while (!list_empty(&mm_unpinned))
34608+ mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
34609+ context.unpinned));
34610+ preempt_enable();
34611+}
34612+
34613+void _arch_dup_mmap(struct mm_struct *mm)
34614+{
34615+ if (!mm->context.pinned)
34616+ mm_pin(mm);
34617+}
34618+
34619+void _arch_exit_mmap(struct mm_struct *mm)
34620+{
34621+ struct task_struct *tsk = current;
34622+
34623+ task_lock(tsk);
34624+
34625+ /*
34626+ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
34627+	 * *much* faster this way, as doing no tlb flushes means bigger wrpt batches.
34628+ */
34629+ if (tsk->active_mm == mm) {
34630+ tsk->active_mm = &init_mm;
34631+ atomic_inc(&init_mm.mm_count);
34632+
34633+ switch_mm(mm, &init_mm, tsk);
34634+
34635+ atomic_dec(&mm->mm_count);
34636+ BUG_ON(atomic_read(&mm->mm_count) == 0);
34637+ }
34638+
34639+ task_unlock(tsk);
34640+
34641+ if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
34642+ !mm->context.has_foreign_mappings )
34643+ mm_unpin(mm);
34644+}
34645+
34646+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
34647+{
34648+ struct page *pte;
34649+
34650+ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
34651+ if (pte) {
34652+ SetPageForeign(pte, pte_free);
34653+ init_page_count(pte);
34654+ }
34655+ return pte;
34656+}
34657+
34658+void pte_free(struct page *pte)
34659+{
34660+ unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
34661+
34662+ if (!pte_write(*virt_to_ptep(va)))
34663+ if (HYPERVISOR_update_va_mapping(
34664+ va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0))
34665+ BUG();
34666+
34667+ ClearPageForeign(pte);
34668+ init_page_count(pte);
34669+
34670+ __free_page(pte);
34671+}
34672+#endif /* CONFIG_XEN */
34673+
34674+pte_t *lookup_address(unsigned long address)
34675+{
34676+ pgd_t *pgd = pgd_offset_k(address);
34677+ pud_t *pud;
34678+ pmd_t *pmd;
34679+ pte_t *pte;
34680+ if (pgd_none(*pgd))
34681+ return NULL;
34682+ pud = pud_offset(pgd, address);
34683+ if (!pud_present(*pud))
34684+ return NULL;
34685+ pmd = pmd_offset(pud, address);
34686+ if (!pmd_present(*pmd))
34687+ return NULL;
34688+ if (pmd_large(*pmd))
34689+ return (pte_t *)pmd;
34690+ pte = pte_offset_kernel(pmd, address);
34691+ if (pte && !pte_present(*pte))
34692+ pte = NULL;
34693+ return pte;
34694+}
34695+
34696+static struct page *split_large_page(unsigned long address, pgprot_t prot,
34697+ pgprot_t ref_prot)
34698+{
34699+ int i;
34700+ unsigned long addr;
34701+ struct page *base = alloc_pages(GFP_KERNEL, 0);
34702+ pte_t *pbase;
34703+ if (!base)
34704+ return NULL;
34705+ /*
34706+ * page_private is used to track the number of entries in
34707+	 * the page table page that have non-standard attributes.
34708+ */
34709+ SetPagePrivate(base);
34710+ page_private(base) = 0;
34711+
34712+ address = __pa(address);
34713+ addr = address & LARGE_PAGE_MASK;
34714+ pbase = (pte_t *)page_address(base);
34715+ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
34716+ pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
34717+ addr == address ? prot : ref_prot);
34718+ }
34719+ return base;
34720+}
34721+
34722+
34723+static void flush_kernel_map(void *address)
34724+{
34725+ if (0 && address && cpu_has_clflush) {
34726+ /* is this worth it? */
34727+ int i;
34728+ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
34729+ asm volatile("clflush (%0)" :: "r" (address + i));
34730+ } else
34731+ asm volatile("wbinvd":::"memory");
34732+ if (address)
34733+ __flush_tlb_one(address);
34734+ else
34735+ __flush_tlb_all();
34736+}
34737+
34738+
34739+static inline void flush_map(unsigned long address)
34740+{
34741+ on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
34742+}
34743+
34744+static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
34745+
34746+static inline void save_page(struct page *fpage)
34747+{
34748+ fpage->lru.next = (struct list_head *)deferred_pages;
34749+ deferred_pages = fpage;
34750+}
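+
+/*
+ * Deferral illustrated: when a split pte page's private count drops back to
+ * zero, revert_page() re-establishes the large mapping right away, but the
+ * pte page itself is only queued here (chained via page->lru.next) and is
+ * freed in global_flush_tlb() after the flush, so no CPU can still reach it
+ * through a stale TLB entry once it is reused.
+ */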
34751+
34752+/*
34753+ * No more special protections in this 2/4MB area - revert to a
34754+ * large page again.
34755+ */
34756+static void revert_page(unsigned long address, pgprot_t ref_prot)
34757+{
34758+ pgd_t *pgd;
34759+ pud_t *pud;
34760+ pmd_t *pmd;
34761+ pte_t large_pte;
34762+
34763+ pgd = pgd_offset_k(address);
34764+ BUG_ON(pgd_none(*pgd));
34765+ pud = pud_offset(pgd,address);
34766+ BUG_ON(pud_none(*pud));
34767+ pmd = pmd_offset(pud, address);
34768+ BUG_ON(__pmd_val(*pmd) & _PAGE_PSE);
34769+ pgprot_val(ref_prot) |= _PAGE_PSE;
34770+ large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
34771+ set_pte((pte_t *)pmd, large_pte);
34772+}
34773+
34774+static int
34775+__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
34776+ pgprot_t ref_prot)
34777+{
34778+ pte_t *kpte;
34779+ struct page *kpte_page;
34780+ unsigned kpte_flags;
34781+ pgprot_t ref_prot2;
34782+ kpte = lookup_address(address);
34783+ if (!kpte) return 0;
34784+ kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
34785+ kpte_flags = pte_val(*kpte);
34786+ if (pgprot_val(prot) != pgprot_val(ref_prot)) {
34787+ if ((kpte_flags & _PAGE_PSE) == 0) {
34788+ set_pte(kpte, pfn_pte(pfn, prot));
34789+ } else {
34790+ /*
34791+ * split_large_page will take the reference for this
34792+ * change_page_attr on the split page.
34793+ */
34794+
34795+ struct page *split;
34796+ ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
34797+
34798+ split = split_large_page(address, prot, ref_prot2);
34799+ if (!split)
34800+ return -ENOMEM;
34801+ set_pte(kpte,mk_pte(split, ref_prot2));
34802+ kpte_page = split;
34803+ }
34804+ page_private(kpte_page)++;
34805+ } else if ((kpte_flags & _PAGE_PSE) == 0) {
34806+ set_pte(kpte, pfn_pte(pfn, ref_prot));
34807+ BUG_ON(page_private(kpte_page) == 0);
34808+ page_private(kpte_page)--;
34809+ } else
34810+ BUG();
34811+
34812+	/* On x86-64 the direct mapping set up at boot does not use 4k pages. */
34813+ /*
34814+ * ..., but the XEN guest kernels (currently) do:
34815+ * If the pte was reserved, it means it was created at boot
34816+ * time (not via split_large_page) and in turn we must not
34817+ * replace it with a large page.
34818+ */
34819+#ifndef CONFIG_XEN
34820+ BUG_ON(PageReserved(kpte_page));
34821+#else
34822+ if (PageReserved(kpte_page))
34823+ return 0;
34824+#endif
34825+
34826+ if (page_private(kpte_page) == 0) {
34827+ save_page(kpte_page);
34828+ revert_page(address, ref_prot);
34829+ }
34830+ return 0;
34831+}
34832+
34833+/*
34834+ * Change the page attributes of a page in the linear mapping.
34835+ *
34836+ * This should be used when a page is mapped with a different caching policy
34837+ * than write-back somewhere - some CPUs do not like it when mappings with
34838+ * different caching policies exist. This changes the page attributes in the
34839+ * kernel linear mapping too.
34840+ *
34841+ * The caller needs to ensure that there are no conflicting mappings elsewhere.
34842+ * This function only deals with the kernel linear map.
34843+ *
34844+ * Caller must call global_flush_tlb() after this.
34845+ */
34846+int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
34847+{
34848+ int err = 0;
34849+ int i;
34850+
34851+ down_write(&init_mm.mmap_sem);
34852+ for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
34853+ unsigned long pfn = __pa(address) >> PAGE_SHIFT;
34854+
34855+ err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
34856+ if (err)
34857+ break;
34858+		/* Handle the kernel mapping too, which aliases part of
34859+		 * lowmem */
34860+ if (__pa(address) < KERNEL_TEXT_SIZE) {
34861+ unsigned long addr2;
34862+ pgprot_t prot2 = prot;
34863+ addr2 = __START_KERNEL_map + __pa(address);
34864+ pgprot_val(prot2) &= ~_PAGE_NX;
34865+ err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
34866+ }
34867+ }
34868+ up_write(&init_mm.mmap_sem);
34869+ return err;
34870+}
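+
+/*
+ * Typical usage (illustrative; mark_rodata_ro() in init_64-xen.c earlier in
+ * this patch follows the same pattern):
+ *
+ *	change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
+ *	...
+ *	global_flush_tlb();
+ *
+ * The change only becomes fully visible, and deferred split pages are only
+ * reclaimed, once global_flush_tlb() has run.
+ */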
34871+
34872+/* Don't call this for MMIO areas that may not have a mem_map entry */
34873+int change_page_attr(struct page *page, int numpages, pgprot_t prot)
34874+{
34875+ unsigned long addr = (unsigned long)page_address(page);
34876+ return change_page_attr_addr(addr, numpages, prot);
34877+}
34878+
34879+void global_flush_tlb(void)
34880+{
34881+ struct page *dpage;
34882+
34883+ down_read(&init_mm.mmap_sem);
34884+ dpage = xchg(&deferred_pages, NULL);
34885+ up_read(&init_mm.mmap_sem);
34886+
34887+ flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
34888+ while (dpage) {
34889+ struct page *tmp = dpage;
34890+ dpage = (struct page *)dpage->lru.next;
34891+ ClearPagePrivate(tmp);
34892+ __free_page(tmp);
34893+ }
34894+}
34895+
34896+EXPORT_SYMBOL(change_page_attr);
34897+EXPORT_SYMBOL(global_flush_tlb);
34898Index: head-2008-11-25/drivers/pci/msi-xen.c
34899===================================================================
34900--- /dev/null 1970-01-01 00:00:00.000000000 +0000
34901+++ head-2008-11-25/drivers/pci/msi-xen.c 2008-10-13 13:43:45.000000000 +0200
34902@@ -0,0 +1,809 @@
34903+/*
34904+ * File: msi.c
34905+ * Purpose: PCI Message Signaled Interrupt (MSI)
34906+ *
34907+ * Copyright (C) 2003-2004 Intel
34908+ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
34909+ */
34910+
34911+#include <linux/mm.h>
34912+#include <linux/irq.h>
34913+#include <linux/interrupt.h>
34914+#include <linux/init.h>
34915+#include <linux/ioport.h>
34916+#include <linux/smp_lock.h>
34917+#include <linux/pci.h>
34918+#include <linux/proc_fs.h>
34919+
34920+#include <xen/evtchn.h>
34921+
34922+#include <asm/errno.h>
34923+#include <asm/io.h>
34924+#include <asm/smp.h>
34925+
34926+#include "pci.h"
34927+#include "msi.h"
34928+
34929+static int pci_msi_enable = 1;
34930+
34931+static struct msi_ops *msi_ops;
34932+
34933+int msi_register(struct msi_ops *ops)
34934+{
34935+ msi_ops = ops;
34936+ return 0;
34937+}
34938+
34939+static LIST_HEAD(msi_dev_head);
34940+DEFINE_SPINLOCK(msi_dev_lock);
34941+
34942+struct msi_dev_list {
34943+ struct pci_dev *dev;
34944+ struct list_head list;
34945+ spinlock_t pirq_list_lock;
34946+ struct list_head pirq_list_head;
34947+};
34948+
34949+struct msi_pirq_entry {
34950+ struct list_head list;
34951+ int pirq;
34952+ int entry_nr;
34953+};
34954+
34955+static struct msi_dev_list *get_msi_dev_pirq_list(struct pci_dev *dev)
34956+{
34957+ struct msi_dev_list *msi_dev_list, *ret = NULL;
34958+ unsigned long flags;
34959+
34960+ spin_lock_irqsave(&msi_dev_lock, flags);
34961+
34962+ list_for_each_entry(msi_dev_list, &msi_dev_head, list)
34963+ if ( msi_dev_list->dev == dev )
34964+ ret = msi_dev_list;
34965+
34966+ if ( ret ) {
34967+ spin_unlock_irqrestore(&msi_dev_lock, flags);
34968+ return ret;
34969+ }
34970+
34971+	/* No msi_dev has been allocated for this device yet. */
34972+ ret = kzalloc(sizeof(struct msi_dev_list), GFP_ATOMIC);
34973+
34974+ /* Failed to allocate msi_dev structure */
34975+ if ( !ret ) {
34976+ spin_unlock_irqrestore(&msi_dev_lock, flags);
34977+ return NULL;
34978+ }
34979+
34980+ ret->dev = dev;
34981+ spin_lock_init(&ret->pirq_list_lock);
34982+ INIT_LIST_HEAD(&ret->pirq_list_head);
34983+ list_add_tail(&ret->list, &msi_dev_head);
34984+ spin_unlock_irqrestore(&msi_dev_lock, flags);
34985+ return ret;
34986+}
34987+
34988+static int attach_pirq_entry(int pirq, int entry_nr,
34989+ struct msi_dev_list *msi_dev_entry)
34990+{
34991+ struct msi_pirq_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
34992+ unsigned long flags;
34993+
34994+ if (!entry)
34995+ return -ENOMEM;
34996+ entry->pirq = pirq;
34997+ entry->entry_nr = entry_nr;
34998+ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
34999+ list_add_tail(&entry->list, &msi_dev_entry->pirq_list_head);
35000+ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
35001+ return 0;
35002+}
35003+
35004+static void detach_pirq_entry(int entry_nr,
35005+ struct msi_dev_list *msi_dev_entry)
35006+{
35007+ unsigned long flags;
35008+ struct msi_pirq_entry *pirq_entry;
35009+
35010+ list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
35011+ if (pirq_entry->entry_nr == entry_nr) {
35012+ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
35013+ list_del(&pirq_entry->list);
35014+ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
35015+ kfree(pirq_entry);
35016+ return;
35017+ }
35018+ }
35019+}
35020+
35021+/*
35022+ * pciback will provide the device's owner
35023+ */
35024+static int (*get_owner)(struct pci_dev *dev);
35025+
35026+int register_msi_get_owner(int (*func)(struct pci_dev *dev))
35027+{
35028+ if (get_owner) {
35029+ printk(KERN_WARNING "register msi_get_owner again\n");
35030+ return -EEXIST;
35031+ }
35032+ get_owner = func;
35033+ return 0;
35034+}
35035+
35036+int unregister_msi_get_owner(int (*func)(struct pci_dev *dev))
35037+{
35038+ if (get_owner != func)
35039+ return -EINVAL;
35040+ get_owner = NULL;
35041+ return 0;
35042+}
35043+
35044+static int msi_get_dev_owner(struct pci_dev *dev)
35045+{
35046+ int owner;
35047+
35048+ BUG_ON(!is_initial_xendomain());
35049+ if (get_owner && (owner = get_owner(dev)) >= 0) {
35050+		printk(KERN_INFO "get owner for dev %x: owner %x\n",
35051+ dev->devfn, owner);
35052+ return owner;
35053+ }
35054+
35055+ return DOMID_SELF;
35056+}
35057+
35058+static int msi_unmap_pirq(struct pci_dev *dev, int pirq)
35059+{
35060+ struct physdev_unmap_pirq unmap;
35061+ int rc;
35062+
35063+ unmap.domid = msi_get_dev_owner(dev);
35064+ /* See comments in msi_map_pirq_to_vector: the input parameter pirq
35065+ * is a Linux irq number only if the device belongs to dom0 itself.
35066+ */
35067+ unmap.pirq = (unmap.domid != DOMID_SELF)
35068+ ? pirq : evtchn_get_xen_pirq(pirq);
35069+
35070+ if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap)))
35071+ printk(KERN_WARNING "unmap irq %x failed\n", pirq);
35072+
35073+ if (rc < 0)
35074+ return rc;
35075+
35076+ if (unmap.domid == DOMID_SELF)
35077+ evtchn_map_pirq(pirq, 0);
35078+
35079+ return 0;
35080+}
35081+
35082+static u64 find_table_base(struct pci_dev *dev, int pos)
35083+{
35084+ u8 bar;
35085+ u32 reg;
35086+ unsigned long flags;
35087+
35088+ pci_read_config_dword(dev, msix_table_offset_reg(pos), &reg);
35089+ bar = reg & PCI_MSIX_FLAGS_BIRMASK;
35090+
35091+ flags = pci_resource_flags(dev, bar);
35092+ if (flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET | IORESOURCE_BUSY))
35093+ return 0;
35094+
35095+ return pci_resource_start(dev, bar);
35096+}
35097+
35098+/*
35099+ * Protected by msi_lock
35100+ */
35101+static int msi_map_pirq_to_vector(struct pci_dev *dev, int pirq,
35102+ int entry_nr, u64 table_base)
35103+{
35104+ struct physdev_map_pirq map_irq;
35105+ int rc;
35106+ domid_t domid = DOMID_SELF;
35107+
35108+ domid = msi_get_dev_owner(dev);
35109+
35110+ map_irq.domid = domid;
35111+ map_irq.type = MAP_PIRQ_TYPE_MSI;
35112+ map_irq.index = -1;
35113+ map_irq.pirq = pirq < 0 ? -1 : evtchn_get_xen_pirq(pirq);
35114+ map_irq.bus = dev->bus->number;
35115+ map_irq.devfn = dev->devfn;
35116+ map_irq.entry_nr = entry_nr;
35117+ map_irq.table_base = table_base;
35118+
35119+ if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq)))
35120+ printk(KERN_WARNING "map irq failed\n");
35121+
35122+ if (rc < 0)
35123+ return rc;
35124+ /* This happens when MSI support is not enabled in Xen. */
35125+ if (rc == 0 && map_irq.pirq < 0)
35126+ return -ENOSYS;
35127+
35128+ BUG_ON(map_irq.pirq <= 0);
35129+
35130+ /* If mapping of this particular MSI is on behalf of another domain,
35131+ * we do not need to get an irq in dom0. This also implies:
35132+ * dev->irq in dom0 will be a 'Xen pirq' if this device belongs
35133+ * to another domain, and will be a 'Linux irq' if it belongs to dom0.
35134+ */
35135+ return ((domid != DOMID_SELF) ?
35136+ map_irq.pirq : evtchn_map_pirq(pirq, map_irq.pirq));
35137+}
35138+
35139+static int msi_map_vector(struct pci_dev *dev, int entry_nr, u64 table_base)
35140+{
35141+ return msi_map_pirq_to_vector(dev, -1, entry_nr, table_base);
35142+}
35143+
35144+static int msi_init(void)
35145+{
35146+ static int status = 0;
35147+
35148+ if (pci_msi_quirk) {
35149+ pci_msi_enable = 0;
35150+ printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
35151+ status = -EINVAL;
35152+ }
35153+
35154+ return status;
35155+}
35156+
35157+void pci_scan_msi_device(struct pci_dev *dev) { }
35158+
35159+void disable_msi_mode(struct pci_dev *dev, int pos, int type)
35160+{
35161+ u16 control;
35162+
35163+ pci_read_config_word(dev, msi_control_reg(pos), &control);
35164+ if (type == PCI_CAP_ID_MSI) {
35165+ /* Clear the MSI_enable bit to disable MSI */
35166+ msi_disable(control);
35167+ pci_write_config_word(dev, msi_control_reg(pos), control);
35168+ dev->msi_enabled = 0;
35169+ } else {
35170+ msix_disable(control);
35171+ pci_write_config_word(dev, msi_control_reg(pos), control);
35172+ dev->msix_enabled = 0;
35173+ }
35174+ if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
35175+ /* PCI Express Endpoint device detected */
35176+ pci_intx(dev, 1); /* enable intx */
35177+ }
35178+}
35179+
35180+static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
35181+{
35182+ u16 control;
35183+
35184+ pci_read_config_word(dev, msi_control_reg(pos), &control);
35185+ if (type == PCI_CAP_ID_MSI) {
35186+ /* Set enabled bits to single MSI & enable MSI_enable bit */
35187+ msi_enable(control, 1);
35188+ pci_write_config_word(dev, msi_control_reg(pos), control);
35189+ dev->msi_enabled = 1;
35190+ } else {
35191+ msix_enable(control);
35192+ pci_write_config_word(dev, msi_control_reg(pos), control);
35193+ dev->msix_enabled = 1;
35194+ }
35195+ if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
35196+ /* PCI Express Endpoint device detected */
35197+ pci_intx(dev, 0); /* disable intx */
35198+ }
35199+}
35200+
35201+#ifdef CONFIG_PM
35202+int pci_save_msi_state(struct pci_dev *dev)
35203+{
35204+ int pos;
35205+
35206+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
35207+ if (pos <= 0 || dev->no_msi)
35208+ return 0;
35209+
35210+ if (!dev->msi_enabled)
35211+ return 0;
35212+
35213+ /* Restore dev->irq to its default pin-assertion vector */
35214+ msi_unmap_pirq(dev, dev->irq);
35215+ /* Disable MSI mode */
35216+ disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
35217+ /* Keep the flag set so the restore path knows MSI was enabled */
35218+ dev->msi_enabled = 1;
35219+ return 0;
35220+}
35221+
35222+void pci_restore_msi_state(struct pci_dev *dev)
35223+{
35224+ int pos, pirq;
35225+
35226+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
35227+ if (pos <= 0)
35228+ return;
35229+
35230+ if (!dev->msi_enabled)
35231+ return;
35232+
35233+ pirq = msi_map_pirq_to_vector(dev, dev->irq, 0, 0);
35234+ if (pirq < 0)
35235+ return;
35236+ enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
35237+}
35238+
35239+int pci_save_msix_state(struct pci_dev *dev)
35240+{
35241+ int pos;
35242+ unsigned long flags;
35243+ struct msi_dev_list *msi_dev_entry;
35244+ struct msi_pirq_entry *pirq_entry, *tmp;
35245+
35246+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
35247+ if (pos <= 0 || dev->no_msi)
35248+ return 0;
35249+
35250+ /* save the capability */
35251+ if (!dev->msix_enabled)
35252+ return 0;
35253+
35254+ msi_dev_entry = get_msi_dev_pirq_list(dev);
35255+
35256+ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
35257+ list_for_each_entry_safe(pirq_entry, tmp,
35258+ &msi_dev_entry->pirq_list_head, list)
35259+ msi_unmap_pirq(dev, pirq_entry->pirq);
35260+ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
35261+
35262+ disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
35263+ /* Keep the flag set so the restore path knows MSI-X was enabled */
35264+ dev->msix_enabled = 1;
35265+
35266+ return 0;
35267+}
35268+
35269+void pci_restore_msix_state(struct pci_dev *dev)
35270+{
35271+ int pos;
35272+ unsigned long flags;
35273+ u64 table_base;
35274+ struct msi_dev_list *msi_dev_entry;
35275+ struct msi_pirq_entry *pirq_entry, *tmp;
35276+
35277+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
35278+ if (pos <= 0)
35279+ return;
35280+
35281+ if (!dev->msix_enabled)
35282+ return;
35283+
35284+ msi_dev_entry = get_msi_dev_pirq_list(dev);
35285+ table_base = find_table_base(dev, pos);
35286+ if (!table_base)
35287+ return;
35288+
35289+ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
35290+ list_for_each_entry_safe(pirq_entry, tmp,
35291+ &msi_dev_entry->pirq_list_head, list) {
35292+ int rc = msi_map_pirq_to_vector(dev, pirq_entry->pirq,
35293+ pirq_entry->entry_nr, table_base);
35294+ if (rc < 0)
35295+ printk(KERN_WARNING
35296+ "%s: re-mapping irq #%d (pirq%d) failed: %d\n",
35297+ pci_name(dev), pirq_entry->entry_nr,
35298+ pirq_entry->pirq, rc);
35299+ }
35300+ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
35301+
35302+ enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
35303+}
35304+#endif
35305+
35306+/**
35307+ * msi_capability_init - configure device's MSI capability structure
35308+ * @dev: pointer to the pci_dev data structure of MSI device function
35309+ *
35310+ * Set up the MSI capability structure of the device function with a single
35311+ * MSI vector, regardless of whether the device function is capable of
35312+ * handling multiple messages. A return of zero indicates successful setup
35313+ * of entry zero with the new MSI vector; non-zero indicates failure.
35314+ **/
35315+static int msi_capability_init(struct pci_dev *dev)
35316+{
35317+ int pos, pirq;
35318+ u16 control;
35319+
35320+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
35321+ pci_read_config_word(dev, msi_control_reg(pos), &control);
35322+
35323+ pirq = msi_map_vector(dev, 0, 0);
35324+ if (pirq < 0)
35325+ return -EBUSY;
35326+
35327+ dev->irq = pirq;
35328+ /* Set MSI enabled bits */
35329+ enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
35330+ dev->msi_enabled = 1;
35331+
35332+ return 0;
35333+}
35334+
35335+/**
35336+ * msix_capability_init - configure device's MSI-X capability
35337+ * @dev: pointer to the pci_dev data structure of MSI-X device function
35338+ * @entries: pointer to an array of struct msix_entry entries
35339+ * @nvec: number of @entries
35340+ *
35341+ * Set up the MSI-X capability structure of the device function. A return
35342+ * of zero indicates successful setup of the requested MSI-X entries with
35343+ * allocated vectors; non-zero indicates failure.
35344+ **/
35345+static int msix_capability_init(struct pci_dev *dev,
35346+ struct msix_entry *entries, int nvec)
35347+{
35348+ u64 table_base;
35349+ int pirq, i, j, mapped, pos;
35350+ struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev);
35351+ struct msi_pirq_entry *pirq_entry;
35352+
35353+ if (!msi_dev_entry)
35354+ return -ENOMEM;
35355+
35356+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
35357+ table_base = find_table_base(dev, pos);
35358+ if (!table_base)
35359+ return -ENODEV;
35360+
35361+ /* MSI-X Table Initialization */
35362+ for (i = 0; i < nvec; i++) {
35363+ mapped = 0;
35364+ list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
35365+ if (pirq_entry->entry_nr == entries[i].entry) {
35366+ printk(KERN_WARNING "msix entry %d for dev %02x:%02x:%01x was "
35367+ "not freed before being acquired again\n", entries[i].entry,
35368+ dev->bus->number, PCI_SLOT(dev->devfn),
35369+ PCI_FUNC(dev->devfn));
35370+ (entries + i)->vector = pirq_entry->pirq;
35371+ mapped = 1;
35372+ break;
35373+ }
35374+ }
35375+ if (mapped)
35376+ continue;
35377+ pirq = msi_map_vector(dev, entries[i].entry, table_base);
35378+ if (pirq < 0)
35379+ break;
35380+ attach_pirq_entry(pirq, entries[i].entry, msi_dev_entry);
35381+ (entries + i)->vector = pirq;
35382+ }
35383+
35384+ if (i != nvec) {
35385+ for (j = --i; j >= 0; j--) {
35386+ msi_unmap_pirq(dev, entries[j].vector);
35387+ detach_pirq_entry(entries[j].entry, msi_dev_entry);
35388+ entries[j].vector = 0;
35389+ }
35390+ return -EBUSY;
35391+ }
35392+
35393+ enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
35394+ dev->msix_enabled = 1;
35395+
35396+ return 0;
35397+}
35398+
35399+/**
35400+ * pci_enable_msi - configure device's MSI capability structure
35401+ * @dev: pointer to the pci_dev data structure of MSI device function
35402+ *
35403+ * Set up the MSI capability structure of the device function with
35404+ * a single MSI vector when its software driver requests MSI mode
35405+ * for the hardware device function. A return of zero indicates
35406+ * successful setup of entry zero with the new MSI vector; non-zero
35407+ * indicates failure.
35408+ **/
35409+extern int pci_frontend_enable_msi(struct pci_dev *dev);
35410+int pci_enable_msi(struct pci_dev* dev)
35411+{
35412+ struct pci_bus *bus;
35413+ int pos, temp, status = -EINVAL;
35414+
35415+ if (!pci_msi_enable || !dev)
35416+ return status;
35417+
35418+ if (dev->no_msi)
35419+ return status;
35420+
35421+ for (bus = dev->bus; bus; bus = bus->parent)
35422+ if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
35423+ return -EINVAL;
35424+
35425+ status = msi_init();
35426+ if (status < 0)
35427+ return status;
35428+
35429+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
35430+ if (!is_initial_xendomain())
35431+ {
35432+ int ret;
35433+
35434+ temp = dev->irq;
35435+ ret = pci_frontend_enable_msi(dev);
35436+ if (ret)
35437+ return ret;
35438+
35439+ dev->irq = evtchn_map_pirq(-1, dev->irq);
35440+ dev->irq_old = temp;
35441+
35442+ return ret;
35443+ }
35444+#endif
35445+
35446+ temp = dev->irq;
35447+
35448+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
35449+ if (!pos)
35450+ return -EINVAL;
35451+
35452+ /* Check whether the driver already requested MSI-X vectors */
35453+ if (dev->msix_enabled) {
35454+ printk(KERN_INFO "PCI: %s: Can't enable MSI. "
35455+ "Device already has MSI-X vectors assigned\n",
35456+ pci_name(dev));
35457+ dev->irq = temp;
35458+ return -EINVAL;
35459+ }
35460+
35461+ status = msi_capability_init(dev);
35462+ if ( !status )
35463+ dev->irq_old = temp;
35464+ else
35465+ dev->irq = temp;
35466+
35467+ return status;
35468+}
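
For illustration only, a minimal driver-side sketch of the call sequence this function supports; the example_* names and the trivial interrupt handler are hypothetical, and the usual includes (<linux/pci.h>, <linux/interrupt.h>) are assumed.

static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_setup_msi(struct pci_dev *dev)
{
	int rc = pci_enable_msi(dev);

	if (rc)
		return rc;	/* caller may fall back to legacy INTx */
	/* dev->irq now identifies the MSI interrupt for this device. */
	return request_irq(dev->irq, example_handler, 0, "example", dev);
}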
35469+
35470+extern void pci_frontend_disable_msi(struct pci_dev* dev);
35471+void pci_disable_msi(struct pci_dev* dev)
35472+{
35473+ int pos;
35474+ int pirq;
35475+
35476+ if (!pci_msi_enable)
35477+ return;
35478+ if (!dev)
35479+ return;
35480+
35481+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
35482+ if (!is_initial_xendomain()) {
35483+ evtchn_map_pirq(dev->irq, 0);
35484+ pci_frontend_disable_msi(dev);
35485+ dev->irq = dev->irq_old;
35486+ return;
35487+ }
35488+#endif
35489+
35490+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
35491+ if (!pos)
35492+ return;
35493+
35494+ pirq = dev->irq;
35495+ /* Restore dev->irq to its default pin-assertion vector */
35496+ dev->irq = dev->irq_old;
35497+ msi_unmap_pirq(dev, pirq);
35498+
35499+ /* Disable MSI mode */
35500+ disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
35501+}
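
Pairing with the sketch above, a hypothetical teardown path (again illustrative only): release the irq before disabling MSI, so the handler is gone by the time dev->irq is restored.

static void example_teardown_msi(struct pci_dev *dev)
{
	free_irq(dev->irq, dev);
	pci_disable_msi(dev);	/* restores dev->irq to its pre-MSI value */
}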
35502+
35503+/**
35504+ * pci_enable_msix - configure device's MSI-X capability structure
35505+ * @dev: pointer to the pci_dev data structure of MSI-X device function
35506+ * @entries: pointer to an array of MSI-X entries
35507+ * @nvec: number of MSI-X vectors requested for allocation by device driver
35508+ *
35509+ * Set up the MSI-X capability structure of the device function with the
35510+ * number of requested vectors when its software driver requests MSI-X
35511+ * mode for the hardware device function. A return of zero indicates
35512+ * successful configuration of the MSI-X capability structure with newly
35513+ * allocated MSI-X vectors. A return of < 0 indicates a failure. A return
35514+ * of > 0 indicates that the driver requested more vectors than are
35515+ * available; the driver should re-send its request with the returned
35516+ * value as the new vector count.
35517+ **/
35518+extern int pci_frontend_enable_msix(struct pci_dev *dev,
35519+ struct msix_entry *entries, int nvec);
35520+int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
35521+{
35522+ struct pci_bus *bus;
35523+ int status, pos, nr_entries;
35524+ int i, j, temp;
35525+ u16 control;
35526+
35527+ if (!pci_msi_enable || !dev || !entries)
35528+ return -EINVAL;
35529+
35530+ if (dev->no_msi)
35531+ return -EINVAL;
35532+
35533+ for (bus = dev->bus; bus; bus = bus->parent)
35534+ if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
35535+ return -EINVAL;
35536+
35537+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
35538+ if (!is_initial_xendomain()) {
35539+ struct msi_dev_list *msi_dev_entry;
35540+ struct msi_pirq_entry *pirq_entry;
35541+ int ret, irq;
35542+
35543+ ret = pci_frontend_enable_msix(dev, entries, nvec);
35544+ if (ret) {
35545+ printk(KERN_WARNING "pci_frontend_enable_msix failed: %x\n", ret);
35546+ return ret;
35547+ }
35548+
35549+ msi_dev_entry = get_msi_dev_pirq_list(dev);
35550+ for (i = 0; i < nvec; i++) {
35551+ int mapped = 0;
35552+
35553+ list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
35554+ if (pirq_entry->entry_nr == entries[i].entry) {
35555+ irq = pirq_entry->pirq;
35556+ BUG_ON(entries[i].vector != evtchn_get_xen_pirq(irq));
35557+ entries[i].vector = irq;
35558+ mapped = 1;
35559+ break;
35560+ }
35561+ }
35562+ if (mapped)
35563+ continue;
35564+ irq = evtchn_map_pirq(-1, entries[i].vector);
35565+ attach_pirq_entry(irq, entries[i].entry, msi_dev_entry);
35566+ entries[i].vector = irq;
35567+ }
35568+ return 0;
35569+ }
35570+#endif
35571+
35572+ status = msi_init();
35573+ if (status < 0)
35574+ return status;
35575+
35576+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
35577+ if (!pos)
35578+ return -EINVAL;
35579+
35580+ pci_read_config_word(dev, msi_control_reg(pos), &control);
35581+ nr_entries = multi_msix_capable(control);
35582+ if (nvec > nr_entries)
35583+ return -EINVAL;
35584+
35585+ /* Check for any invalid entries */
35586+ for (i = 0; i < nvec; i++) {
35587+ if (entries[i].entry >= nr_entries)
35588+ return -EINVAL; /* invalid entry */
35589+ for (j = i + 1; j < nvec; j++) {
35590+ if (entries[i].entry == entries[j].entry)
35591+ return -EINVAL; /* duplicate entry */
35592+ }
35593+ }
35594+
35595+ temp = dev->irq;
35596+ /* Check whether the driver already requested an MSI vector */
35597+ if (dev->msi_enabled) {
35598+ printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
35599+ "Device already has an MSI vector assigned\n",
35600+ pci_name(dev));
35601+ dev->irq = temp;
35602+ return -EINVAL;
35603+ }
35604+
35605+ status = msix_capability_init(dev, entries, nvec);
35606+
35607+ if ( !status )
35608+ dev->irq_old = temp;
35609+ else
35610+ dev->irq = temp;
35611+
35612+ return status;
35613+}
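
A hedged driver-side sketch of the retry contract documented in the comment above (names are illustrative; note that this particular implementation may simply return -EINVAL rather than a positive count when too many vectors are requested).

static int example_setup_msix(struct pci_dev *dev)
{
	struct msix_entry entries[4];
	int i, rc;

	for (i = 0; i < 4; i++)
		entries[i].entry = i;

	rc = pci_enable_msix(dev, entries, 4);
	if (rc > 0)
		/* Fewer vectors available; retry with the advertised count. */
		rc = pci_enable_msix(dev, entries, rc);
	if (rc)
		return rc;

	/* entries[i].vector now holds the irq (pirq) for each entry. */
	return 0;
}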
35614+
35615+extern void pci_frontend_disable_msix(struct pci_dev* dev);
35616+void pci_disable_msix(struct pci_dev* dev)
35617+{
35618+ int pos;
35619+ u16 control;
35620+
35621+
35622+ if (!pci_msi_enable)
35623+ return;
35624+ if (!dev)
35625+ return;
35626+
35627+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
35628+ if (!is_initial_xendomain()) {
35629+ struct msi_dev_list *msi_dev_entry;
35630+ struct msi_pirq_entry *pirq_entry, *tmp;
35631+
35632+ pci_frontend_disable_msix(dev);
35633+
35634+ msi_dev_entry = get_msi_dev_pirq_list(dev);
35635+ list_for_each_entry_safe(pirq_entry, tmp,
35636+ &msi_dev_entry->pirq_list_head, list) {
35637+ evtchn_map_pirq(pirq_entry->pirq, 0);
35638+ list_del(&pirq_entry->list);
35639+ kfree(pirq_entry);
35640+ }
35641+
35642+ dev->irq = dev->irq_old;
35643+ return;
35644+ }
35645+#endif
35646+
35647+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
35648+ if (!pos)
35649+ return;
35650+
35651+ pci_read_config_word(dev, msi_control_reg(pos), &control);
35652+ if (!(control & PCI_MSIX_FLAGS_ENABLE))
35653+ return;
35654+
35655+ msi_remove_pci_irq_vectors(dev);
35656+
35657+ /* Disable MSI mode */
35658+ disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
35659+}
35660+
35661+/**
35662+ * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
35663+ * @dev: pointer to the pci_dev data structure of MSI(X) device function
35664+ *
35665+ * Called during hotplug removal of the device function. All
35666+ * previously assigned MSI/MSI-X vectors, if allocated for this
35667+ * device function, are reclaimed to the unused state so that they
35668+ * may be reused later.
35669+ **/
35670+void msi_remove_pci_irq_vectors(struct pci_dev* dev)
35671+{
35672+ unsigned long flags;
35673+ struct msi_dev_list *msi_dev_entry;
35674+ struct msi_pirq_entry *pirq_entry, *tmp;
35675+
35676+ if (!pci_msi_enable || !dev)
35677+ return;
35678+
35679+ msi_dev_entry = get_msi_dev_pirq_list(dev);
35680+
35681+ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
35682+ if (!list_empty(&msi_dev_entry->pirq_list_head))
35683+ {
35684+ printk(KERN_WARNING "msix pirqs for dev %02x:%02x:%01x were not freed "
35685+ "before being acquired again\n", dev->bus->number,
35686+ PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
35687+ list_for_each_entry_safe(pirq_entry, tmp,
35688+ &msi_dev_entry->pirq_list_head, list) {
35689+ msi_unmap_pirq(dev, pirq_entry->pirq);
35690+ list_del(&pirq_entry->list);
35691+ kfree(pirq_entry);
35692+ }
35693+ }
35694+ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
35695+ dev->irq = dev->irq_old;
35696+}
35697+
35698+void pci_no_msi(void)
35699+{
35700+ pci_msi_enable = 0;
35701+}
35702+
35703+EXPORT_SYMBOL(pci_enable_msi);
35704+EXPORT_SYMBOL(pci_disable_msi);
35705+EXPORT_SYMBOL(pci_enable_msix);
35706+EXPORT_SYMBOL(pci_disable_msix);
35707+#ifdef CONFIG_XEN
35708+EXPORT_SYMBOL(register_msi_get_owner);
35709+EXPORT_SYMBOL(unregister_msi_get_owner);
35710+#endif
35711+
35712Index: head-2008-11-25/include/asm-x86/mach-xen/asm/agp.h
35713===================================================================
35714--- /dev/null 1970-01-01 00:00:00.000000000 +0000
35715+++ head-2008-11-25/include/asm-x86/mach-xen/asm/agp.h 2007-06-22 09:08:06.000000000 +0200
35716@@ -0,0 +1,44 @@
35717+#ifndef AGP_H
35718+#define AGP_H 1
35719+
35720+#include <asm/pgtable.h>
35721+#include <asm/cacheflush.h>
35722+#include <asm/system.h>
35723+
35724+/*
35725+ * Functions to keep the agpgart mappings coherent with the MMU.
35726+ * The GART gives the CPU a physical alias of pages in memory. The alias region is
35727+ * mapped uncacheable. Make sure there are no conflicting mappings
35728+ * with different cacheability attributes for the same page. This avoids
35729+ * data corruption on some CPUs.
35730+ */
35731+
35732+/* Caller's responsibility to call global_flush_tlb() for
35733+ * performance reasons */
35734+#define map_page_into_agp(page) ( \
35735+ xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \
35736+ ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
35737+#define unmap_page_from_agp(page) ( \
35738+ xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \
35739+ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \
35740+ change_page_attr(page, 1, PAGE_KERNEL))
35741+#define flush_agp_mappings() global_flush_tlb()
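
A brief hypothetical sketch of the calling pattern these macros imply (the example_ helper is not part of the patch): remap a page for GART use and perform the TLB flush that the comment above leaves to the caller.

static int example_prepare_agp_page(struct page *page)
{
	/* Via the macro above: contiguous-region exchange + PAGE_KERNEL_NOCACHE. */
	int rc = map_page_into_agp(page);

	if (rc)
		return rc;
	flush_agp_mappings();	/* the caller-side global TLB flush noted above */
	return 0;
}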
35742+
35743+/* Could use CLFLUSH here if the CPU supports it. But then it would
35744+ need to be called for each cacheline of the whole page, so it may not
35745+ be worth it; it would also need a page argument. */
35746+#define flush_agp_cache() wbinvd()
35747+
35748+/* Convert a physical address to an address suitable for the GART. */
35749+#define phys_to_gart(x) phys_to_machine(x)
35750+#define gart_to_phys(x) machine_to_phys(x)
35751+
35752+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
35753+#define alloc_gatt_pages(order) ({ \
35754+ char *_t; dma_addr_t _d; \
35755+ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \
35756+ _t; })
35757+#define free_gatt_pages(table, order) \
35758+ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
35759+
35760+#endif
35761Index: head-2008-11-25/include/asm-x86/mach-xen/asm/desc_32.h
35762===================================================================
35763--- /dev/null 1970-01-01 00:00:00.000000000 +0000
35764+++ head-2008-11-25/include/asm-x86/mach-xen/asm/desc_32.h 2008-01-28 12:24:19.000000000 +0100
35765@@ -0,0 +1,166 @@
35766+#ifndef __ARCH_DESC_H
35767+#define __ARCH_DESC_H
35768+
35769+#include <asm/ldt.h>
35770+#include <asm/segment.h>
35771+
35772+#define CPU_16BIT_STACK_SIZE 1024
35773+
35774+#ifndef __ASSEMBLY__
35775+
35776+#include <linux/preempt.h>
35777+#include <linux/smp.h>
35778+
35779+#include <asm/mmu.h>
35780+
35781+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
35782+
35783+DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
35784+
35785+struct Xgt_desc_struct {
35786+ unsigned short size;
35787+ unsigned long address __attribute__((packed));
35788+ unsigned short pad;
35789+} __attribute__ ((packed));
35790+
35791+extern struct Xgt_desc_struct idt_descr;
35792+DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
35793+
35794+
35795+static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
35796+{
35797+ return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
35798+}
35799+
35800+#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
35801+#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
35802+
35803+#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
35804+#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
35805+#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
35806+#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
35807+
35808+#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
35809+#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
35810+#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
35811+#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
35812+
35813+/*
35814+ * This is the ldt that every process will get unless we need
35815+ * something other than this.
35816+ */
35817+extern struct desc_struct default_ldt[];
35818+extern void set_intr_gate(unsigned int irq, void * addr);
35819+
35820+#define _set_tssldt_desc(n,addr,limit,type) \
35821+__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
35822+ "movw %w1,2(%2)\n\t" \
35823+ "rorl $16,%1\n\t" \
35824+ "movb %b1,4(%2)\n\t" \
35825+ "movb %4,5(%2)\n\t" \
35826+ "movb $0,6(%2)\n\t" \
35827+ "movb %h1,7(%2)\n\t" \
35828+ "rorl $16,%1" \
35829+ : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
35830+
35831+#ifndef CONFIG_X86_NO_TSS
35832+static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
35833+{
35834+ _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
35835+ offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
35836+}
35837+
35838+#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
35839+#endif
35840+
35841+static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
35842+{
35843+ _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
35844+}
35845+
35846+#define LDT_entry_a(info) \
35847+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
35848+
35849+#define LDT_entry_b(info) \
35850+ (((info)->base_addr & 0xff000000) | \
35851+ (((info)->base_addr & 0x00ff0000) >> 16) | \
35852+ ((info)->limit & 0xf0000) | \
35853+ (((info)->read_exec_only ^ 1) << 9) | \
35854+ ((info)->contents << 10) | \
35855+ (((info)->seg_not_present ^ 1) << 15) | \
35856+ ((info)->seg_32bit << 22) | \
35857+ ((info)->limit_in_pages << 23) | \
35858+ ((info)->useable << 20) | \
35859+ 0x7000)
35860+
35861+#define LDT_empty(info) (\
35862+ (info)->base_addr == 0 && \
35863+ (info)->limit == 0 && \
35864+ (info)->contents == 0 && \
35865+ (info)->read_exec_only == 1 && \
35866+ (info)->seg_32bit == 0 && \
35867+ (info)->limit_in_pages == 0 && \
35868+ (info)->seg_not_present == 1 && \
35869+ (info)->useable == 0 )
35870+
35871+extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
35872+
35873+#if TLS_SIZE != 24
35874+# error update this code.
35875+#endif
35876+
35877+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
35878+{
35879+#define C(i) if (HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), \
35880+ *(u64 *)&t->tls_array[i])) \
35881+ BUG();
35882+ C(0); C(1); C(2);
35883+#undef C
35884+}
35885+
35886+static inline void clear_LDT(void)
35887+{
35888+ int cpu = get_cpu();
35889+
35890+ /*
35891+ * NB. We load the default_ldt for lcall7/27 handling on demand, as
35892+ * it slows down context switching. No one uses it anyway.
35893+ */
35894+ cpu = cpu; /* XXX avoid compiler warning */
35895+ xen_set_ldt(NULL, 0);
35896+ put_cpu();
35897+}
35898+
35899+/*
35900+ * load one particular LDT into the current CPU
35901+ */
35902+static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
35903+{
35904+ void *segments = pc->ldt;
35905+ int count = pc->size;
35906+
35907+ if (likely(!count))
35908+ segments = NULL;
35909+
35910+ xen_set_ldt(segments, count);
35911+}
35912+
35913+static inline void load_LDT(mm_context_t *pc)
35914+{
35915+ int cpu = get_cpu();
35916+ load_LDT_nolock(pc, cpu);
35917+ put_cpu();
35918+}
35919+
35920+static inline unsigned long get_desc_base(unsigned long *desc)
35921+{
35922+ unsigned long base;
35923+ base = ((desc[0] >> 16) & 0x0000ffff) |
35924+ ((desc[1] << 16) & 0x00ff0000) |
35925+ (desc[1] & 0xff000000);
35926+ return base;
35927+}
35928+
35929+#endif /* !__ASSEMBLY__ */
35930+
35931+#endif
35932Index: head-2008-11-25/include/asm-x86/mach-xen/asm/dma-mapping_32.h
35933===================================================================
35934--- /dev/null 1970-01-01 00:00:00.000000000 +0000
35935+++ head-2008-11-25/include/asm-x86/mach-xen/asm/dma-mapping_32.h 2008-04-02 12:34:02.000000000 +0200
35936@@ -0,0 +1,151 @@
35937+#ifndef _ASM_I386_DMA_MAPPING_H
35938+#define _ASM_I386_DMA_MAPPING_H
35939+
35940+/*
35941+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
35942+ * documentation.
35943+ */
35944+
35945+#include <linux/mm.h>
35946+#include <asm/cache.h>
35947+#include <asm/io.h>
35948+#include <asm/scatterlist.h>
35949+#include <asm/swiotlb.h>
35950+
35951+static inline int
35952+address_needs_mapping(struct device *hwdev, dma_addr_t addr)
35953+{
35954+ dma_addr_t mask = 0xffffffff;
35955+ /* If the device has a mask, use it, otherwise default to 32 bits */
35956+ if (hwdev && hwdev->dma_mask)
35957+ mask = *hwdev->dma_mask;
35958+ return (addr & ~mask) != 0;
35959+}
35960+
35961+extern int range_straddles_page_boundary(paddr_t p, size_t size);
35962+
35963+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
35964+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
35965+
35966+void *dma_alloc_coherent(struct device *dev, size_t size,
35967+ dma_addr_t *dma_handle, gfp_t flag);
35968+
35969+void dma_free_coherent(struct device *dev, size_t size,
35970+ void *vaddr, dma_addr_t dma_handle);
35971+
35972+extern dma_addr_t
35973+dma_map_single(struct device *dev, void *ptr, size_t size,
35974+ enum dma_data_direction direction);
35975+
35976+extern void
35977+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
35978+ enum dma_data_direction direction);
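
A hypothetical streaming-DMA sketch using the interface declared here (the buffer, length and example_ name are placeholders); note that dma_mapping_error(), declared further below in this header, takes only the handle.

static int example_dma_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(handle))
		return -EIO;
	/* ... hand 'handle' to the device and wait for it to finish ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}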
35979+
35980+extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
35981+ int nents, enum dma_data_direction direction);
35982+extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
35983+ int nents, enum dma_data_direction direction);
35984+
35985+#ifdef CONFIG_HIGHMEM
35986+extern dma_addr_t
35987+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
35988+ size_t size, enum dma_data_direction direction);
35989+
35990+extern void
35991+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
35992+ enum dma_data_direction direction);
35993+#else
35994+#define dma_map_page(dev, page, offset, size, dir) \
35995+ dma_map_single(dev, page_address(page) + (offset), (size), (dir))
35996+#define dma_unmap_page dma_unmap_single
35997+#endif
35998+
35999+extern void
36000+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
36001+ enum dma_data_direction direction);
36002+
36003+extern void
36004+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
36005+ enum dma_data_direction direction);
36006+
36007+static inline void
36008+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
36009+ unsigned long offset, size_t size,
36010+ enum dma_data_direction direction)
36011+{
36012+ dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
36013+}
36014+
36015+static inline void
36016+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
36017+ unsigned long offset, size_t size,
36018+ enum dma_data_direction direction)
36019+{
36020+ dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
36021+}
36022+
36023+static inline void
36024+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
36025+ enum dma_data_direction direction)
36026+{
36027+ if (swiotlb)
36028+ swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
36029+ flush_write_buffers();
36030+}
36031+
36032+static inline void
36033+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
36034+ enum dma_data_direction direction)
36035+{
36036+ if (swiotlb)
36037+ swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
36038+ flush_write_buffers();
36039+}
36040+
36041+extern int
36042+dma_mapping_error(dma_addr_t dma_addr);
36043+
36044+extern int
36045+dma_supported(struct device *dev, u64 mask);
36046+
36047+static inline int
36048+dma_set_mask(struct device *dev, u64 mask)
36049+{
36050+ if(!dev->dma_mask || !dma_supported(dev, mask))
36051+ return -EIO;
36052+
36053+ *dev->dma_mask = mask;
36054+
36055+ return 0;
36056+}
36057+
36058+static inline int
36059+dma_get_cache_alignment(void)
36060+{
36061+ /* no easy way to get cache size on all x86, so return the
36062+ * maximum possible, to be safe */
36063+ return (1 << INTERNODE_CACHE_SHIFT);
36064+}
36065+
36066+#define dma_is_consistent(d) (1)
36067+
36068+static inline void
36069+dma_cache_sync(void *vaddr, size_t size,
36070+ enum dma_data_direction direction)
36071+{
36072+ flush_write_buffers();
36073+}
36074+
36075+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
36076+extern int
36077+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
36078+ dma_addr_t device_addr, size_t size, int flags);
36079+
36080+extern void
36081+dma_release_declared_memory(struct device *dev);
36082+
36083+extern void *
36084+dma_mark_declared_memory_occupied(struct device *dev,
36085+ dma_addr_t device_addr, size_t size);
36086+
36087+#endif
36088Index: head-2008-11-25/include/asm-x86/mach-xen/asm/fixmap_32.h
36089===================================================================
36090--- /dev/null 1970-01-01 00:00:00.000000000 +0000
36091+++ head-2008-11-25/include/asm-x86/mach-xen/asm/fixmap_32.h 2007-06-12 13:14:02.000000000 +0200
36092@@ -0,0 +1,155 @@
36093+/*
36094+ * fixmap.h: compile-time virtual memory allocation
36095+ *
36096+ * This file is subject to the terms and conditions of the GNU General Public
36097+ * License. See the file "COPYING" in the main directory of this archive
36098+ * for more details.
36099+ *
36100+ * Copyright (C) 1998 Ingo Molnar
36101+ *
36102+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
36103+ */
36104+
36105+#ifndef _ASM_FIXMAP_H
36106+#define _ASM_FIXMAP_H
36107+
36108+
36109+/* used by vmalloc.c, vsyscall.lds.S.
36110+ *
36111+ * Leave one empty page between vmalloc'ed areas and
36112+ * the start of the fixmap.
36113+ */
36114+extern unsigned long __FIXADDR_TOP;
36115+
36116+#ifndef __ASSEMBLY__
36117+#include <linux/kernel.h>
36118+#include <asm/acpi.h>
36119+#include <asm/apicdef.h>
36120+#include <asm/page.h>
36121+#ifdef CONFIG_HIGHMEM
36122+#include <linux/threads.h>
36123+#include <asm/kmap_types.h>
36124+#endif
36125+
36126+/*
36127+ * Here we define all the compile-time 'special' virtual
36128+ * addresses. The point is to have a constant address at
36129+ * compile time, but to set the physical address only
36130+ * in the boot process. We allocate these special addresses
36131+ * from the end of virtual memory (0xfffff000) backwards.
36132+ * Also this lets us do fail-safe vmalloc(): we
36133+ * can guarantee that these special addresses and
36134+ * vmalloc()-ed addresses never overlap.
36135+ *
36136+ * These 'compile-time allocated' memory buffers are
36137+ * fixed-size 4k pages (or larger if used with an increment
36138+ * higher than 1). Use set_fixmap(idx, phys) to associate
36139+ * physical memory with fixmap indices.
36140+ *
36141+ * TLB entries of such buffers will not be flushed across
36142+ * task switches.
36143+ */
36144+enum fixed_addresses {
36145+ FIX_HOLE,
36146+ FIX_VDSO,
36147+#ifdef CONFIG_X86_LOCAL_APIC
36148+ FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
36149+#endif
36150+#ifdef CONFIG_X86_IO_APIC
36151+ FIX_IO_APIC_BASE_0,
36152+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
36153+#endif
36154+#ifdef CONFIG_X86_VISWS_APIC
36155+ FIX_CO_CPU, /* Cobalt timer */
36156+ FIX_CO_APIC, /* Cobalt APIC Redirection Table */
36157+ FIX_LI_PCIA, /* Lithium PCI Bridge A */
36158+ FIX_LI_PCIB, /* Lithium PCI Bridge B */
36159+#endif
36160+#ifdef CONFIG_X86_F00F_BUG
36161+ FIX_F00F_IDT, /* Virtual mapping for IDT */
36162+#endif
36163+#ifdef CONFIG_X86_CYCLONE_TIMER
36164+ FIX_CYCLONE_TIMER, /*cyclone timer register*/
36165+#endif
36166+#ifdef CONFIG_HIGHMEM
36167+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
36168+ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
36169+#endif
36170+#ifdef CONFIG_ACPI
36171+ FIX_ACPI_BEGIN,
36172+ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
36173+#endif
36174+#ifdef CONFIG_PCI_MMCONFIG
36175+ FIX_PCIE_MCFG,
36176+#endif
36177+ FIX_SHARED_INFO,
36178+#define NR_FIX_ISAMAPS 256
36179+ FIX_ISAMAP_END,
36180+ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
36181+ __end_of_permanent_fixed_addresses,
36182+ /* temporary boot-time mappings, used before ioremap() is functional */
36183+#define NR_FIX_BTMAPS 16
36184+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
36185+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
36186+ FIX_WP_TEST,
36187+ __end_of_fixed_addresses
36188+};
36189+
36190+extern void set_fixaddr_top(unsigned long top);
36191+
36192+extern void __set_fixmap(enum fixed_addresses idx,
36193+ maddr_t phys, pgprot_t flags);
36194+
36195+#define set_fixmap(idx, phys) \
36196+ __set_fixmap(idx, phys, PAGE_KERNEL)
36197+/*
36198+ * Some hardware wants to get fixmapped without caching.
36199+ */
36200+#define set_fixmap_nocache(idx, phys) \
36201+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
36202+
36203+#define clear_fixmap(idx) \
36204+ __set_fixmap(idx, 0, __pgprot(0))
36205+
36206+#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
36207+
36208+#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
36209+#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
36210+#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
36211+#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
36212+
36213+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
36214+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
36215+
36216+extern void __this_fixmap_does_not_exist(void);
36217+
36218+/*
36219+ * 'index to address' translation. If anyone tries to use the idx
36220+ * directly without translation, we catch the bug with a NULL-dereference
36221+ * kernel oops. Illegal ranges of incoming indices are caught too.
36222+ */
36223+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
36224+{
36225+ /*
36226+ * this branch gets completely eliminated after inlining,
36227+ * except when someone tries to use fixaddr indices in an
36228+ * illegal way. (such as mixing up address types or using
36229+ * out-of-range indices).
36230+ *
36231+ * If it doesn't get removed, the linker will complain
36232+ * loudly with a reasonably clear error message.
36233+ */
36234+ if (idx >= __end_of_fixed_addresses)
36235+ __this_fixmap_does_not_exist();
36236+
36237+ return __fix_to_virt(idx);
36238+}
36239+
36240+static inline unsigned long virt_to_fix(const unsigned long vaddr)
36241+{
36242+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
36243+ return __virt_to_fix(vaddr);
36244+}
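
A hypothetical sketch of the fixmap pattern described above: bind a machine address to one of the compile-time slots and obtain its constant virtual address (the choice of FIX_SHARED_INFO and the example_ name are illustrative only).

static void *__init example_map_fixed(maddr_t machine_addr)
{
	set_fixmap(FIX_SHARED_INFO, machine_addr);
	/* fix_to_virt() folds to a compile-time constant for each slot. */
	return (void *)fix_to_virt(FIX_SHARED_INFO);
}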
36245+
36246+#endif /* !__ASSEMBLY__ */
36247+#endif
36248Index: head-2008-11-25/include/asm-x86/mach-xen/asm/gnttab_dma.h
36249===================================================================
36250--- /dev/null 1970-01-01 00:00:00.000000000 +0000
36251+++ head-2008-11-25/include/asm-x86/mach-xen/asm/gnttab_dma.h 2007-08-06 15:10:49.000000000 +0200
36252@@ -0,0 +1,41 @@
36253+/*
36254+ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
36255+ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
36256+ * VA Linux Systems Japan K.K.
36257+ *
36258+ * This program is free software; you can redistribute it and/or modify
36259+ * it under the terms of the GNU General Public License as published by
36260+ * the Free Software Foundation; either version 2 of the License, or
36261+ * (at your option) any later version.
36262+ *
36263+ * This program is distributed in the hope that it will be useful,
36264+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
36265+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
36266+ * GNU General Public License for more details.
36267+ *
36268+ * You should have received a copy of the GNU General Public License
36269+ * along with this program; if not, write to the Free Software
36270+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
36271+ */
36272+
36273+#ifndef _ASM_I386_GNTTAB_DMA_H
36274+#define _ASM_I386_GNTTAB_DMA_H
36275+
36276+static inline int gnttab_dma_local_pfn(struct page *page)
36277+{
36278+ /* Has it become a local MFN? */
36279+ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page))));
36280+}
36281+
36282+static inline maddr_t gnttab_dma_map_page(struct page *page)
36283+{
36284+ __gnttab_dma_map_page(page);
36285+ return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT);
36286+}
36287+
36288+static inline void gnttab_dma_unmap_page(maddr_t maddr)
36289+{
36290+ __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr)));
36291+}
36292+
36293+#endif /* _ASM_I386_GNTTAB_DMA_H */
36294Index: head-2008-11-25/include/asm-x86/mach-xen/asm/highmem.h
36295===================================================================
36296--- /dev/null 1970-01-01 00:00:00.000000000 +0000
36297+++ head-2008-11-25/include/asm-x86/mach-xen/asm/highmem.h 2008-10-29 09:55:56.000000000 +0100
36298@@ -0,0 +1,97 @@
36299+/*
36300+ * highmem.h: virtual kernel memory mappings for high memory
36301+ *
36302+ * Used in CONFIG_HIGHMEM systems for memory pages which
36303+ * are not addressable by direct kernel virtual addresses.
36304+ *
36305+ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
36306+ * Gerhard.Wichert@pdb.siemens.de
36307+ *
36308+ *
36309+ * Redesigned the x86 32-bit VM architecture to deal with
36310+ * up to 16 Terabyte physical memory. With current x86 CPUs
36311+ * we now support up to 64 Gigabytes physical RAM.
36312+ *
36313+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
36314+ */
36315+
36316+#ifndef _ASM_HIGHMEM_H
36317+#define _ASM_HIGHMEM_H
36318+
36319+#ifdef __KERNEL__
36320+
36321+#include <linux/interrupt.h>
36322+#include <linux/threads.h>
36323+#include <asm/kmap_types.h>
36324+#include <asm/tlbflush.h>
36325+
36326+/* declarations for highmem.c */
36327+extern unsigned long highstart_pfn, highend_pfn;
36328+
36329+extern pte_t *kmap_pte;
36330+extern pgprot_t kmap_prot;
36331+extern pte_t *pkmap_page_table;
36332+
36333+/*
36334+ * Right now we initialize only a single pte table. It can be extended
36335+ * easily, subsequent pte tables have to be allocated in one physical
36336+ * chunk of RAM.
36337+ */
36338+#ifdef CONFIG_X86_PAE
36339+#define LAST_PKMAP 512
36340+#else
36341+#define LAST_PKMAP 1024
36342+#endif
36343+/*
36344+ * Ordering is:
36345+ *
36346+ * FIXADDR_TOP
36347+ * fixed_addresses
36348+ * FIXADDR_START
36349+ * temp fixed addresses
36350+ * FIXADDR_BOOT_START
36351+ * Persistent kmap area
36352+ * PKMAP_BASE
36353+ * VMALLOC_END
36354+ * Vmalloc area
36355+ * VMALLOC_START
36356+ * high_memory
36357+ */
36358+#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
36359+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
36360+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
36361+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
36362+
36363+extern void * FASTCALL(kmap_high(struct page *page));
36364+extern void FASTCALL(kunmap_high(struct page *page));
36365+
36366+void *kmap(struct page *page);
36367+void kunmap(struct page *page);
36368+void *kmap_atomic(struct page *page, enum km_type type);
36369+void *kmap_atomic_pte(struct page *page, enum km_type type);
36370+void kunmap_atomic(void *kvaddr, enum km_type type);
36371+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
36372+struct page *kmap_atomic_to_page(void *ptr);
36373+
36374+#define flush_cache_kmaps() do { } while (0)
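
A hypothetical sketch of the atomic-kmap pattern these declarations support (the helper name is illustrative; memcpy() from <linux/string.h> is assumed): map a possibly-highmem page, copy out of it, then unmap.

static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, len);
	kunmap_atomic(src, KM_USER0);
}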
36375+
36376+void clear_highpage(struct page *);
36377+static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
36378+{
36379+ clear_highpage(page);
36380+}
36381+#define __HAVE_ARCH_CLEAR_HIGHPAGE
36382+#define __HAVE_ARCH_CLEAR_USER_HIGHPAGE
36383+
36384+void copy_highpage(struct page *to, struct page *from);
36385+static inline void copy_user_highpage(struct page *to, struct page *from,
36386+ unsigned long vaddr)
36387+{
36388+ copy_highpage(to, from);
36389+}
36390+#define __HAVE_ARCH_COPY_HIGHPAGE
36391+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
36392+
36393+#endif /* __KERNEL__ */
36394+
36395+#endif /* _ASM_HIGHMEM_H */
36396Index: head-2008-11-25/include/asm-x86/mach-xen/asm/hypercall_32.h
36397===================================================================
36398--- /dev/null 1970-01-01 00:00:00.000000000 +0000
36399+++ head-2008-11-25/include/asm-x86/mach-xen/asm/hypercall_32.h 2008-11-25 12:22:34.000000000 +0100
36400@@ -0,0 +1,409 @@
36401+/******************************************************************************
36402+ * hypercall.h
36403+ *
36404+ * Linux-specific hypervisor handling.
36405+ *
36406+ * Copyright (c) 2002-2004, K A Fraser
36407+ *
36408+ * This program is free software; you can redistribute it and/or
36409+ * modify it under the terms of the GNU General Public License version 2
36410+ * as published by the Free Software Foundation; or, when distributed
36411+ * separately from the Linux kernel or incorporated into other
36412+ * software packages, subject to the following license:
36413+ *
36414+ * Permission is hereby granted, free of charge, to any person obtaining a copy
36415+ * of this source file (the "Software"), to deal in the Software without
36416+ * restriction, including without limitation the rights to use, copy, modify,
36417+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
36418+ * and to permit persons to whom the Software is furnished to do so, subject to
36419+ * the following conditions:
36420+ *
36421+ * The above copyright notice and this permission notice shall be included in
36422+ * all copies or substantial portions of the Software.
36423+ *
36424+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36425+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36426+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
36427+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
36428+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
36429+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
36430+ * IN THE SOFTWARE.
36431+ */
36432+
36433+#ifndef __HYPERCALL_H__
36434+#define __HYPERCALL_H__
36435+
36436+#include <linux/string.h> /* memcpy() */
36437+#include <linux/stringify.h>
36438+
36439+#ifndef __HYPERVISOR_H__
36440+# error "please don't include this file directly"
36441+#endif
36442+
36443+#ifdef CONFIG_XEN
36444+#define HYPERCALL_STR(name) \
36445+ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)"
36446+#else
36447+#define HYPERCALL_STR(name) \
36448+ "mov hypercall_stubs,%%eax; " \
36449+ "add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\
36450+ "call *%%eax"
36451+#endif
36452+
36453+#define _hypercall0(type, name) \
36454+({ \
36455+ type __res; \
36456+ asm volatile ( \
36457+ HYPERCALL_STR(name) \
36458+ : "=a" (__res) \
36459+ : \
36460+ : "memory" ); \
36461+ __res; \
36462+})
36463+
36464+#define _hypercall1(type, name, a1) \
36465+({ \
36466+ type __res; \
36467+ long __ign1; \
36468+ asm volatile ( \
36469+ HYPERCALL_STR(name) \
36470+ : "=a" (__res), "=b" (__ign1) \
36471+ : "1" ((long)(a1)) \
36472+ : "memory" ); \
36473+ __res; \
36474+})
36475+
36476+#define _hypercall2(type, name, a1, a2) \
36477+({ \
36478+ type __res; \
36479+ long __ign1, __ign2; \
36480+ asm volatile ( \
36481+ HYPERCALL_STR(name) \
36482+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
36483+ : "1" ((long)(a1)), "2" ((long)(a2)) \
36484+ : "memory" ); \
36485+ __res; \
36486+})
36487+
36488+#define _hypercall3(type, name, a1, a2, a3) \
36489+({ \
36490+ type __res; \
36491+ long __ign1, __ign2, __ign3; \
36492+ asm volatile ( \
36493+ HYPERCALL_STR(name) \
36494+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
36495+ "=d" (__ign3) \
36496+ : "1" ((long)(a1)), "2" ((long)(a2)), \
36497+ "3" ((long)(a3)) \
36498+ : "memory" ); \
36499+ __res; \
36500+})
36501+
36502+#define _hypercall4(type, name, a1, a2, a3, a4) \
36503+({ \
36504+ type __res; \
36505+ long __ign1, __ign2, __ign3, __ign4; \
36506+ asm volatile ( \
36507+ HYPERCALL_STR(name) \
36508+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
36509+ "=d" (__ign3), "=S" (__ign4) \
36510+ : "1" ((long)(a1)), "2" ((long)(a2)), \
36511+ "3" ((long)(a3)), "4" ((long)(a4)) \
36512+ : "memory" ); \
36513+ __res; \
36514+})
36515+
36516+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
36517+({ \
36518+ type __res; \
36519+ long __ign1, __ign2, __ign3, __ign4, __ign5; \
36520+ asm volatile ( \
36521+ HYPERCALL_STR(name) \
36522+ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
36523+ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
36524+ : "1" ((long)(a1)), "2" ((long)(a2)), \
36525+ "3" ((long)(a3)), "4" ((long)(a4)), \
36526+ "5" ((long)(a5)) \
36527+ : "memory" ); \
36528+ __res; \
36529+})
36530+
36531+static inline int __must_check
36532+HYPERVISOR_set_trap_table(
36533+ const trap_info_t *table)
36534+{
36535+ return _hypercall1(int, set_trap_table, table);
36536+}
36537+
36538+static inline int __must_check
36539+HYPERVISOR_mmu_update(
36540+ mmu_update_t *req, unsigned int count, unsigned int *success_count,
36541+ domid_t domid)
36542+{
36543+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
36544+}
36545+
36546+static inline int __must_check
36547+HYPERVISOR_mmuext_op(
36548+ struct mmuext_op *op, unsigned int count, unsigned int *success_count,
36549+ domid_t domid)
36550+{
36551+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
36552+}
36553+
36554+static inline int __must_check
36555+HYPERVISOR_set_gdt(
36556+ unsigned long *frame_list, unsigned int entries)
36557+{
36558+ return _hypercall2(int, set_gdt, frame_list, entries);
36559+}
36560+
36561+static inline int __must_check
36562+HYPERVISOR_stack_switch(
36563+ unsigned long ss, unsigned long esp)
36564+{
36565+ return _hypercall2(int, stack_switch, ss, esp);
36566+}
36567+
36568+static inline int __must_check
36569+HYPERVISOR_set_callbacks(
36570+ unsigned long event_selector, unsigned long event_address,
36571+ unsigned long failsafe_selector, unsigned long failsafe_address)
36572+{
36573+ return _hypercall4(int, set_callbacks,
36574+ event_selector, event_address,
36575+ failsafe_selector, failsafe_address);
36576+}
36577+
36578+static inline int
36579+HYPERVISOR_fpu_taskswitch(
36580+ int set)
36581+{
36582+ return _hypercall1(int, fpu_taskswitch, set);
36583+}
36584+
36585+static inline int __must_check
36586+HYPERVISOR_sched_op_compat(
36587+ int cmd, unsigned long arg)
36588+{
36589+ return _hypercall2(int, sched_op_compat, cmd, arg);
36590+}
36591+
36592+static inline int __must_check
36593+HYPERVISOR_sched_op(
36594+ int cmd, void *arg)
36595+{
36596+ return _hypercall2(int, sched_op, cmd, arg);
36597+}
36598+
36599+static inline long __must_check
36600+HYPERVISOR_set_timer_op(
36601+ u64 timeout)
36602+{
36603+ unsigned long timeout_hi = (unsigned long)(timeout>>32);
36604+ unsigned long timeout_lo = (unsigned long)timeout;
36605+ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
36606+}
36607+
36608+static inline int __must_check
36609+HYPERVISOR_platform_op(
36610+ struct xen_platform_op *platform_op)
36611+{
36612+ platform_op->interface_version = XENPF_INTERFACE_VERSION;
36613+ return _hypercall1(int, platform_op, platform_op);
36614+}
36615+
36616+static inline int __must_check
36617+HYPERVISOR_set_debugreg(
36618+ unsigned int reg, unsigned long value)
36619+{
36620+ return _hypercall2(int, set_debugreg, reg, value);
36621+}
36622+
36623+static inline unsigned long __must_check
36624+HYPERVISOR_get_debugreg(
36625+ unsigned int reg)
36626+{
36627+ return _hypercall1(unsigned long, get_debugreg, reg);
36628+}
36629+
36630+static inline int __must_check
36631+HYPERVISOR_update_descriptor(
36632+ u64 ma, u64 desc)
36633+{
36634+ return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
36635+}
36636+
36637+static inline int __must_check
36638+HYPERVISOR_memory_op(
36639+ unsigned int cmd, void *arg)
36640+{
36641+ return _hypercall2(int, memory_op, cmd, arg);
36642+}
36643+
36644+static inline int __must_check
36645+HYPERVISOR_multicall(
36646+ multicall_entry_t *call_list, unsigned int nr_calls)
36647+{
36648+ return _hypercall2(int, multicall, call_list, nr_calls);
36649+}
36650+
36651+static inline int __must_check
36652+HYPERVISOR_update_va_mapping(
36653+ unsigned long va, pte_t new_val, unsigned long flags)
36654+{
36655+ unsigned long pte_hi = 0;
36656+#ifdef CONFIG_X86_PAE
36657+ pte_hi = new_val.pte_high;
36658+#endif
36659+ return _hypercall4(int, update_va_mapping, va,
36660+ new_val.pte_low, pte_hi, flags);
36661+}
36662+
36663+static inline int __must_check
36664+HYPERVISOR_event_channel_op(
36665+ int cmd, void *arg)
36666+{
36667+ int rc = _hypercall2(int, event_channel_op, cmd, arg);
36668+
36669+#if CONFIG_XEN_COMPAT <= 0x030002
36670+ if (unlikely(rc == -ENOSYS)) {
36671+ struct evtchn_op op;
36672+ op.cmd = cmd;
36673+ memcpy(&op.u, arg, sizeof(op.u));
36674+ rc = _hypercall1(int, event_channel_op_compat, &op);
36675+ memcpy(arg, &op.u, sizeof(op.u));
36676+ }
36677+#endif
36678+
36679+ return rc;
36680+}
36681+
36682+static inline int __must_check
36683+HYPERVISOR_xen_version(
36684+ int cmd, void *arg)
36685+{
36686+ return _hypercall2(int, xen_version, cmd, arg);
36687+}
36688+
36689+static inline int __must_check
36690+HYPERVISOR_console_io(
36691+ int cmd, unsigned int count, char *str)
36692+{
36693+ return _hypercall3(int, console_io, cmd, count, str);
36694+}
36695+
36696+static inline int __must_check
36697+HYPERVISOR_physdev_op(
36698+ int cmd, void *arg)
36699+{
36700+ int rc = _hypercall2(int, physdev_op, cmd, arg);
36701+
36702+#if CONFIG_XEN_COMPAT <= 0x030002
36703+ if (unlikely(rc == -ENOSYS)) {
36704+ struct physdev_op op;
36705+ op.cmd = cmd;
36706+ memcpy(&op.u, arg, sizeof(op.u));
36707+ rc = _hypercall1(int, physdev_op_compat, &op);
36708+ memcpy(arg, &op.u, sizeof(op.u));
36709+ }
36710+#endif
36711+
36712+ return rc;
36713+}
36714+
36715+static inline int __must_check
36716+HYPERVISOR_grant_table_op(
36717+ unsigned int cmd, void *uop, unsigned int count)
36718+{
36719+ return _hypercall3(int, grant_table_op, cmd, uop, count);
36720+}
36721+
36722+static inline int __must_check
36723+HYPERVISOR_update_va_mapping_otherdomain(
36724+ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
36725+{
36726+ unsigned long pte_hi = 0;
36727+#ifdef CONFIG_X86_PAE
36728+ pte_hi = new_val.pte_high;
36729+#endif
36730+ return _hypercall5(int, update_va_mapping_otherdomain, va,
36731+ new_val.pte_low, pte_hi, flags, domid);
36732+}
36733+
36734+static inline int __must_check
36735+HYPERVISOR_vm_assist(
36736+ unsigned int cmd, unsigned int type)
36737+{
36738+ return _hypercall2(int, vm_assist, cmd, type);
36739+}
36740+
36741+static inline int __must_check
36742+HYPERVISOR_vcpu_op(
36743+ int cmd, unsigned int vcpuid, void *extra_args)
36744+{
36745+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
36746+}
36747+
36748+static inline int __must_check
36749+HYPERVISOR_suspend(
36750+ unsigned long srec)
36751+{
36752+ struct sched_shutdown sched_shutdown = {
36753+ .reason = SHUTDOWN_suspend
36754+ };
36755+
36756+ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
36757+ &sched_shutdown, srec);
36758+
36759+#if CONFIG_XEN_COMPAT <= 0x030002
36760+ if (rc == -ENOSYS)
36761+ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
36762+ SHUTDOWN_suspend, srec);
36763+#endif
36764+
36765+ return rc;
36766+}
36767+
36768+#if CONFIG_XEN_COMPAT <= 0x030002
36769+static inline int
36770+HYPERVISOR_nmi_op(
36771+ unsigned long op, void *arg)
36772+{
36773+ return _hypercall2(int, nmi_op, op, arg);
36774+}
36775+#endif
36776+
36777+#ifndef CONFIG_XEN
36778+static inline unsigned long __must_check
36779+HYPERVISOR_hvm_op(
36780+ int op, void *arg)
36781+{
36782+ return _hypercall2(unsigned long, hvm_op, op, arg);
36783+}
36784+#endif
36785+
36786+static inline int __must_check
36787+HYPERVISOR_callback_op(
36788+ int cmd, const void *arg)
36789+{
36790+ return _hypercall2(int, callback_op, cmd, arg);
36791+}
36792+
36793+static inline int __must_check
36794+HYPERVISOR_xenoprof_op(
36795+ int op, void *arg)
36796+{
36797+ return _hypercall2(int, xenoprof_op, op, arg);
36798+}
36799+
36800+static inline int __must_check
36801+HYPERVISOR_kexec_op(
36802+ unsigned long op, void *args)
36803+{
36804+ return _hypercall2(int, kexec_op, op, args);
36805+}
36806+
36807+
36808+
36809+#endif /* __HYPERCALL_H__ */
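
For illustration only, not part of the patch: a minimal sketch of how a caller might use the event-channel wrapper above. The ENOSYS path inside HYPERVISOR_event_channel_op() transparently falls back to the pre-3.0.3 event_channel_op_compat hypercall, so callers need no version checks of their own. example_notify() is a hypothetical helper and assumes the EVTCHNOP_send / struct evtchn_send definitions from xen/interface/event_channel.h.

	/* Sketch only: notify the remote end of an interdomain event channel. */
	static void example_notify(evtchn_port_t port)
	{
		struct evtchn_send send = { .port = port };

		if (HYPERVISOR_event_channel_op(EVTCHNOP_send, &send) != 0)
			printk(KERN_WARNING "EVTCHNOP_send failed\n");
	}
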
36810Index: head-2008-11-25/include/asm-x86/mach-xen/asm/hypervisor.h
36811===================================================================
36812--- /dev/null 1970-01-01 00:00:00.000000000 +0000
36813+++ head-2008-11-25/include/asm-x86/mach-xen/asm/hypervisor.h 2008-02-20 09:32:49.000000000 +0100
36814@@ -0,0 +1,259 @@
36815+/******************************************************************************
36816+ * hypervisor.h
36817+ *
36818+ * Linux-specific hypervisor handling.
36819+ *
36820+ * Copyright (c) 2002-2004, K A Fraser
36821+ *
36822+ * This program is free software; you can redistribute it and/or
36823+ * modify it under the terms of the GNU General Public License version 2
36824+ * as published by the Free Software Foundation; or, when distributed
36825+ * separately from the Linux kernel or incorporated into other
36826+ * software packages, subject to the following license:
36827+ *
36828+ * Permission is hereby granted, free of charge, to any person obtaining a copy
36829+ * of this source file (the "Software"), to deal in the Software without
36830+ * restriction, including without limitation the rights to use, copy, modify,
36831+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
36832+ * and to permit persons to whom the Software is furnished to do so, subject to
36833+ * the following conditions:
36834+ *
36835+ * The above copyright notice and this permission notice shall be included in
36836+ * all copies or substantial portions of the Software.
36837+ *
36838+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
36839+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
36840+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
36841+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
36842+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
36843+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
36844+ * IN THE SOFTWARE.
36845+ */
36846+
36847+#ifndef __HYPERVISOR_H__
36848+#define __HYPERVISOR_H__
36849+
36850+#include <linux/types.h>
36851+#include <linux/kernel.h>
36852+#include <linux/version.h>
36853+#include <linux/errno.h>
36854+#include <xen/interface/xen.h>
36855+#include <xen/interface/platform.h>
36856+#include <xen/interface/event_channel.h>
36857+#include <xen/interface/physdev.h>
36858+#include <xen/interface/sched.h>
36859+#include <xen/interface/nmi.h>
36860+#include <asm/ptrace.h>
36861+#include <asm/page.h>
36862+#if defined(__i386__)
36863+# ifdef CONFIG_X86_PAE
36864+# include <asm-generic/pgtable-nopud.h>
36865+# else
36866+# include <asm-generic/pgtable-nopmd.h>
36867+# endif
36868+#elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
36869+# include <asm-generic/pgtable-nopud.h>
36870+#endif
36871+
36872+extern shared_info_t *HYPERVISOR_shared_info;
36873+
36874+#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
36875+#ifdef CONFIG_SMP
36876+#define current_vcpu_info() vcpu_info(smp_processor_id())
36877+#else
36878+#define current_vcpu_info() vcpu_info(0)
36879+#endif
36880+
36881+#ifdef CONFIG_X86_32
36882+extern unsigned long hypervisor_virt_start;
36883+#endif
36884+
36885+/* arch/xen/i386/kernel/setup.c */
36886+extern start_info_t *xen_start_info;
36887+#ifdef CONFIG_XEN_PRIVILEGED_GUEST
36888+#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
36889+#else
36890+#define is_initial_xendomain() 0
36891+#endif
36892+
36893+/* arch/xen/kernel/evtchn.c */
36894+/* Force a proper event-channel callback from Xen. */
36895+void force_evtchn_callback(void);
36896+
36897+/* arch/xen/kernel/process.c */
36898+void xen_cpu_idle (void);
36899+
36900+/* arch/xen/i386/kernel/hypervisor.c */
36901+void do_hypervisor_callback(struct pt_regs *regs);
36902+
36903+/* arch/xen/i386/mm/hypervisor.c */
36904+/*
36905+ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
36906+ * be MACHINE addresses.
36907+ */
36908+
36909+void xen_pt_switch(unsigned long ptr);
36910+void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
36911+void xen_load_gs(unsigned int selector); /* x86_64 only */
36912+void xen_tlb_flush(void);
36913+void xen_invlpg(unsigned long ptr);
36914+
36915+void xen_l1_entry_update(pte_t *ptr, pte_t val);
36916+void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
36917+void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
36918+void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
36919+void xen_pgd_pin(unsigned long ptr);
36920+void xen_pgd_unpin(unsigned long ptr);
36921+
36922+void xen_set_ldt(const void *ptr, unsigned int ents);
36923+
36924+#ifdef CONFIG_SMP
36925+#include <linux/cpumask.h>
36926+void xen_tlb_flush_all(void);
36927+void xen_invlpg_all(unsigned long ptr);
36928+void xen_tlb_flush_mask(cpumask_t *mask);
36929+void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
36930+#endif
36931+
36932+/* Returns zero on success else negative errno. */
36933+int xen_create_contiguous_region(
36934+ unsigned long vstart, unsigned int order, unsigned int address_bits);
36935+void xen_destroy_contiguous_region(
36936+ unsigned long vstart, unsigned int order);
36937+
36938+struct page;
36939+
36940+int xen_limit_pages_to_max_mfn(
36941+ struct page *pages, unsigned int order, unsigned int address_bits);
36942+
36943+/* Turn jiffies into Xen system time. */
36944+u64 jiffies_to_st(unsigned long jiffies);
36945+
36946+#ifdef CONFIG_XEN_SCRUB_PAGES
36947+void scrub_pages(void *, unsigned int);
36948+#else
36949+#define scrub_pages(_p,_n) ((void)0)
36950+#endif
36951+
36952+#include <xen/hypercall.h>
36953+
36954+#if defined(CONFIG_X86_64)
36955+#define MULTI_UVMFLAGS_INDEX 2
36956+#define MULTI_UVMDOMID_INDEX 3
36957+#else
36958+#define MULTI_UVMFLAGS_INDEX 3
36959+#define MULTI_UVMDOMID_INDEX 4
36960+#endif
36961+
36962+#ifdef CONFIG_XEN
36963+#define is_running_on_xen() 1
36964+#else
36965+extern char *hypercall_stubs;
36966+#define is_running_on_xen() (!!hypercall_stubs)
36967+#endif
36968+
36969+static inline int
36970+HYPERVISOR_yield(
36971+ void)
36972+{
36973+ int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
36974+
36975+#if CONFIG_XEN_COMPAT <= 0x030002
36976+ if (rc == -ENOSYS)
36977+ rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
36978+#endif
36979+
36980+ return rc;
36981+}
36982+
36983+static inline int
36984+HYPERVISOR_block(
36985+ void)
36986+{
36987+ int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
36988+
36989+#if CONFIG_XEN_COMPAT <= 0x030002
36990+ if (rc == -ENOSYS)
36991+ rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0);
36992+#endif
36993+
36994+ return rc;
36995+}
36996+
36997+static inline void /*__noreturn*/
36998+HYPERVISOR_shutdown(
36999+ unsigned int reason)
37000+{
37001+ struct sched_shutdown sched_shutdown = {
37002+ .reason = reason
37003+ };
37004+
37005+ VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown));
37006+#if CONFIG_XEN_COMPAT <= 0x030002
37007+ VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason));
37008+#endif
37009+ /* Don't recurse needlessly. */
37010+ BUG_ON(reason != SHUTDOWN_crash);
37011+ for(;;);
37012+}
37013+
37014+static inline int __must_check
37015+HYPERVISOR_poll(
37016+ evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
37017+{
37018+ int rc;
37019+ struct sched_poll sched_poll = {
37020+ .nr_ports = nr_ports,
37021+ .timeout = jiffies_to_st(timeout)
37022+ };
37023+ set_xen_guest_handle(sched_poll.ports, ports);
37024+
37025+ rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
37026+#if CONFIG_XEN_COMPAT <= 0x030002
37027+ if (rc == -ENOSYS)
37028+ rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
37029+#endif
37030+
37031+ return rc;
37032+}
37033+
37034+#ifdef CONFIG_XEN
37035+
37036+static inline void
37037+MULTI_update_va_mapping(
37038+ multicall_entry_t *mcl, unsigned long va,
37039+ pte_t new_val, unsigned long flags)
37040+{
37041+ mcl->op = __HYPERVISOR_update_va_mapping;
37042+ mcl->args[0] = va;
37043+#if defined(CONFIG_X86_64)
37044+ mcl->args[1] = new_val.pte;
37045+#elif defined(CONFIG_X86_PAE)
37046+ mcl->args[1] = new_val.pte_low;
37047+ mcl->args[2] = new_val.pte_high;
37048+#else
37049+ mcl->args[1] = new_val.pte_low;
37050+ mcl->args[2] = 0;
37051+#endif
37052+ mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
37053+}
37054+
37055+static inline void
37056+MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
37057+ void *uop, unsigned int count)
37058+{
37059+ mcl->op = __HYPERVISOR_grant_table_op;
37060+ mcl->args[0] = cmd;
37061+ mcl->args[1] = (unsigned long)uop;
37062+ mcl->args[2] = count;
37063+}
37064+
37065+#else /* !defined(CONFIG_XEN) */
37066+
37067+/* Multicalls not supported for HVM guests. */
37068+#define MULTI_update_va_mapping(a,b,c,d) ((void)0)
37069+#define MULTI_grant_table_op(a,b,c,d) ((void)0)
37070+
37071+#endif
37072+
37073+#endif /* __HYPERVISOR_H__ */
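
For illustration only, not part of the patch: a minimal sketch of the SCHEDOP_poll convenience wrapper defined above. The timeout is passed in jiffies and converted with jiffies_to_st() inside HYPERVISOR_poll(); on pre-3.0.3 hypervisors the wrapper degrades to a plain yield, so callers must re-check their wakeup condition afterwards. example_wait_for_event() is a hypothetical helper.

	/* Sketch only: block this VCPU until 'port' has an event pending,
	 * or until roughly one second (HZ jiffies) has elapsed. */
	static void example_wait_for_event(evtchn_port_t port)
	{
		int rc = HYPERVISOR_poll(&port, 1, HZ);

		if (rc)
			printk(KERN_WARNING "SCHEDOP_poll failed: %d\n", rc);
	}
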
37074Index: head-2008-11-25/include/asm-x86/mach-xen/asm/irqflags_32.h
37075===================================================================
37076--- /dev/null 1970-01-01 00:00:00.000000000 +0000
37077+++ head-2008-11-25/include/asm-x86/mach-xen/asm/irqflags_32.h 2007-06-12 13:14:02.000000000 +0200
37078@@ -0,0 +1,127 @@
37079+/*
37080+ * include/asm-i386/irqflags.h
37081+ *
37082+ * IRQ flags handling
37083+ *
37084+ * This file gets included from lowlevel asm headers too, to provide
37085+ * wrapped versions of the local_irq_*() APIs, based on the
37086+ * raw_local_irq_*() functions from the lowlevel headers.
37087+ */
37088+#ifndef _ASM_IRQFLAGS_H
37089+#define _ASM_IRQFLAGS_H
37090+
37091+#ifndef __ASSEMBLY__
37092+
37093+/*
37094+ * The use of 'barrier' in the following macros reflects their use as local-lock
37095+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
37096+ * critical operations are executed. All critical operations must complete
37097+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
37098+ * includes these barriers, for example.
37099+ */
37100+
37101+#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
37102+
37103+#define raw_local_save_flags(flags) \
37104+ do { (flags) = __raw_local_save_flags(); } while (0)
37105+
37106+#define raw_local_irq_restore(x) \
37107+do { \
37108+ vcpu_info_t *_vcpu; \
37109+ barrier(); \
37110+ _vcpu = current_vcpu_info(); \
37111+ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
37112+ barrier(); /* unmask then check (avoid races) */ \
37113+ if (unlikely(_vcpu->evtchn_upcall_pending)) \
37114+ force_evtchn_callback(); \
37115+ } \
37116+} while (0)
37117+
37118+#define raw_local_irq_disable() \
37119+do { \
37120+ current_vcpu_info()->evtchn_upcall_mask = 1; \
37121+ barrier(); \
37122+} while (0)
37123+
37124+#define raw_local_irq_enable() \
37125+do { \
37126+ vcpu_info_t *_vcpu; \
37127+ barrier(); \
37128+ _vcpu = current_vcpu_info(); \
37129+ _vcpu->evtchn_upcall_mask = 0; \
37130+ barrier(); /* unmask then check (avoid races) */ \
37131+ if (unlikely(_vcpu->evtchn_upcall_pending)) \
37132+ force_evtchn_callback(); \
37133+} while (0)
37134+
37135+/*
37136+ * Used in the idle loop; sti takes one instruction cycle
37137+ * to complete:
37138+ */
37139+void raw_safe_halt(void);
37140+
37141+/*
37142+ * Used when interrupts are already enabled or to
37143+ * shutdown the processor:
37144+ */
37145+void halt(void);
37146+
37147+static inline int raw_irqs_disabled_flags(unsigned long flags)
37148+{
37149+ return (flags != 0);
37150+}
37151+
37152+#define raw_irqs_disabled() \
37153+({ \
37154+ unsigned long flags = __raw_local_save_flags(); \
37155+ \
37156+ raw_irqs_disabled_flags(flags); \
37157+})
37158+
37159+/*
37160+ * For spinlocks, etc:
37161+ */
37162+#define __raw_local_irq_save() \
37163+({ \
37164+ unsigned long flags = __raw_local_save_flags(); \
37165+ \
37166+ raw_local_irq_disable(); \
37167+ \
37168+ flags; \
37169+})
37170+
37171+#define raw_local_irq_save(flags) \
37172+ do { (flags) = __raw_local_irq_save(); } while (0)
37173+
37174+#endif /* __ASSEMBLY__ */
37175+
37176+/*
37177+ * Do the CPU's IRQ-state tracing from assembly code. We call a
37178+ * C function, so save all the C-clobbered registers:
37179+ */
37180+#ifdef CONFIG_TRACE_IRQFLAGS
37181+
37182+# define TRACE_IRQS_ON \
37183+ pushl %eax; \
37184+ pushl %ecx; \
37185+ pushl %edx; \
37186+ call trace_hardirqs_on; \
37187+ popl %edx; \
37188+ popl %ecx; \
37189+ popl %eax;
37190+
37191+# define TRACE_IRQS_OFF \
37192+ pushl %eax; \
37193+ pushl %ecx; \
37194+ pushl %edx; \
37195+ call trace_hardirqs_off; \
37196+ popl %edx; \
37197+ popl %ecx; \
37198+ popl %eax;
37199+
37200+#else
37201+# define TRACE_IRQS_ON
37202+# define TRACE_IRQS_OFF
37203+#endif
37204+
37205+#endif
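
For illustration only, not part of the patch: the usual save/disable/restore pattern under this Xen variant of the IRQ-flags API. The saved value is the VCPU's evtchn_upcall_mask (non-zero means masked) rather than a copy of EFLAGS, but callers use it exactly as on native x86. example_critical_section() is a hypothetical helper.

	/* Sketch only: a short critical section with event upcalls masked. */
	static void example_critical_section(void)
	{
		unsigned long flags;

		raw_local_irq_save(flags);	/* masks event-channel upcalls */
		/* ... touch per-CPU state here ... */
		raw_local_irq_restore(flags);	/* may call force_evtchn_callback() */
	}
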
37206Index: head-2008-11-25/include/asm-x86/mach-xen/asm/maddr_32.h
37207===================================================================
37208--- /dev/null 1970-01-01 00:00:00.000000000 +0000
37209+++ head-2008-11-25/include/asm-x86/mach-xen/asm/maddr_32.h 2008-04-02 12:34:02.000000000 +0200
37210@@ -0,0 +1,193 @@
37211+#ifndef _I386_MADDR_H
37212+#define _I386_MADDR_H
37213+
37214+#include <xen/features.h>
37215+#include <xen/interface/xen.h>
37216+
37217+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
37218+#define INVALID_P2M_ENTRY (~0UL)
37219+#define FOREIGN_FRAME_BIT (1UL<<31)
37220+#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
37221+
37222+/* Definitions for machine and pseudophysical addresses. */
37223+#ifdef CONFIG_X86_PAE
37224+typedef unsigned long long paddr_t;
37225+typedef unsigned long long maddr_t;
37226+#else
37227+typedef unsigned long paddr_t;
37228+typedef unsigned long maddr_t;
37229+#endif
37230+
37231+#ifdef CONFIG_XEN
37232+
37233+extern unsigned long *phys_to_machine_mapping;
37234+extern unsigned long max_mapnr;
37235+
37236+#undef machine_to_phys_mapping
37237+extern unsigned long *machine_to_phys_mapping;
37238+extern unsigned int machine_to_phys_order;
37239+
37240+static inline unsigned long pfn_to_mfn(unsigned long pfn)
37241+{
37242+ if (xen_feature(XENFEAT_auto_translated_physmap))
37243+ return pfn;
37244+ BUG_ON(max_mapnr && pfn >= max_mapnr);
37245+ return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
37246+}
37247+
37248+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
37249+{
37250+ if (xen_feature(XENFEAT_auto_translated_physmap))
37251+ return 1;
37252+ BUG_ON(max_mapnr && pfn >= max_mapnr);
37253+ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
37254+}
37255+
37256+static inline unsigned long mfn_to_pfn(unsigned long mfn)
37257+{
37258+ unsigned long pfn;
37259+
37260+ if (xen_feature(XENFEAT_auto_translated_physmap))
37261+ return mfn;
37262+
37263+ if (unlikely((mfn >> machine_to_phys_order) != 0))
37264+ return max_mapnr;
37265+
37266+ /* The array access can fail (e.g., device space beyond end of RAM). */
37267+ asm (
37268+ "1: movl %1,%0\n"
37269+ "2:\n"
37270+ ".section .fixup,\"ax\"\n"
37271+ "3: movl %2,%0\n"
37272+ " jmp 2b\n"
37273+ ".previous\n"
37274+ ".section __ex_table,\"a\"\n"
37275+ " .align 4\n"
37276+ " .long 1b,3b\n"
37277+ ".previous"
37278+ : "=r" (pfn)
37279+ : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) );
37280+
37281+ return pfn;
37282+}
37283+
37284+/*
37285+ * We detect special mappings in one of two ways:
37286+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
37287+ * to be outside our maximum possible pseudophys range.
37288+ * 2. If the MFN belongs to a different domain then we will certainly
37289+ * not have MFN in our p2m table. Conversely, if the page is ours,
37290+ * then we'll have p2m(m2p(MFN))==MFN.
37291+ * If we detect a special mapping then it doesn't have a 'struct page'.
37292+ * We force !pfn_valid() by returning an out-of-range pointer.
37293+ *
37294+ * NB. These checks require that, for any MFN that is not in our reservation,
37295+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
37296+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
37297+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
37298+ *
37299+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
37300+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
37301+ * require. In all the cases we care about, the FOREIGN_FRAME bit is
37302+ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
37303+ */
37304+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
37305+{
37306+ unsigned long pfn = mfn_to_pfn(mfn);
37307+ if ((pfn < max_mapnr)
37308+ && !xen_feature(XENFEAT_auto_translated_physmap)
37309+ && (phys_to_machine_mapping[pfn] != mfn))
37310+ return max_mapnr; /* force !pfn_valid() */
37311+ return pfn;
37312+}
37313+
37314+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
37315+{
37316+ BUG_ON(max_mapnr && pfn >= max_mapnr);
37317+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
37318+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
37319+ return;
37320+ }
37321+ phys_to_machine_mapping[pfn] = mfn;
37322+}
37323+
37324+static inline maddr_t phys_to_machine(paddr_t phys)
37325+{
37326+ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
37327+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
37328+ return machine;
37329+}
37330+
37331+static inline paddr_t machine_to_phys(maddr_t machine)
37332+{
37333+ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
37334+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
37335+ return phys;
37336+}
37337+
37338+#ifdef CONFIG_X86_PAE
37339+static inline paddr_t pte_phys_to_machine(paddr_t phys)
37340+{
37341+ /*
37342+ * In PAE mode, the NX bit needs to be dealt with in the value
37343+ * passed to pfn_to_mfn(). On x86_64, we need to mask it off,
37344+ * but for i386 the conversion to ulong for the argument will
37345+ * clip it off.
37346+ */
37347+ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
37348+ machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
37349+ return machine;
37350+}
37351+
37352+static inline paddr_t pte_machine_to_phys(maddr_t machine)
37353+{
37354+ /*
37355+ * In PAE mode, the NX bit needs to be dealt with in the value
37356+ * passed to mfn_to_pfn(). On x86_64, we need to mask it off,
37357+ * but for i386 the conversion to ulong for the argument will
37358+ * clip it off.
37359+ */
37360+ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
37361+ phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
37362+ return phys;
37363+}
37364+#endif
37365+
37366+#ifdef CONFIG_X86_PAE
37367+#define __pte_ma(x) ((pte_t) { (x), (maddr_t)(x) >> 32 } )
37368+static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
37369+{
37370+ pte_t pte;
37371+
37372+ pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
37373+ (pgprot_val(pgprot) >> 32);
37374+ pte.pte_high &= (__supported_pte_mask >> 32);
37375+ pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
37376+ __supported_pte_mask;
37377+ return pte;
37378+}
37379+#else
37380+#define __pte_ma(x) ((pte_t) { (x) } )
37381+#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
37382+#endif
37383+
37384+#else /* !CONFIG_XEN */
37385+
37386+#define pfn_to_mfn(pfn) (pfn)
37387+#define mfn_to_pfn(mfn) (mfn)
37388+#define mfn_to_local_pfn(mfn) (mfn)
37389+#define set_phys_to_machine(pfn, mfn) ((void)0)
37390+#define phys_to_machine_mapping_valid(pfn) (1)
37391+#define phys_to_machine(phys) ((maddr_t)(phys))
37392+#define machine_to_phys(mach) ((paddr_t)(mach))
37393+#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
37394+#define __pte_ma(x) __pte(x)
37395+
37396+#endif /* !CONFIG_XEN */
37397+
37398+/* VIRT <-> MACHINE conversion */
37399+#define virt_to_machine(v) (phys_to_machine(__pa(v)))
37400+#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
37401+#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
37402+
37403+#endif /* _I386_MADDR_H */
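
For illustration only, not part of the patch: a round trip through the pseudo-physical/machine helpers above. example_maddr_roundtrip() is a hypothetical helper and assumes 'buf' is a lowmem kernel address whose frame this domain owns, plus the usual asm/page.h definitions (__pa, PAGE_SHIFT, PAGE_MASK).

	/* Sketch only: p2m/m2p round trip for a locally owned page. */
	static void example_maddr_roundtrip(void *buf)
	{
		unsigned long pfn = __pa(buf) >> PAGE_SHIFT;
		unsigned long mfn = pfn_to_mfn(pfn);
		maddr_t ma = virt_to_machine(buf);

		/* Frames we own translate back; foreign or I/O frames would be
		 * forced to max_mapnr so that !pfn_valid() holds for them. */
		BUG_ON(mfn_to_local_pfn(mfn) != pfn);
		/* phys_to_machine() preserves the offset within the page. */
		BUG_ON((unsigned long)(ma & ~PAGE_MASK) != (__pa(buf) & ~PAGE_MASK));
	}
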
37404Index: head-2008-11-25/include/asm-x86/mach-xen/asm/mmu_context_32.h
37405===================================================================
37406--- /dev/null 1970-01-01 00:00:00.000000000 +0000
37407+++ head-2008-11-25/include/asm-x86/mach-xen/asm/mmu_context_32.h 2007-06-12 13:14:02.000000000 +0200
37408@@ -0,0 +1,108 @@
37409+#ifndef __I386_SCHED_H
37410+#define __I386_SCHED_H
37411+
37412+#include <asm/desc.h>
37413+#include <asm/atomic.h>
37414+#include <asm/pgalloc.h>
37415+#include <asm/tlbflush.h>
37416+
37417+/*
37418+ * Used for LDT copy/destruction.
37419+ */
37420+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
37421+void destroy_context(struct mm_struct *mm);
37422+
37423+
37424+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
37425+{
37426+#if 0 /* XEN: no lazy tlb */
37427+ unsigned cpu = smp_processor_id();
37428+ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
37429+ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
37430+#endif
37431+}
37432+
37433+#define prepare_arch_switch(next) __prepare_arch_switch()
37434+
37435+static inline void __prepare_arch_switch(void)
37436+{
37437+ /*
37438+ * Save away %fs and %gs. No need to save %es and %ds, as those
37439+ * are always kernel segments while inside the kernel. Must
37440+ * happen before reload of cr3/ldt (i.e., not in __switch_to).
37441+ */
37442+ asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
37443+ : "=m" (current->thread.fs),
37444+ "=m" (current->thread.gs));
37445+ asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
37446+ : : "r" (0) );
37447+}
37448+
37449+extern void mm_pin(struct mm_struct *mm);
37450+extern void mm_unpin(struct mm_struct *mm);
37451+void mm_pin_all(void);
37452+
37453+static inline void switch_mm(struct mm_struct *prev,
37454+ struct mm_struct *next,
37455+ struct task_struct *tsk)
37456+{
37457+ int cpu = smp_processor_id();
37458+ struct mmuext_op _op[2], *op = _op;
37459+
37460+ if (likely(prev != next)) {
37461+ BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
37462+ !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));
37463+
37464+ /* stop flush ipis for the previous mm */
37465+ cpu_clear(cpu, prev->cpu_vm_mask);
37466+#if 0 /* XEN: no lazy tlb */
37467+ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
37468+ per_cpu(cpu_tlbstate, cpu).active_mm = next;
37469+#endif
37470+ cpu_set(cpu, next->cpu_vm_mask);
37471+
37472+ /* Re-load page tables: load_cr3(next->pgd) */
37473+ op->cmd = MMUEXT_NEW_BASEPTR;
37474+ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
37475+ op++;
37476+
37477+ /*
37478+ * load the LDT, if the LDT is different:
37479+ */
37480+ if (unlikely(prev->context.ldt != next->context.ldt)) {
37481+ /* load_LDT_nolock(&next->context, cpu) */
37482+ op->cmd = MMUEXT_SET_LDT;
37483+ op->arg1.linear_addr = (unsigned long)next->context.ldt;
37484+ op->arg2.nr_ents = next->context.size;
37485+ op++;
37486+ }
37487+
37488+ BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
37489+ }
37490+#if 0 /* XEN: no lazy tlb */
37491+ else {
37492+ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
37493+ BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
37494+
37495+ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
37496+ /* We were in lazy tlb mode and leave_mm disabled
37497+ * tlb flush IPI delivery. We must reload %cr3.
37498+ */
37499+ load_cr3(next->pgd);
37500+ load_LDT_nolock(&next->context, cpu);
37501+ }
37502+ }
37503+#endif
37504+}
37505+
37506+#define deactivate_mm(tsk, mm) \
37507+ asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
37508+
37509+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
37510+{
37511+ if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
37512+ mm_pin(next);
37513+ switch_mm(prev, next, NULL);
37514+}
37515+
37516+#endif
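
For illustration only, not part of the patch: the batching idiom switch_mm() above relies on -- queue several mmuext operations in an array and issue them with one HYPERVISOR_mmuext_op() call, just as the MMUEXT_NEW_BASEPTR/MMUEXT_SET_LDT pair is handled there. example_mmuext_batch() is a hypothetical helper; its 'pgd', 'ldt' and 'ents' arguments are placeholders.

	/* Sketch only: switch the base page table and the LDT in one hypercall. */
	static void example_mmuext_batch(pgd_t *pgd, const void *ldt, unsigned int ents)
	{
		struct mmuext_op op[2];

		op[0].cmd = MMUEXT_NEW_BASEPTR;
		op[0].arg1.mfn = pfn_to_mfn(__pa(pgd) >> PAGE_SHIFT);
		op[1].cmd = MMUEXT_SET_LDT;
		op[1].arg1.linear_addr = (unsigned long)ldt;
		op[1].arg2.nr_ents = ents;

		BUG_ON(HYPERVISOR_mmuext_op(op, 2, NULL, DOMID_SELF));
	}
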
37517Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pci_32.h
37518===================================================================
37519--- /dev/null 1970-01-01 00:00:00.000000000 +0000
37520+++ head-2008-11-25/include/asm-x86/mach-xen/asm/pci_32.h 2007-09-14 11:14:51.000000000 +0200
37521@@ -0,0 +1,148 @@
37522+#ifndef __i386_PCI_H
37523+#define __i386_PCI_H
37524+
37525+
37526+#ifdef __KERNEL__
37527+#include <linux/mm.h> /* for struct page */
37528+
37529+/* Can be used to override the logic in pci_scan_bus for skipping
37530+ already-configured bus numbers - to be used for buggy BIOSes
37531+ or architectures with incomplete PCI setup by the loader */
37532+
37533+#ifdef CONFIG_PCI
37534+extern unsigned int pcibios_assign_all_busses(void);
37535+#else
37536+#define pcibios_assign_all_busses() 0
37537+#endif
37538+
37539+#include <asm/hypervisor.h>
37540+#define pcibios_scan_all_fns(a, b) (!is_initial_xendomain())
37541+
37542+extern unsigned long pci_mem_start;
37543+#define PCIBIOS_MIN_IO 0x1000
37544+#define PCIBIOS_MIN_MEM (pci_mem_start)
37545+
37546+#define PCIBIOS_MIN_CARDBUS_IO 0x4000
37547+
37548+void pcibios_config_init(void);
37549+struct pci_bus * pcibios_scan_root(int bus);
37550+
37551+void pcibios_set_master(struct pci_dev *dev);
37552+void pcibios_penalize_isa_irq(int irq, int active);
37553+struct irq_routing_table *pcibios_get_irq_routing_table(void);
37554+int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
37555+
37556+/* Dynamic DMA mapping stuff.
37557+ * i386 has everything mapped statically.
37558+ */
37559+
37560+#include <linux/types.h>
37561+#include <linux/slab.h>
37562+#include <asm/scatterlist.h>
37563+#include <linux/string.h>
37564+#include <asm/io.h>
37565+
37566+struct pci_dev;
37567+
37568+#ifdef CONFIG_SWIOTLB
37569+
37570+
37571+/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
37572+#define PCI_DMA_BUS_IS_PHYS (0)
37573+
37574+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
37575+ dma_addr_t ADDR_NAME;
37576+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
37577+ __u32 LEN_NAME;
37578+#define pci_unmap_addr(PTR, ADDR_NAME) \
37579+ ((PTR)->ADDR_NAME)
37580+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
37581+ (((PTR)->ADDR_NAME) = (VAL))
37582+#define pci_unmap_len(PTR, LEN_NAME) \
37583+ ((PTR)->LEN_NAME)
37584+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
37585+ (((PTR)->LEN_NAME) = (VAL))
37586+
37587+#else
37588+
37589+/* The PCI address space does equal the physical memory
37590+ * address space. The networking and block device layers use
37591+ * this boolean for bounce buffer decisions.
37592+ */
37593+#define PCI_DMA_BUS_IS_PHYS (1)
37594+
37595+/* pci_unmap_{page,single} is a nop so... */
37596+#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
37597+#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
37598+#define pci_unmap_addr(PTR, ADDR_NAME) (0)
37599+#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
37600+#define pci_unmap_len(PTR, LEN_NAME) (0)
37601+#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
37602+
37603+#endif
37604+
37605+/* This is always fine. */
37606+#define pci_dac_dma_supported(pci_dev, mask) (1)
37607+
37608+static inline dma64_addr_t
37609+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
37610+{
37611+ return ((dma64_addr_t) page_to_phys(page) +
37612+ (dma64_addr_t) offset);
37613+}
37614+
37615+static inline struct page *
37616+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
37617+{
37618+ return pfn_to_page(dma_addr >> PAGE_SHIFT);
37619+}
37620+
37621+static inline unsigned long
37622+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
37623+{
37624+ return (dma_addr & ~PAGE_MASK);
37625+}
37626+
37627+static inline void
37628+pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
37629+{
37630+}
37631+
37632+static inline void
37633+pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
37634+{
37635+ flush_write_buffers();
37636+}
37637+
37638+#define HAVE_PCI_MMAP
37639+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
37640+ enum pci_mmap_state mmap_state, int write_combine);
37641+
37642+
37643+static inline void pcibios_add_platform_entries(struct pci_dev *dev)
37644+{
37645+}
37646+
37647+#ifdef CONFIG_PCI
37648+static inline void pci_dma_burst_advice(struct pci_dev *pdev,
37649+ enum pci_dma_burst_strategy *strat,
37650+ unsigned long *strategy_parameter)
37651+{
37652+ *strat = PCI_DMA_BURST_INFINITY;
37653+ *strategy_parameter = ~0UL;
37654+}
37655+#endif
37656+
37657+#endif /* __KERNEL__ */
37658+
37659+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
37660+#include <xen/pcifront.h>
37661+#endif /* CONFIG_XEN_PCIDEV_FRONTEND */
37662+
37663+/* implement the pci_ DMA API in terms of the generic device dma_ one */
37664+#include <asm-generic/pci-dma-compat.h>
37665+
37666+/* generic pci stuff */
37667+#include <asm-generic/pci.h>
37668+
37669+#endif /* __i386_PCI_H */
37670Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgalloc_32.h
37671===================================================================
37672--- /dev/null 1970-01-01 00:00:00.000000000 +0000
37673+++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgalloc_32.h 2008-07-21 11:00:33.000000000 +0200
37674@@ -0,0 +1,59 @@
37675+#ifndef _I386_PGALLOC_H
37676+#define _I386_PGALLOC_H
37677+
37678+#include <asm/fixmap.h>
37679+#include <linux/threads.h>
37680+#include <linux/mm.h> /* for struct page */
37681+#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
37682+
37683+#define pmd_populate_kernel(mm, pmd, pte) \
37684+ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
37685+
37686+#define pmd_populate(mm, pmd, pte) \
37687+do { \
37688+ unsigned long pfn = page_to_pfn(pte); \
37689+ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) { \
37690+ if (!PageHighMem(pte)) \
37691+ BUG_ON(HYPERVISOR_update_va_mapping( \
37692+ (unsigned long)__va(pfn << PAGE_SHIFT), \
37693+ pfn_pte(pfn, PAGE_KERNEL_RO), 0)); \
37694+ else if (!test_and_set_bit(PG_pinned, &pte->flags)) \
37695+ kmap_flush_unused(); \
37696+ set_pmd(pmd, \
37697+ __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT))); \
37698+ } else \
37699+ *(pmd) = __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT)); \
37700+} while (0)
37701+
37702+/*
37703+ * Allocate and free page tables.
37704+ */
37705+extern pgd_t *pgd_alloc(struct mm_struct *);
37706+extern void pgd_free(pgd_t *pgd);
37707+
37708+extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
37709+extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
37710+
37711+static inline void pte_free_kernel(pte_t *pte)
37712+{
37713+ make_lowmem_page_writable(pte, XENFEAT_writable_page_tables);
37714+ free_page((unsigned long)pte);
37715+}
37716+
37717+extern void pte_free(struct page *pte);
37718+
37719+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
37720+
37721+#ifdef CONFIG_X86_PAE
37722+/*
37723+ * In the PAE case we free the pmds as part of the pgd.
37724+ */
37725+#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
37726+#define pmd_free(x) do { } while (0)
37727+#define __pmd_free_tlb(tlb,x) do { } while (0)
37728+#define pud_populate(mm, pmd, pte) BUG()
37729+#endif
37730+
37731+#define check_pgt_cache() do { } while (0)
37732+
37733+#endif /* _I386_PGALLOC_H */
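
For illustration only, not part of the patch: the invariant pmd_populate() above maintains. Once a pgd is pinned, Xen validates and write-protects every page-table page reachable from it, so a lowmem pte page must be made read-only in the direct mapping before it is hooked in, and pte_free_kernel() makes it writable again before returning it to the allocator. example_prepare_pte_page() is a hypothetical helper that uses the make_lowmem_page_readonly() declaration from pgtable_32.h below.

	/* Sketch only: make a lowmem pte page RO before inserting it under a
	 * pinned pgd (skipped when the hypervisor offers writable page tables). */
	static void example_prepare_pte_page(pte_t *pte_page)
	{
		make_lowmem_page_readonly(pte_page, XENFEAT_writable_page_tables);
	}
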
37734Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable-3level-defs.h
37735===================================================================
37736--- /dev/null 1970-01-01 00:00:00.000000000 +0000
37737+++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable-3level-defs.h 2007-06-12 13:14:02.000000000 +0200
37738@@ -0,0 +1,24 @@
37739+#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
37740+#define _I386_PGTABLE_3LEVEL_DEFS_H
37741+
37742+#define HAVE_SHARED_KERNEL_PMD 0
37743+
37744+/*
37745+ * PGDIR_SHIFT determines what a top-level page table entry can map
37746+ */
37747+#define PGDIR_SHIFT 30
37748+#define PTRS_PER_PGD 4
37749+
37750+/*
37751+ * PMD_SHIFT determines the size of the area a middle-level
37752+ * page table can map
37753+ */
37754+#define PMD_SHIFT 21
37755+#define PTRS_PER_PMD 512
37756+
37757+/*
37758+ * entries per page directory level
37759+ */
37760+#define PTRS_PER_PTE 512
37761+
37762+#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
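
For reference, the arithmetic implied by these PAE constants: each of the 4 top-level entries maps 2^30 bytes (1 GiB), so the four together cover the full 4 GiB 32-bit address space; each of the 512 middle-level entries maps 2^21 bytes (2 MiB), so one pmd page spans 1 GiB; and a pte page of 512 entries at 4 KiB each likewise spans 2 MiB.
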
37763Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable-3level.h
37764===================================================================
37765--- /dev/null 1970-01-01 00:00:00.000000000 +0000
37766+++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable-3level.h 2008-04-02 12:34:02.000000000 +0200
37767@@ -0,0 +1,211 @@
37768+#ifndef _I386_PGTABLE_3LEVEL_H
37769+#define _I386_PGTABLE_3LEVEL_H
37770+
37771+#include <asm-generic/pgtable-nopud.h>
37772+
37773+/*
37774+ * Intel Physical Address Extension (PAE) Mode - three-level page
37775+ * tables on PPro+ CPUs.
37776+ *
37777+ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
37778+ */
37779+
37780+#define pte_ERROR(e) \
37781+ printk("%s:%d: bad pte %p(%016Lx pfn %08lx).\n", __FILE__, __LINE__, \
37782+ &(e), __pte_val(e), pte_pfn(e))
37783+#define pmd_ERROR(e) \
37784+ printk("%s:%d: bad pmd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
37785+ &(e), __pmd_val(e), (pmd_val(e) & PTE_MASK) >> PAGE_SHIFT)
37786+#define pgd_ERROR(e) \
37787+ printk("%s:%d: bad pgd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
37788+ &(e), __pgd_val(e), (pgd_val(e) & PTE_MASK) >> PAGE_SHIFT)
37789+
37790+#define pud_none(pud) 0
37791+#define pud_bad(pud) 0
37792+#define pud_present(pud) 1
37793+
37794+/*
37795+ * Is the pte executable?
37796+ */
37797+static inline int pte_x(pte_t pte)
37798+{
37799+ return !(__pte_val(pte) & _PAGE_NX);
37800+}
37801+
37802+/*
37803+ * All present user-pages with !NX bit are user-executable:
37804+ */
37805+static inline int pte_exec(pte_t pte)
37806+{
37807+ return pte_user(pte) && pte_x(pte);
37808+}
37809+/*
37810+ * All present pages with !NX bit are kernel-executable:
37811+ */
37812+static inline int pte_exec_kernel(pte_t pte)
37813+{
37814+ return pte_x(pte);
37815+}
37816+
37817+/* Rules for using set_pte: the pte being assigned *must* be
37818+ * either not present or in a state where the hardware will
37819+ * not attempt to update the pte. In places where this is
37820+ * not possible, use ptep_get_and_clear to obtain the old pte
37821+ * value and then use set_pte to update it. -ben
37822+ */
37823+#define __HAVE_ARCH_SET_PTE_ATOMIC
37824+
37825+static inline void set_pte(pte_t *ptep, pte_t pte)
37826+{
37827+ ptep->pte_high = pte.pte_high;
37828+ smp_wmb();
37829+ ptep->pte_low = pte.pte_low;
37830+}
37831+#define set_pte_atomic(pteptr,pteval) \
37832+ set_64bit((unsigned long long *)(pteptr),__pte_val(pteval))
37833+
37834+#define set_pte_at(_mm,addr,ptep,pteval) do { \
37835+ if (((_mm) != current->mm && (_mm) != &init_mm) || \
37836+ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
37837+ set_pte((ptep), (pteval)); \
37838+} while (0)
37839+
37840+#define set_pte_at_sync(_mm,addr,ptep,pteval) do { \
37841+ if (((_mm) != current->mm && (_mm) != &init_mm) || \
37842+ HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
37843+ set_pte((ptep), (pteval)); \
37844+ xen_invlpg((addr)); \
37845+ } \
37846+} while (0)
37847+
37848+#define set_pmd(pmdptr,pmdval) \
37849+ xen_l2_entry_update((pmdptr), (pmdval))
37850+#define set_pud(pudptr,pudval) \
37851+ xen_l3_entry_update((pudptr), (pudval))
37852+
37853+/*
37854+ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
37855+ * the TLB via cr3 if the top-level pgd is changed...
37856+ * We do not let the generic code free and clear pgd entries due to
37857+ * this erratum.
37858+ */
37859+static inline void pud_clear (pud_t * pud) { }
37860+
37861+#define pud_page(pud) \
37862+((struct page *) __va(pud_val(pud) & PAGE_MASK))
37863+
37864+#define pud_page_kernel(pud) \
37865+((unsigned long) __va(pud_val(pud) & PAGE_MASK))
37866+
37867+
37868+/* Find an entry in the second-level page table.. */
37869+#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
37870+ pmd_index(address))
37871+
37872+static inline int pte_none(pte_t pte)
37873+{
37874+ return !(pte.pte_low | pte.pte_high);
37875+}
37876+
37877+/*
37878+ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
37879+ * entry, so clear the bottom half first and enforce ordering with a compiler
37880+ * barrier.
37881+ */
37882+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
37883+{
37884+ if ((mm != current->mm && mm != &init_mm)
37885+ || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
37886+ ptep->pte_low = 0;
37887+ smp_wmb();
37888+ ptep->pte_high = 0;
37889+ }
37890+}
37891+
37892+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
37893+
37894+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
37895+{
37896+ pte_t pte = *ptep;
37897+ if (!pte_none(pte)) {
37898+ if ((mm != &init_mm) ||
37899+ HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
37900+ uint64_t val = __pte_val(pte);
37901+ if (__cmpxchg64(ptep, val, 0) != val) {
37902+ /* xchg acts as a barrier before the setting of the high bits */
37903+ pte.pte_low = xchg(&ptep->pte_low, 0);
37904+ pte.pte_high = ptep->pte_high;
37905+ ptep->pte_high = 0;
37906+ }
37907+ }
37908+ }
37909+ return pte;
37910+}
37911+
37912+#define ptep_clear_flush(vma, addr, ptep) \
37913+({ \
37914+ pte_t *__ptep = (ptep); \
37915+ pte_t __res = *__ptep; \
37916+ if (!pte_none(__res) && \
37917+ ((vma)->vm_mm != current->mm || \
37918+ HYPERVISOR_update_va_mapping(addr, __pte(0), \
37919+ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
37920+ UVMF_INVLPG|UVMF_MULTI))) { \
37921+ __ptep->pte_low = 0; \
37922+ smp_wmb(); \
37923+ __ptep->pte_high = 0; \
37924+ flush_tlb_page(vma, addr); \
37925+ } \
37926+ __res; \
37927+})
37928+
37929+static inline int pte_same(pte_t a, pte_t b)
37930+{
37931+ return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
37932+}
37933+
37934+#define pte_page(x) pfn_to_page(pte_pfn(x))
37935+
37936+#define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \
37937+ ((_pte).pte_high << (32-PAGE_SHIFT)))
37938+#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
37939+ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
37940+#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_IO ? max_mapnr : \
37941+ (_pte).pte_low & _PAGE_PRESENT ? \
37942+ mfn_to_local_pfn(__pte_mfn(_pte)) : \
37943+ __pte_mfn(_pte))
37944+
37945+extern unsigned long long __supported_pte_mask;
37946+
37947+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
37948+{
37949+ return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
37950+ pgprot_val(pgprot)) & __supported_pte_mask);
37951+}
37952+
37953+static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
37954+{
37955+ return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
37956+ pgprot_val(pgprot)) & __supported_pte_mask);
37957+}
37958+
37959+/*
37960+ * Bits 0, 6 and 7 are taken in the low part of the pte,
37961+ * put the 32 bits of offset into the high part.
37962+ */
37963+#define pte_to_pgoff(pte) ((pte).pte_high)
37964+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
37965+#define PTE_FILE_MAX_BITS 32
37966+
37967+/* Encode and de-code a swap entry */
37968+#define __swp_type(x) (((x).val) & 0x1f)
37969+#define __swp_offset(x) ((x).val >> 5)
37970+#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
37971+#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
37972+#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
37973+
37974+#define __pmd_free_tlb(tlb, x) do { } while (0)
37975+
37976+void vmalloc_sync_all(void);
37977+
37978+#endif /* _I386_PGTABLE_3LEVEL_H */
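
For illustration only, not part of the patch: why set_pte() and pte_clear() above write the two 32-bit halves of a PAE pte in opposite orders. The MMU may walk the tables at any moment, so the half that carries _PAGE_PRESENT (pte_low) is written last when installing an entry and cleared first when tearing one down, with smp_wmb() preventing a half-written but present-looking pte from becoming visible. example_install_and_clear() is a hypothetical condensation of the two helpers.

	/* Sketch only: store ordering for the two halves of a 64-bit PAE pte. */
	static void example_install_and_clear(pte_t *ptep, pte_t pte)
	{
		/* Install: high word first, present-carrying low word last. */
		ptep->pte_high = pte.pte_high;
		smp_wmb();
		ptep->pte_low = pte.pte_low;

		/* Tear down: clear the present-carrying low word first. */
		ptep->pte_low = 0;
		smp_wmb();
		ptep->pte_high = 0;
	}
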
37979Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable_32.h
37980===================================================================
37981--- /dev/null 1970-01-01 00:00:00.000000000 +0000
37982+++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable_32.h 2008-07-21 11:00:33.000000000 +0200
37983@@ -0,0 +1,537 @@
37984+#ifndef _I386_PGTABLE_H
37985+#define _I386_PGTABLE_H
37986+
37987+#include <asm/hypervisor.h>
37988+
37989+/*
37990+ * The Linux memory management assumes a three-level page table setup. On
37991+ * the i386, we use that, but "fold" the mid level into the top-level page
37992+ * table, so that we physically have the same two-level page table as the
37993+ * i386 mmu expects.
37994+ *
37995+ * This file contains the functions and defines necessary to modify and use
37996+ * the i386 page table tree.
37997+ */
37998+#ifndef __ASSEMBLY__
37999+#include <asm/processor.h>
38000+#include <asm/fixmap.h>
38001+#include <linux/threads.h>
38002+
38003+#ifndef _I386_BITOPS_H
38004+#include <asm/bitops.h>
38005+#endif
38006+
38007+#include <linux/slab.h>
38008+#include <linux/list.h>
38009+#include <linux/spinlock.h>
38010+
38011+/* Is this pagetable pinned? */
38012+#define PG_pinned PG_arch_1
38013+
38014+struct mm_struct;
38015+struct vm_area_struct;
38016+
38017+/*
38018+ * ZERO_PAGE is a global shared page that is always zero: used
38019+ * for zero-mapped memory areas etc..
38020+ */
38021+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
38022+extern unsigned long empty_zero_page[1024];
38023+extern pgd_t *swapper_pg_dir;
38024+extern kmem_cache_t *pgd_cache;
38025+extern kmem_cache_t *pmd_cache;
38026+extern spinlock_t pgd_lock;
38027+extern struct page *pgd_list;
38028+
38029+void pmd_ctor(void *, kmem_cache_t *, unsigned long);
38030+void pgd_ctor(void *, kmem_cache_t *, unsigned long);
38031+void pgd_dtor(void *, kmem_cache_t *, unsigned long);
38032+void pgtable_cache_init(void);
38033+void paging_init(void);
38034+
38035+/*
38036+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
38037+ * implements both the traditional 2-level x86 page tables and the
38038+ * newer 3-level PAE-mode page tables.
38039+ */
38040+#ifdef CONFIG_X86_PAE
38041+# include <asm/pgtable-3level-defs.h>
38042+# define PMD_SIZE (1UL << PMD_SHIFT)
38043+# define PMD_MASK (~(PMD_SIZE-1))
38044+#else
38045+# include <asm/pgtable-2level-defs.h>
38046+#endif
38047+
38048+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
38049+#define PGDIR_MASK (~(PGDIR_SIZE-1))
38050+
38051+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
38052+#define FIRST_USER_ADDRESS 0
38053+
38054+#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
38055+#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
38056+
38057+#define TWOLEVEL_PGDIR_SHIFT 22
38058+#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
38059+#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
38060+
38061+/* Just any arbitrary offset to the start of the vmalloc VM area: the
38062+ * current 8MB value just means that there will be an 8MB "hole" after the
38063+ * physical memory until the kernel virtual memory starts. That means that
38064+ * any out-of-bounds memory accesses will hopefully be caught.
38065+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
38066+ * area for the same reason. ;)
38067+ */
38068+#define VMALLOC_OFFSET (8*1024*1024)
38069+#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
38070+ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
38071+#ifdef CONFIG_HIGHMEM
38072+# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
38073+#else
38074+# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
38075+#endif
38076+
38077+/*
38078+ * _PAGE_PSE set in the page directory entry just means that
38079+ * the page directory entry points directly to a 4MB-aligned block of
38080+ * memory.
38081+ */
38082+#define _PAGE_BIT_PRESENT 0
38083+#define _PAGE_BIT_RW 1
38084+#define _PAGE_BIT_USER 2
38085+#define _PAGE_BIT_PWT 3
38086+#define _PAGE_BIT_PCD 4
38087+#define _PAGE_BIT_ACCESSED 5
38088+#define _PAGE_BIT_DIRTY 6
38089+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
38090+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
38091+/*#define _PAGE_BIT_UNUSED1 9*/ /* available for programmer */
38092+#define _PAGE_BIT_UNUSED2 10
38093+#define _PAGE_BIT_UNUSED3 11
38094+#define _PAGE_BIT_NX 63
38095+
38096+#define _PAGE_PRESENT 0x001
38097+#define _PAGE_RW 0x002
38098+#define _PAGE_USER 0x004
38099+#define _PAGE_PWT 0x008
38100+#define _PAGE_PCD 0x010
38101+#define _PAGE_ACCESSED 0x020
38102+#define _PAGE_DIRTY 0x040
38103+#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
38104+#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
38105+/*#define _PAGE_UNUSED1 0x200*/ /* available for programmer */
38106+#define _PAGE_UNUSED2 0x400
38107+#define _PAGE_UNUSED3 0x800
38108+
38109+/* If _PAGE_PRESENT is clear, we use these: */
38110+#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
38111+#define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE;
38112+ pte_present gives true */
38113+#ifdef CONFIG_X86_PAE
38114+#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
38115+#else
38116+#define _PAGE_NX 0
38117+#endif
38118+
38119+/* Mapped page is I/O or foreign and has no associated page struct. */
38120+#define _PAGE_IO 0x200
38121+
38122+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
38123+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
38124+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_IO)
38125+
38126+#define PAGE_NONE \
38127+ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
38128+#define PAGE_SHARED \
38129+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
38130+
38131+#define PAGE_SHARED_EXEC \
38132+ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
38133+#define PAGE_COPY_NOEXEC \
38134+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
38135+#define PAGE_COPY_EXEC \
38136+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
38137+#define PAGE_COPY \
38138+ PAGE_COPY_NOEXEC
38139+#define PAGE_READONLY \
38140+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
38141+#define PAGE_READONLY_EXEC \
38142+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
38143+
38144+#define _PAGE_KERNEL \
38145+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
38146+#define _PAGE_KERNEL_EXEC \
38147+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
38148+
38149+extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
38150+#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
38151+#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
38152+#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
38153+#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
38154+
38155+#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
38156+#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
38157+#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
38158+#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
38159+#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
38160+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
38161+
38162+/*
38163+ * The i386 can't do page protection for execute, and considers that
38164+ * the same as read. Also, write permissions imply read permissions.
38165+ * This is the closest we can get..
38166+ */
38167+#define __P000 PAGE_NONE
38168+#define __P001 PAGE_READONLY
38169+#define __P010 PAGE_COPY
38170+#define __P011 PAGE_COPY
38171+#define __P100 PAGE_READONLY_EXEC
38172+#define __P101 PAGE_READONLY_EXEC
38173+#define __P110 PAGE_COPY_EXEC
38174+#define __P111 PAGE_COPY_EXEC
38175+
38176+#define __S000 PAGE_NONE
38177+#define __S001 PAGE_READONLY
38178+#define __S010 PAGE_SHARED
38179+#define __S011 PAGE_SHARED
38180+#define __S100 PAGE_READONLY_EXEC
38181+#define __S101 PAGE_READONLY_EXEC
38182+#define __S110 PAGE_SHARED_EXEC
38183+#define __S111 PAGE_SHARED_EXEC
38184+
38185+/*
38186+ * Define this if things work differently on an i386 and an i486:
38187+ * it will (on an i486) warn about kernel memory accesses that are
38188+ * done without an 'access_ok(VERIFY_WRITE,..)'
38189+ */
38190+#undef TEST_ACCESS_OK
38191+
38192+/* The boot page tables (all created as a single array) */
38193+extern unsigned long pg0[];
38194+
38195+#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
38196+
38197+/* To avoid harmful races, pmd_none(x) should check only the lower word when PAE */
38198+#define pmd_none(x) (!(unsigned long)__pmd_val(x))
38199+#if CONFIG_XEN_COMPAT <= 0x030002
38200+/* pmd_present doesn't just test the _PAGE_PRESENT bit since writable
38201+ page tables (wr.p.t.) can temporarily clear it. */
38202+#define pmd_present(x) (__pmd_val(x))
38203+#define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
38204+#else
38205+#define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT)
38206+#define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
38207+#endif
38208+
38209+
38210+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
38211+
38212+/*
38213+ * The following only work if pte_present() is true.
38214+ * Undefined behaviour if not..
38215+ */
38216+static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
38217+static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
38218+static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
38219+static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
38220+static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
38221+static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
38222+
38223+/*
38224+ * The following only works if pte_present() is not true.
38225+ */
38226+static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
38227+
38228+static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
38229+static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
38230+static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
38231+static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
38232+static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
38233+static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
38234+static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
38235+static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
38236+static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
38237+static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
38238+static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
38239+
38240+#ifdef CONFIG_X86_PAE
38241+# include <asm/pgtable-3level.h>
38242+#else
38243+# include <asm/pgtable-2level.h>
38244+#endif
38245+
38246+#define ptep_test_and_clear_dirty(vma, addr, ptep) \
38247+({ \
38248+ pte_t __pte = *(ptep); \
38249+ int __ret = pte_dirty(__pte); \
38250+ if (__ret) { \
38251+ __pte = pte_mkclean(__pte); \
38252+ if ((vma)->vm_mm != current->mm || \
38253+ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \
38254+ (ptep)->pte_low = __pte.pte_low; \
38255+ } \
38256+ __ret; \
38257+})
38258+
38259+#define ptep_test_and_clear_young(vma, addr, ptep) \
38260+({ \
38261+ pte_t __pte = *(ptep); \
38262+ int __ret = pte_young(__pte); \
38263+ if (__ret) \
38264+ __pte = pte_mkold(__pte); \
38265+ if ((vma)->vm_mm != current->mm || \
38266+ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \
38267+ (ptep)->pte_low = __pte.pte_low; \
38268+ __ret; \
38269+})
38270+
38271+#define ptep_get_and_clear_full(mm, addr, ptep, full) \
38272+ ((full) ? ({ \
38273+ pte_t __res = *(ptep); \
38274+ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) \
38275+ xen_l1_entry_update(ptep, __pte(0)); \
38276+ else \
38277+ *(ptep) = __pte(0); \
38278+ __res; \
38279+ }) : \
38280+ ptep_get_and_clear(mm, addr, ptep))
38281+
38282+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
38283+{
38284+ pte_t pte = *ptep;
38285+ if (pte_write(pte))
38286+ set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
38287+}
38288+
38289+/*
38290+ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
38291+ *
38292+ * dst - pointer to pgd range anywhere on a pgd page
38293+ * src - ""
38294+ * count - the number of pgds to copy.
38295+ *
38296+ * dst and src can be on the same page, but the range must not overlap,
38297+ * and must not cross a page boundary.
38298+ */
38299+static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
38300+{
38301+ memcpy(dst, src, count * sizeof(pgd_t));
38302+}
38303+
38304+/*
38305+ * Macro to mark a page protection value as "uncacheable". On processors which do not support
38306+ * it, this is a no-op.
38307+ */
38308+#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
38309+ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
38310+
38311+/*
38312+ * Conversion functions: convert a page and protection to a page entry,
38313+ * and a page entry and page directory to the page they refer to.
38314+ */
38315+
38316+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
38317+
38318+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
38319+{
38320+ /*
38321+ * Since this might change the present bit (which controls whether
38322+ * a pte_t object has undergone p2m translation), we must use
38323+ * pte_val() on the input pte and __pte() for the return value.
38324+ */
38325+ paddr_t pteval = pte_val(pte);
38326+
38327+ pteval &= _PAGE_CHG_MASK;
38328+ pteval |= pgprot_val(newprot);
38329+#ifdef CONFIG_X86_PAE
38330+ pteval &= __supported_pte_mask;
38331+#endif
38332+ return __pte(pteval);
38333+}
38334+
38335+#define pmd_large(pmd) \
38336+((__pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
38337+
38338+/*
38339+ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
38340+ *
38341+ * this macro returns the index of the entry in the pgd page which would
38342+ * control the given virtual address
38343+ */
38344+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
38345+#define pgd_index_k(addr) pgd_index(addr)
38346+
38347+/*
38348+ * pgd_offset() returns a (pgd_t *)
38349+ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
38350+ */
38351+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
38352+
38353+/*
38354+ * a shortcut which implies the use of the kernel's pgd, instead
38355+ * of a process's
38356+ */
38357+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
38358+
38359+/*
38360+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
38361+ *
38362+ * this macro returns the index of the entry in the pmd page which would
38363+ * control the given virtual address
38364+ */
38365+#define pmd_index(address) \
38366+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
38367+
38368+/*
38369+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
38370+ *
38371+ * this macro returns the index of the entry in the pte page which would
38372+ * control the given virtual address
38373+ */
38374+#define pte_index(address) \
38375+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
38376+#define pte_offset_kernel(dir, address) \
38377+ ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
38378+
38379+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
38380+
38381+#define pmd_page_kernel(pmd) \
38382+ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
38383+
38384+/*
38385+ * Helper function that returns the kernel pagetable entry controlling
38386+ * the virtual address 'address'. NULL means no pagetable entry present.
38387+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
38388+ * as a pte too.
38389+ */
38390+extern pte_t *lookup_address(unsigned long address);
38391+
38392+/*
38393+ * Make a given kernel text page executable/non-executable.
38394+ * Returns the previous executability setting of that page (which
38395+ * is used to restore the previous state). Used by the SMP bootup code.
38396+ * NOTE: this is an __init function for security reasons.
38397+ */
38398+#ifdef CONFIG_X86_PAE
38399+ extern int set_kernel_exec(unsigned long vaddr, int enable);
38400+#else
38401+ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
38402+#endif
38403+
38404+extern void noexec_setup(const char *str);
38405+
38406+#if defined(CONFIG_HIGHPTE)
38407+#define pte_offset_map(dir, address) \
38408+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
38409+ pte_index(address))
38410+#define pte_offset_map_nested(dir, address) \
38411+ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
38412+ pte_index(address))
38413+#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
38414+#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
38415+#else
38416+#define pte_offset_map(dir, address) \
38417+ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
38418+#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
38419+#define pte_unmap(pte) do { } while (0)
38420+#define pte_unmap_nested(pte) do { } while (0)
38421+#endif
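/*
 * Usage sketch (illustrative only; hypothetical helper): pte_offset_map()
 * must always be paired with pte_unmap(), because with CONFIG_HIGHPTE the
 * pte page is only temporarily mapped through kmap_atomic_pte().
 */
static inline int example_pte_is_present(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = pte_offset_map(pmd, addr);
	int present = pte_present(*pte);

	pte_unmap(pte);
	return present;
}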
38422+
38423+#define __HAVE_ARCH_PTEP_ESTABLISH
38424+#define ptep_establish(vma, address, ptep, pteval) \
38425+ do { \
38426+ if ( likely((vma)->vm_mm == current->mm) ) { \
38427+ BUG_ON(HYPERVISOR_update_va_mapping(address, \
38428+ pteval, \
38429+ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
38430+ UVMF_INVLPG|UVMF_MULTI)); \
38431+ } else { \
38432+ xen_l1_entry_update(ptep, pteval); \
38433+ flush_tlb_page(vma, address); \
38434+ } \
38435+ } while (0)
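/*
 * Usage sketch (illustrative only; `vma', `address', `ptep' and `entry' are
 * hypothetical): a fault path that has built a new user pte value installs
 * it with ptep_establish(). On Xen the common current-mm case becomes a
 * single HYPERVISOR_update_va_mapping hypercall; the cross-mm case falls
 * back to xen_l1_entry_update() plus an explicit TLB flush.
 *
 *	ptep_establish(vma, address, ptep, entry);
 */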
38436+
38437+/*
38438+ * The i386 doesn't have any external MMU info: the kernel page
38439+ * tables contain all the necessary information.
38440+ *
38441+ * Also, we only update the dirty/accessed state if we set
38442+ * the dirty bit by hand in the kernel, since the hardware
38443+ * will do the accessed bit for us, and we don't want to
38444+ * race with other CPU's that might be updating the dirty
38445+ * bit at the same time.
38446+ */
38447+#define update_mmu_cache(vma,address,pte) do { } while (0)
38448+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
38449+#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
38450+ do { \
38451+ if (dirty) \
38452+ ptep_establish(vma, address, ptep, entry); \
38453+ } while (0)
38454+
38455+#include <xen/features.h>
38456+void make_lowmem_page_readonly(void *va, unsigned int feature);
38457+void make_lowmem_page_writable(void *va, unsigned int feature);
38458+void make_page_readonly(void *va, unsigned int feature);
38459+void make_page_writable(void *va, unsigned int feature);
38460+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
38461+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
38462+
38463+#define virt_to_ptep(va) \
38464+({ \
38465+ pte_t *__ptep = lookup_address((unsigned long)(va)); \
38466+ BUG_ON(!__ptep || !pte_present(*__ptep)); \
38467+ __ptep; \
38468+})
38469+
38470+#define arbitrary_virt_to_machine(va) \
38471+ (((maddr_t)pte_mfn(*virt_to_ptep(va)) << PAGE_SHIFT) \
38472+ | ((unsigned long)(va) & (PAGE_SIZE - 1)))
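/*
 * Usage sketch (illustrative only; `buf' is a hypothetical, currently
 * mapped kernel pointer): interfaces that expect machine addresses rather
 * than pseudo-physical ones can use the helper above, e.g.
 *
 *	maddr_t maddr = arbitrary_virt_to_machine(buf);
 */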
38473+
38474+#endif /* !__ASSEMBLY__ */
38475+
38476+#ifdef CONFIG_FLATMEM
38477+#define kern_addr_valid(addr) (1)
38478+#endif /* CONFIG_FLATMEM */
38479+
38480+int direct_remap_pfn_range(struct vm_area_struct *vma,
38481+ unsigned long address,
38482+ unsigned long mfn,
38483+ unsigned long size,
38484+ pgprot_t prot,
38485+ domid_t domid);
38486+int direct_kernel_remap_pfn_range(unsigned long address,
38487+ unsigned long mfn,
38488+ unsigned long size,
38489+ pgprot_t prot,
38490+ domid_t domid);
38491+int create_lookup_pte_addr(struct mm_struct *mm,
38492+ unsigned long address,
38493+ uint64_t *ptep);
38494+int touch_pte_range(struct mm_struct *mm,
38495+ unsigned long address,
38496+ unsigned long size);
38497+
38498+int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
38499+ unsigned long addr, unsigned long end, pgprot_t newprot);
38500+
38501+#define arch_change_pte_range(mm, pmd, addr, end, newprot) \
38502+ xen_change_pte_range(mm, pmd, addr, end, newprot)
38503+
38504+#define io_remap_pfn_range(vma,from,pfn,size,prot) \
38505+direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
38506+
38507+#define MK_IOSPACE_PFN(space, pfn) (pfn)
38508+#define GET_IOSPACE(pfn) 0
38509+#define GET_PFN(pfn) (pfn)
38510+
38511+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
38512+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
38513+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
38514+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
38515+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
38516+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
38517+#define __HAVE_ARCH_PTE_SAME
38518+#include <asm-generic/pgtable.h>
38519+
38520+#endif /* _I386_PGTABLE_H */
38521Index: head-2008-11-25/include/asm-x86/mach-xen/asm/processor_32.h
38522===================================================================
38523--- /dev/null 1970-01-01 00:00:00.000000000 +0000
38524+++ head-2008-11-25/include/asm-x86/mach-xen/asm/processor_32.h 2008-01-28 12:24:19.000000000 +0100
38525@@ -0,0 +1,743 @@
38526+/*
38527+ * include/asm-i386/processor.h
38528+ *
38529+ * Copyright (C) 1994 Linus Torvalds
38530+ */
38531+
38532+#ifndef __ASM_I386_PROCESSOR_H
38533+#define __ASM_I386_PROCESSOR_H
38534+
38535+#include <asm/vm86.h>
38536+#include <asm/math_emu.h>
38537+#include <asm/segment.h>
38538+#include <asm/page.h>
38539+#include <asm/types.h>
38540+#include <asm/sigcontext.h>
38541+#include <asm/cpufeature.h>
38542+#include <asm/msr.h>
38543+#include <asm/system.h>
38544+#include <linux/cache.h>
38545+#include <linux/threads.h>
38546+#include <asm/percpu.h>
38547+#include <linux/cpumask.h>
38548+#include <xen/interface/physdev.h>
38549+
38550+/* flag for disabling the tsc */
38551+extern int tsc_disable;
38552+
38553+struct desc_struct {
38554+ unsigned long a,b;
38555+};
38556+
38557+#define desc_empty(desc) \
38558+ (!((desc)->a | (desc)->b))
38559+
38560+#define desc_equal(desc1, desc2) \
38561+ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
38562+/*
38563+ * Default implementation of macro that returns current
38564+ * instruction pointer ("program counter").
38565+ */
38566+#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
38567+
38568+/*
38569+ * CPU type and hardware bug flags. Kept separately for each CPU.
38570+ * Members of this structure are referenced in head.S, so think twice
38571+ * before touching them. [mj]
38572+ */
38573+
38574+struct cpuinfo_x86 {
38575+ __u8 x86; /* CPU family */
38576+ __u8 x86_vendor; /* CPU vendor */
38577+ __u8 x86_model;
38578+ __u8 x86_mask;
38579+ char wp_works_ok; /* It doesn't on 386's */
38580+ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
38581+ char hard_math;
38582+ char rfu;
38583+ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
38584+ unsigned long x86_capability[NCAPINTS];
38585+ char x86_vendor_id[16];
38586+ char x86_model_id[64];
38587+ int x86_cache_size; /* in KB - valid for CPUS which support this
38588+ call */
38589+ int x86_cache_alignment; /* In bytes */
38590+ char fdiv_bug;
38591+ char f00f_bug;
38592+ char coma_bug;
38593+ char pad0;
38594+ int x86_power;
38595+ unsigned long loops_per_jiffy;
38596+#ifdef CONFIG_SMP
38597+ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
38598+#endif
38599+ unsigned char x86_max_cores; /* cpuid returned max cores value */
38600+ unsigned char apicid;
38601+#ifdef CONFIG_SMP
38602+ unsigned char booted_cores; /* number of cores as seen by OS */
38603+ __u8 phys_proc_id; /* Physical processor id. */
38604+ __u8 cpu_core_id; /* Core id */
38605+#endif
38606+} __attribute__((__aligned__(SMP_CACHE_BYTES)));
38607+
38608+#define X86_VENDOR_INTEL 0
38609+#define X86_VENDOR_CYRIX 1
38610+#define X86_VENDOR_AMD 2
38611+#define X86_VENDOR_UMC 3
38612+#define X86_VENDOR_NEXGEN 4
38613+#define X86_VENDOR_CENTAUR 5
38614+#define X86_VENDOR_RISE 6
38615+#define X86_VENDOR_TRANSMETA 7
38616+#define X86_VENDOR_NSC 8
38617+#define X86_VENDOR_NUM 9
38618+#define X86_VENDOR_UNKNOWN 0xff
38619+
38620+/*
38621+ * capabilities of CPUs
38622+ */
38623+
38624+extern struct cpuinfo_x86 boot_cpu_data;
38625+extern struct cpuinfo_x86 new_cpu_data;
38626+#ifndef CONFIG_X86_NO_TSS
38627+extern struct tss_struct doublefault_tss;
38628+DECLARE_PER_CPU(struct tss_struct, init_tss);
38629+#endif
38630+
38631+#ifdef CONFIG_SMP
38632+extern struct cpuinfo_x86 cpu_data[];
38633+#define current_cpu_data cpu_data[smp_processor_id()]
38634+#else
38635+#define cpu_data (&boot_cpu_data)
38636+#define current_cpu_data boot_cpu_data
38637+#endif
38638+
38639+extern int cpu_llc_id[NR_CPUS];
38640+extern char ignore_fpu_irq;
38641+
38642+extern void identify_cpu(struct cpuinfo_x86 *);
38643+extern void print_cpu_info(struct cpuinfo_x86 *);
38644+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
38645+extern unsigned short num_cache_leaves;
38646+
38647+#ifdef CONFIG_X86_HT
38648+extern void detect_ht(struct cpuinfo_x86 *c);
38649+#else
38650+static inline void detect_ht(struct cpuinfo_x86 *c) {}
38651+#endif
38652+
38653+/*
38654+ * EFLAGS bits
38655+ */
38656+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
38657+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
38658+#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
38659+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
38660+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
38661+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
38662+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
38663+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
38664+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
38665+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
38666+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
38667+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
38668+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
38669+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
38670+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
38671+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
38672+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
38673+
38674+/*
38675+ * Generic CPUID function
38676+ * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
38677+ * resulting in stale register contents being returned.
38678+ */
38679+static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
38680+{
38681+ __asm__(XEN_CPUID
38682+ : "=a" (*eax),
38683+ "=b" (*ebx),
38684+ "=c" (*ecx),
38685+ "=d" (*edx)
38686+ : "0" (op), "c"(0));
38687+}
38688+
38689+/* Some CPUID calls want 'count' to be placed in ecx */
38690+static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
38691+ int *edx)
38692+{
38693+ __asm__(XEN_CPUID
38694+ : "=a" (*eax),
38695+ "=b" (*ebx),
38696+ "=c" (*ecx),
38697+ "=d" (*edx)
38698+ : "0" (op), "c" (count));
38699+}
38700+
38701+/*
38702+ * CPUID functions returning a single datum
38703+ */
38704+static inline unsigned int cpuid_eax(unsigned int op)
38705+{
38706+ unsigned int eax;
38707+
38708+ __asm__(XEN_CPUID
38709+ : "=a" (eax)
38710+ : "0" (op)
38711+ : "bx", "cx", "dx");
38712+ return eax;
38713+}
38714+static inline unsigned int cpuid_ebx(unsigned int op)
38715+{
38716+ unsigned int eax, ebx;
38717+
38718+ __asm__(XEN_CPUID
38719+ : "=a" (eax), "=b" (ebx)
38720+ : "0" (op)
38721+ : "cx", "dx" );
38722+ return ebx;
38723+}
38724+static inline unsigned int cpuid_ecx(unsigned int op)
38725+{
38726+ unsigned int eax, ecx;
38727+
38728+ __asm__(XEN_CPUID
38729+ : "=a" (eax), "=c" (ecx)
38730+ : "0" (op)
38731+ : "bx", "dx" );
38732+ return ecx;
38733+}
38734+static inline unsigned int cpuid_edx(unsigned int op)
38735+{
38736+ unsigned int eax, edx;
38737+
38738+ __asm__(XEN_CPUID
38739+ : "=a" (eax), "=d" (edx)
38740+ : "0" (op)
38741+ : "bx", "cx");
38742+ return edx;
38743+}
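/*
 * Usage sketch (illustrative only; hypothetical helper): the wrappers above
 * issue the paravirtualised XEN_CPUID sequence instead of a raw CPUID, so
 * feature probing looks the same as on bare metal.
 */
static inline unsigned int example_std_feature_flags(void)
{
	if (cpuid_eax(0) < 1)		/* maximum supported standard leaf */
		return 0;
	return cpuid_edx(1);		/* EDX feature flags of leaf 1 */
}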
38744+
38745+#define load_cr3(pgdir) write_cr3(__pa(pgdir))
38746+
38747+/*
38748+ * Intel CPU features in CR4
38749+ */
38750+#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
38751+#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
38752+#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
38753+#define X86_CR4_DE 0x0008 /* enable debugging extensions */
38754+#define X86_CR4_PSE 0x0010 /* enable page size extensions */
38755+#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
38756+#define X86_CR4_MCE 0x0040 /* Machine check enable */
38757+#define X86_CR4_PGE 0x0080 /* enable global pages */
38758+#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
38759+#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
38760+#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
38761+
38762+/*
38763+ * Save the cr4 feature set we're using (ie
38764+ * Pentium 4MB enable and PPro Global page
38765+ * enable), so that any CPU's that boot up
38766+ * after us can get the correct flags.
38767+ */
38768+extern unsigned long mmu_cr4_features;
38769+
38770+static inline void set_in_cr4 (unsigned long mask)
38771+{
38772+ unsigned cr4;
38773+ mmu_cr4_features |= mask;
38774+ cr4 = read_cr4();
38775+ cr4 |= mask;
38776+ write_cr4(cr4);
38777+}
38778+
38779+static inline void clear_in_cr4 (unsigned long mask)
38780+{
38781+ unsigned cr4;
38782+ mmu_cr4_features &= ~mask;
38783+ cr4 = read_cr4();
38784+ cr4 &= ~mask;
38785+ write_cr4(cr4);
38786+}
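/*
 * Usage sketch (illustrative only; hypothetical helper): CPU setup code
 * enabling SSE would switch on OSFXSR/OSXMMEXCPT this way, so that the bits
 * are also recorded in mmu_cr4_features for CPUs brought up later.
 */
static inline void example_enable_sse_bits(void)
{
	set_in_cr4(X86_CR4_OSFXSR | X86_CR4_OSXMMEXCPT);
}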
38787+
38788+/*
38789+ * NSC/Cyrix CPU configuration register indexes
38790+ */
38791+
38792+#define CX86_PCR0 0x20
38793+#define CX86_GCR 0xb8
38794+#define CX86_CCR0 0xc0
38795+#define CX86_CCR1 0xc1
38796+#define CX86_CCR2 0xc2
38797+#define CX86_CCR3 0xc3
38798+#define CX86_CCR4 0xe8
38799+#define CX86_CCR5 0xe9
38800+#define CX86_CCR6 0xea
38801+#define CX86_CCR7 0xeb
38802+#define CX86_PCR1 0xf0
38803+#define CX86_DIR0 0xfe
38804+#define CX86_DIR1 0xff
38805+#define CX86_ARR_BASE 0xc4
38806+#define CX86_RCR_BASE 0xdc
38807+
38808+/*
38809+ * NSC/Cyrix CPU indexed register access macros
38810+ */
38811+
38812+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
38813+
38814+#define setCx86(reg, data) do { \
38815+ outb((reg), 0x22); \
38816+ outb((data), 0x23); \
38817+} while (0)
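/*
 * Usage sketch (illustrative only; hypothetical helper, and the 0x10 MAPEN
 * unlock value in CCR3 is an assumption of this sketch): Cyrix configuration
 * registers are programmed with a read-modify-write of CCR3 around the
 * actual update.
 */
static inline void example_program_cyrix_reg(unsigned char reg, unsigned char val)
{
	unsigned char ccr3 = getCx86(CX86_CCR3);

	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable config access */
	setCx86(reg, val);
	setCx86(CX86_CCR3, ccr3);			/* restore */
}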
38818+
38819+/* Stop speculative execution */
38820+static inline void sync_core(void)
38821+{
38822+ int tmp;
38823+ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
38824+}
38825+
38826+static inline void __monitor(const void *eax, unsigned long ecx,
38827+ unsigned long edx)
38828+{
38829+ /* "monitor %eax,%ecx,%edx;" */
38830+ asm volatile(
38831+ ".byte 0x0f,0x01,0xc8;"
38832+ : :"a" (eax), "c" (ecx), "d"(edx));
38833+}
38834+
38835+static inline void __mwait(unsigned long eax, unsigned long ecx)
38836+{
38837+ /* "mwait %eax,%ecx;" */
38838+ asm volatile(
38839+ ".byte 0x0f,0x01,0xc9;"
38840+ : :"a" (eax), "c" (ecx));
38841+}
38842+
38843+/* from system description table in BIOS. Mostly for MCA use, but
38844+others may find it useful. */
38845+extern unsigned int machine_id;
38846+extern unsigned int machine_submodel_id;
38847+extern unsigned int BIOS_revision;
38848+extern unsigned int mca_pentium_flag;
38849+
38850+/* Boot loader type from the setup header */
38851+extern int bootloader_type;
38852+
38853+/*
38854+ * User space process size: 3GB (default).
38855+ */
38856+#define TASK_SIZE (PAGE_OFFSET)
38857+
38858+/* This decides where the kernel will search for a free chunk of vm
38859+ * space during mmap's.
38860+ */
38861+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
38862+
38863+#define HAVE_ARCH_PICK_MMAP_LAYOUT
38864+
38865+/*
38866+ * Size of io_bitmap.
38867+ */
38868+#define IO_BITMAP_BITS 65536
38869+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
38870+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
38871+#ifndef CONFIG_X86_NO_TSS
38872+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
38873+#endif
38874+#define INVALID_IO_BITMAP_OFFSET 0x8000
38875+#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
38876+
38877+struct i387_fsave_struct {
38878+ long cwd;
38879+ long swd;
38880+ long twd;
38881+ long fip;
38882+ long fcs;
38883+ long foo;
38884+ long fos;
38885+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
38886+ long status; /* software status information */
38887+};
38888+
38889+struct i387_fxsave_struct {
38890+ unsigned short cwd;
38891+ unsigned short swd;
38892+ unsigned short twd;
38893+ unsigned short fop;
38894+ long fip;
38895+ long fcs;
38896+ long foo;
38897+ long fos;
38898+ long mxcsr;
38899+ long mxcsr_mask;
38900+ long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
38901+ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
38902+ long padding[56];
38903+} __attribute__ ((aligned (16)));
38904+
38905+struct i387_soft_struct {
38906+ long cwd;
38907+ long swd;
38908+ long twd;
38909+ long fip;
38910+ long fcs;
38911+ long foo;
38912+ long fos;
38913+ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
38914+ unsigned char ftop, changed, lookahead, no_update, rm, alimit;
38915+ struct info *info;
38916+ unsigned long entry_eip;
38917+};
38918+
38919+union i387_union {
38920+ struct i387_fsave_struct fsave;
38921+ struct i387_fxsave_struct fxsave;
38922+ struct i387_soft_struct soft;
38923+};
38924+
38925+typedef struct {
38926+ unsigned long seg;
38927+} mm_segment_t;
38928+
38929+struct thread_struct;
38930+
38931+#ifndef CONFIG_X86_NO_TSS
38932+struct tss_struct {
38933+ unsigned short back_link,__blh;
38934+ unsigned long esp0;
38935+ unsigned short ss0,__ss0h;
38936+ unsigned long esp1;
38937+ unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
38938+ unsigned long esp2;
38939+ unsigned short ss2,__ss2h;
38940+ unsigned long __cr3;
38941+ unsigned long eip;
38942+ unsigned long eflags;
38943+ unsigned long eax,ecx,edx,ebx;
38944+ unsigned long esp;
38945+ unsigned long ebp;
38946+ unsigned long esi;
38947+ unsigned long edi;
38948+ unsigned short es, __esh;
38949+ unsigned short cs, __csh;
38950+ unsigned short ss, __ssh;
38951+ unsigned short ds, __dsh;
38952+ unsigned short fs, __fsh;
38953+ unsigned short gs, __gsh;
38954+ unsigned short ldt, __ldth;
38955+ unsigned short trace, io_bitmap_base;
38956+ /*
38957+ * The extra 1 is there because the CPU will access an
38958+ * additional byte beyond the end of the IO permission
38959+ * bitmap. The extra byte must be all 1 bits, and must
38960+ * be within the limit.
38961+ */
38962+ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
38963+ /*
38964+ * Cache the current maximum and the last task that used the bitmap:
38965+ */
38966+ unsigned long io_bitmap_max;
38967+ struct thread_struct *io_bitmap_owner;
38968+ /*
38969+ * pads the TSS to be cacheline-aligned (size is 0x100)
38970+ */
38971+ unsigned long __cacheline_filler[35];
38972+ /*
38973+ * .. and then another 0x100 bytes for emergency kernel stack
38974+ */
38975+ unsigned long stack[64];
38976+} __attribute__((packed));
38977+#endif
38978+
38979+#define ARCH_MIN_TASKALIGN 16
38980+
38981+struct thread_struct {
38982+/* cached TLS descriptors. */
38983+ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
38984+ unsigned long esp0;
38985+ unsigned long sysenter_cs;
38986+ unsigned long eip;
38987+ unsigned long esp;
38988+ unsigned long fs;
38989+ unsigned long gs;
38990+/* Hardware debugging registers */
38991+ unsigned long debugreg[8]; /* %%db0-7 debug registers */
38992+/* fault info */
38993+ unsigned long cr2, trap_no, error_code;
38994+/* floating point info */
38995+ union i387_union i387;
38996+/* virtual 86 mode info */
38997+ struct vm86_struct __user * vm86_info;
38998+ unsigned long screen_bitmap;
38999+ unsigned long v86flags, v86mask, saved_esp0;
39000+ unsigned int saved_fs, saved_gs;
39001+/* IO permissions */
39002+ unsigned long *io_bitmap_ptr;
39003+ unsigned long iopl;
39004+/* max allowed port in the bitmap, in bytes: */
39005+ unsigned long io_bitmap_max;
39006+};
39007+
39008+#define INIT_THREAD { \
39009+ .vm86_info = NULL, \
39010+ .sysenter_cs = __KERNEL_CS, \
39011+ .io_bitmap_ptr = NULL, \
39012+}
39013+
39014+#ifndef CONFIG_X86_NO_TSS
39015+/*
39016+ * Note that the .io_bitmap member must be extra-big. This is because
39017+ * the CPU will access an additional byte beyond the end of the IO
39018+ * permission bitmap. The extra byte must be all 1 bits, and must
39019+ * be within the limit.
39020+ */
39021+#define INIT_TSS { \
39022+ .esp0 = sizeof(init_stack) + (long)&init_stack, \
39023+ .ss0 = __KERNEL_DS, \
39024+ .ss1 = __KERNEL_CS, \
39025+ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
39026+ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
39027+}
39028+
39029+static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
39030+{
39031+ tss->esp0 = thread->esp0;
39032+ /* This can only happen when SEP is enabled, no need to test "SEP"arately */
39033+ if (unlikely(tss->ss1 != thread->sysenter_cs)) {
39034+ tss->ss1 = thread->sysenter_cs;
39035+ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
39036+ }
39037+}
39038+#define load_esp0(tss, thread) \
39039+ __load_esp0(tss, thread)
39040+#else
39041+#define load_esp0(tss, thread) do { \
39042+ if (HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)) \
39043+ BUG(); \
39044+} while (0)
39045+#endif
39046+
39047+#define start_thread(regs, new_eip, new_esp) do { \
39048+ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
39049+ set_fs(USER_DS); \
39050+ regs->xds = __USER_DS; \
39051+ regs->xes = __USER_DS; \
39052+ regs->xss = __USER_DS; \
39053+ regs->xcs = __USER_CS; \
39054+ regs->eip = new_eip; \
39055+ regs->esp = new_esp; \
39056+} while (0)
39057+
39058+/*
39059+ * These special macros can be used to get or set a debugging register
39060+ */
39061+#define get_debugreg(var, register) \
39062+ (var) = HYPERVISOR_get_debugreg((register))
39063+#define set_debugreg(value, register) \
39064+ WARN_ON(HYPERVISOR_set_debugreg((register), (value)))
39065+
39066+/*
39067+ * Set IOPL bits in EFLAGS from given mask
39068+ */
39069+static inline void set_iopl_mask(unsigned mask)
39070+{
39071+ struct physdev_set_iopl set_iopl;
39072+
39073+ /* Force the change at ring 0. */
39074+ set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
39075+ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
39076+}
39077+
39078+/* Forward declaration, a strange C thing */
39079+struct task_struct;
39080+struct mm_struct;
39081+
39082+/* Free all resources held by a thread. */
39083+extern void release_thread(struct task_struct *);
39084+
39085+/* Prepare to copy thread state - unlazy all lazy status */
39086+extern void prepare_to_copy(struct task_struct *tsk);
39087+
39088+/*
39089+ * create a kernel thread without removing it from tasklists
39090+ */
39091+extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
39092+
39093+extern unsigned long thread_saved_pc(struct task_struct *tsk);
39094+void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
39095+
39096+unsigned long get_wchan(struct task_struct *p);
39097+
39098+#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
39099+#define KSTK_TOP(info) \
39100+({ \
39101+ unsigned long *__ptr = (unsigned long *)(info); \
39102+ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
39103+})
39104+
39105+/*
39106+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
39107+ * This is necessary to guarantee that the entire "struct pt_regs"
39108+ * is accessible even if the CPU hasn't stored the SS/ESP registers
39109+ * on the stack (interrupt gate does not save these registers
39110+ * when switching to the same priv ring).
39111+ * Therefore beware: accessing the xss/esp fields of the
39112+ * "struct pt_regs" is possible, but they may contain the
39113+ * completely wrong values.
39114+ */
39115+#define task_pt_regs(task) \
39116+({ \
39117+ struct pt_regs *__regs__; \
39118+ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
39119+ __regs__ - 1; \
39120+})
39121+
39122+#define KSTK_EIP(task) (task_pt_regs(task)->eip)
39123+#define KSTK_ESP(task) (task_pt_regs(task)->esp)
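/*
 * Usage sketch (illustrative only; hypothetical helper): a /proc-style dump
 * of a task's saved user registers can use the accessors above, bearing in
 * mind the xss/esp caveat described before task_pt_regs().
 */
static inline void example_report_user_pc(struct task_struct *task,
					  unsigned long *eip, unsigned long *esp)
{
	*eip = KSTK_EIP(task);
	*esp = KSTK_ESP(task);
}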
39124+
39125+
39126+struct microcode_header {
39127+ unsigned int hdrver;
39128+ unsigned int rev;
39129+ unsigned int date;
39130+ unsigned int sig;
39131+ unsigned int cksum;
39132+ unsigned int ldrver;
39133+ unsigned int pf;
39134+ unsigned int datasize;
39135+ unsigned int totalsize;
39136+ unsigned int reserved[3];
39137+};
39138+
39139+struct microcode {
39140+ struct microcode_header hdr;
39141+ unsigned int bits[0];
39142+};
39143+
39144+typedef struct microcode microcode_t;
39145+typedef struct microcode_header microcode_header_t;
39146+
39147+/* microcode format is extended from prescott processors */
39148+struct extended_signature {
39149+ unsigned int sig;
39150+ unsigned int pf;
39151+ unsigned int cksum;
39152+};
39153+
39154+struct extended_sigtable {
39155+ unsigned int count;
39156+ unsigned int cksum;
39157+ unsigned int reserved[3];
39158+ struct extended_signature sigs[0];
39159+};
39160+
39161+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
39162+static inline void rep_nop(void)
39163+{
39164+ __asm__ __volatile__("rep;nop": : :"memory");
39165+}
39166+
39167+#define cpu_relax() rep_nop()
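/*
 * Usage sketch (illustrative only; `flag' is a hypothetical variable shared
 * with another CPU): cpu_relax() issues REP;NOP (PAUSE) and belongs in every
 * busy-wait loop.
 *
 *	while (!*(volatile int *)&flag)
 *		cpu_relax();
 */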
39168+
39169+/* generic versions from gas */
39170+#define GENERIC_NOP1 ".byte 0x90\n"
39171+#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
39172+#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
39173+#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
39174+#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
39175+#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
39176+#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
39177+#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
39178+
39179+/* Opteron nops */
39180+#define K8_NOP1 GENERIC_NOP1
39181+#define K8_NOP2 ".byte 0x66,0x90\n"
39182+#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
39183+#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
39184+#define K8_NOP5 K8_NOP3 K8_NOP2
39185+#define K8_NOP6 K8_NOP3 K8_NOP3
39186+#define K8_NOP7 K8_NOP4 K8_NOP3
39187+#define K8_NOP8 K8_NOP4 K8_NOP4
39188+
39189+/* K7 nops */
39190+/* uses eax dependencies (arbitrary choice) */
39191+#define K7_NOP1 GENERIC_NOP1
39192+#define K7_NOP2 ".byte 0x8b,0xc0\n"
39193+#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
39194+#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
39195+#define K7_NOP5 K7_NOP4 ASM_NOP1
39196+#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
39197+#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
39198+#define K7_NOP8 K7_NOP7 ASM_NOP1
39199+
39200+#ifdef CONFIG_MK8
39201+#define ASM_NOP1 K8_NOP1
39202+#define ASM_NOP2 K8_NOP2
39203+#define ASM_NOP3 K8_NOP3
39204+#define ASM_NOP4 K8_NOP4
39205+#define ASM_NOP5 K8_NOP5
39206+#define ASM_NOP6 K8_NOP6
39207+#define ASM_NOP7 K8_NOP7
39208+#define ASM_NOP8 K8_NOP8
39209+#elif defined(CONFIG_MK7)
39210+#define ASM_NOP1 K7_NOP1
39211+#define ASM_NOP2 K7_NOP2
39212+#define ASM_NOP3 K7_NOP3
39213+#define ASM_NOP4 K7_NOP4
39214+#define ASM_NOP5 K7_NOP5
39215+#define ASM_NOP6 K7_NOP6
39216+#define ASM_NOP7 K7_NOP7
39217+#define ASM_NOP8 K7_NOP8
39218+#else
39219+#define ASM_NOP1 GENERIC_NOP1
39220+#define ASM_NOP2 GENERIC_NOP2
39221+#define ASM_NOP3 GENERIC_NOP3
39222+#define ASM_NOP4 GENERIC_NOP4
39223+#define ASM_NOP5 GENERIC_NOP5
39224+#define ASM_NOP6 GENERIC_NOP6
39225+#define ASM_NOP7 GENERIC_NOP7
39226+#define ASM_NOP8 GENERIC_NOP8
39227+#endif
39228+
39229+#define ASM_NOP_MAX 8
39230+
39231+/* Prefetch instructions for Pentium III and AMD Athlon */
39232+/* It's not worth caring about 3dnow! prefetches for the K6
39233+ because they are microcoded there and very slow.
39234+ However, we don't do prefetches for pre-XP Athlons currently.
39235+ That should be fixed. */
39236+#define ARCH_HAS_PREFETCH
39237+static inline void prefetch(const void *x)
39238+{
39239+ alternative_input(ASM_NOP4,
39240+ "prefetchnta (%1)",
39241+ X86_FEATURE_XMM,
39242+ "r" (x));
39243+}
39244+
39245+#define ARCH_HAS_PREFETCH
39246+#define ARCH_HAS_PREFETCHW
39247+#define ARCH_HAS_SPINLOCK_PREFETCH
39248+
39249+/* 3dnow! prefetch to get an exclusive cache line. Useful for
39250+ spinlocks to avoid one state transition in the cache coherency protocol. */
39251+static inline void prefetchw(const void *x)
39252+{
39253+ alternative_input(ASM_NOP4,
39254+ "prefetchw (%1)",
39255+ X86_FEATURE_3DNOW,
39256+ "r" (x));
39257+}
39258+#define spin_lock_prefetch(x) prefetchw(x)
39259+
39260+extern void select_idle_routine(const struct cpuinfo_x86 *c);
39261+
39262+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
39263+
39264+extern unsigned long boot_option_idle_override;
39265+extern void enable_sep_cpu(void);
39266+extern int sysenter_setup(void);
39267+
39268+#endif /* __ASM_I386_PROCESSOR_H */
39269Index: head-2008-11-25/include/asm-x86/mach-xen/asm/segment_32.h
39270===================================================================
39271--- /dev/null 1970-01-01 00:00:00.000000000 +0000
39272+++ head-2008-11-25/include/asm-x86/mach-xen/asm/segment_32.h 2007-06-12 13:14:02.000000000 +0200
39273@@ -0,0 +1,117 @@
39274+#ifndef _ASM_SEGMENT_H
39275+#define _ASM_SEGMENT_H
39276+
39277+/*
39278+ * The layout of the per-CPU GDT under Linux:
39279+ *
39280+ * 0 - null
39281+ * 1 - reserved
39282+ * 2 - reserved
39283+ * 3 - reserved
39284+ *
39285+ * 4 - unused <==== new cacheline
39286+ * 5 - unused
39287+ *
39288+ * ------- start of TLS (Thread-Local Storage) segments:
39289+ *
39290+ * 6 - TLS segment #1 [ glibc's TLS segment ]
39291+ * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
39292+ * 8 - TLS segment #3
39293+ * 9 - reserved
39294+ * 10 - reserved
39295+ * 11 - reserved
39296+ *
39297+ * ------- start of kernel segments:
39298+ *
39299+ * 12 - kernel code segment <==== new cacheline
39300+ * 13 - kernel data segment
39301+ * 14 - default user CS
39302+ * 15 - default user DS
39303+ * 16 - TSS
39304+ * 17 - LDT
39305+ * 18 - PNPBIOS support (16->32 gate)
39306+ * 19 - PNPBIOS support
39307+ * 20 - PNPBIOS support
39308+ * 21 - PNPBIOS support
39309+ * 22 - PNPBIOS support
39310+ * 23 - APM BIOS support
39311+ * 24 - APM BIOS support
39312+ * 25 - APM BIOS support
39313+ *
39314+ * 26 - ESPFIX small SS
39315+ * 27 - unused
39316+ * 28 - unused
39317+ * 29 - unused
39318+ * 30 - unused
39319+ * 31 - TSS for double fault handler
39320+ */
39321+#define GDT_ENTRY_TLS_ENTRIES 3
39322+#define GDT_ENTRY_TLS_MIN 6
39323+#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
39324+
39325+#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
39326+
39327+#define GDT_ENTRY_DEFAULT_USER_CS 14
39328+#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
39329+
39330+#define GDT_ENTRY_DEFAULT_USER_DS 15
39331+#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
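/*
 * Note (illustrative only): an x86 selector is (descriptor index << 3) plus
 * the table indicator and requested privilege level, so the definitions
 * above evaluate to __USER_CS = 14*8 + 3 = 0x73 and __USER_DS = 15*8 + 3 =
 * 0x7b, i.e. GDT descriptors used at RPL 3.
 */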
39332+
39333+#define GDT_ENTRY_KERNEL_BASE 12
39334+
39335+#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
39336+#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
39337+#define GET_KERNEL_CS() (__KERNEL_CS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
39338+
39339+#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
39340+#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
39341+#define GET_KERNEL_DS() (__KERNEL_DS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
39342+
39343+#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
39344+#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
39345+
39346+#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
39347+#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
39348+
39349+#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
39350+#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
39351+
39352+#define GDT_ENTRY_DOUBLEFAULT_TSS 31
39353+
39354+/*
39355+ * The GDT has 32 entries
39356+ */
39357+#define GDT_ENTRIES 32
39358+
39359+#define GDT_SIZE (GDT_ENTRIES * 8)
39360+
39361+/* Simple and small GDT entries for booting only */
39362+
39363+#define GDT_ENTRY_BOOT_CS 2
39364+#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
39365+
39366+#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
39367+#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
39368+
39369+/* The PnP BIOS entries in the GDT */
39370+#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
39371+#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
39372+#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
39373+#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
39374+#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
39375+
39376+/* The PnP BIOS selectors */
39377+#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
39378+#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
39379+#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
39380+#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
39381+#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
39382+
39383+/*
39384+ * The interrupt descriptor table has room for 256 entries;
39385+ * the global descriptor table depends on the number
39386+ * of tasks we can have.
39387+ */
39388+#define IDT_ENTRIES 256
39389+
39390+#endif
39391Index: head-2008-11-25/include/asm-x86/mach-xen/asm/smp_32.h
39392===================================================================
39393--- /dev/null 1970-01-01 00:00:00.000000000 +0000
39394+++ head-2008-11-25/include/asm-x86/mach-xen/asm/smp_32.h 2007-06-12 13:14:02.000000000 +0200
39395@@ -0,0 +1,103 @@
39396+#ifndef __ASM_SMP_H
39397+#define __ASM_SMP_H
39398+
39399+/*
39400+ * We need the APIC definitions automatically as part of 'smp.h'
39401+ */
39402+#ifndef __ASSEMBLY__
39403+#include <linux/kernel.h>
39404+#include <linux/threads.h>
39405+#include <linux/cpumask.h>
39406+#endif
39407+
39408+#ifdef CONFIG_X86_LOCAL_APIC
39409+#ifndef __ASSEMBLY__
39410+#include <asm/fixmap.h>
39411+#include <asm/bitops.h>
39412+#include <asm/mpspec.h>
39413+#ifdef CONFIG_X86_IO_APIC
39414+#include <asm/io_apic.h>
39415+#endif
39416+#include <asm/apic.h>
39417+#endif
39418+#endif
39419+
39420+#define BAD_APICID 0xFFu
39421+#ifdef CONFIG_SMP
39422+#ifndef __ASSEMBLY__
39423+
39424+/*
39425+ * Private routines/data
39426+ */
39427+
39428+extern void smp_alloc_memory(void);
39429+extern int pic_mode;
39430+extern int smp_num_siblings;
39431+extern cpumask_t cpu_sibling_map[];
39432+extern cpumask_t cpu_core_map[];
39433+
39434+extern void (*mtrr_hook) (void);
39435+extern void zap_low_mappings (void);
39436+extern void lock_ipi_call_lock(void);
39437+extern void unlock_ipi_call_lock(void);
39438+
39439+#define MAX_APICID 256
39440+extern u8 x86_cpu_to_apicid[];
39441+
39442+#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
39443+
39444+#ifdef CONFIG_HOTPLUG_CPU
39445+extern void cpu_exit_clear(void);
39446+extern void cpu_uninit(void);
39447+#endif
39448+
39449+/*
39450+ * This function is needed by all SMP systems. It must _always_ be valid
39451+ * from the initial startup. We map APIC_BASE very early in page_setup(),
39452+ * so this is correct in the x86 case.
39453+ */
39454+#define raw_smp_processor_id() (current_thread_info()->cpu)
39455+
39456+extern cpumask_t cpu_possible_map;
39457+#define cpu_callin_map cpu_possible_map
39458+
39459+/* We don't mark CPUs online until __cpu_up(), so we need another measure */
39460+static inline int num_booting_cpus(void)
39461+{
39462+ return cpus_weight(cpu_possible_map);
39463+}
39464+
39465+#ifdef CONFIG_X86_LOCAL_APIC
39466+
39467+#ifdef APIC_DEFINITION
39468+extern int hard_smp_processor_id(void);
39469+#else
39470+#include <mach_apicdef.h>
39471+static inline int hard_smp_processor_id(void)
39472+{
39473+ /* we don't want to mark this access volatile - bad code generation */
39474+ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
39475+}
39476+#endif
39477+
39478+static __inline int logical_smp_processor_id(void)
39479+{
39480+ /* we don't want to mark this access volatile - bad code generation */
39481+ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
39482+}
39483+
39484+#endif
39485+
39486+extern int __cpu_disable(void);
39487+extern void __cpu_die(unsigned int cpu);
39488+extern void prefill_possible_map(void);
39489+#endif /* !__ASSEMBLY__ */
39490+
39491+#else /* CONFIG_SMP */
39492+
39493+#define cpu_physical_id(cpu) boot_cpu_physical_apicid
39494+
39495+#define NO_PROC_ID 0xFF /* No processor magic marker */
39496+
39497+#endif
39498+#endif
39499Index: head-2008-11-25/include/asm-x86/mach-xen/asm/swiotlb_32.h
39500===================================================================
39501--- /dev/null 1970-01-01 00:00:00.000000000 +0000
39502+++ head-2008-11-25/include/asm-x86/mach-xen/asm/swiotlb_32.h 2007-06-12 13:14:02.000000000 +0200
39503@@ -0,0 +1,43 @@
39504+#ifndef _ASM_SWIOTLB_H
39505+#define _ASM_SWIOTLB_H 1
39506+
39507+/* SWIOTLB interface */
39508+
39509+extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
39510+ int dir);
39511+extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
39512+ size_t size, int dir);
39513+extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
39514+ dma_addr_t dev_addr,
39515+ size_t size, int dir);
39516+extern void swiotlb_sync_single_for_device(struct device *hwdev,
39517+ dma_addr_t dev_addr,
39518+ size_t size, int dir);
39519+extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
39520+ struct scatterlist *sg, int nelems,
39521+ int dir);
39522+extern void swiotlb_sync_sg_for_device(struct device *hwdev,
39523+ struct scatterlist *sg, int nelems,
39524+ int dir);
39525+extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
39526+ int nents, int direction);
39527+extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
39528+ int nents, int direction);
39529+extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
39530+#ifdef CONFIG_HIGHMEM
39531+extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
39532+ unsigned long offset, size_t size,
39533+ enum dma_data_direction direction);
39534+extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
39535+ size_t size, enum dma_data_direction direction);
39536+#endif
39537+extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
39538+extern void swiotlb_init(void);
39539+
39540+#ifdef CONFIG_SWIOTLB
39541+extern int swiotlb;
39542+#else
39543+#define swiotlb 0
39544+#endif
39545+
39546+#endif
39547Index: head-2008-11-25/include/asm-x86/mach-xen/asm/synch_bitops.h
39548===================================================================
39549--- /dev/null 1970-01-01 00:00:00.000000000 +0000
39550+++ head-2008-11-25/include/asm-x86/mach-xen/asm/synch_bitops.h 2008-04-02 12:34:02.000000000 +0200
39551@@ -0,0 +1,126 @@
39552+#ifndef __XEN_SYNCH_BITOPS_H__
39553+#define __XEN_SYNCH_BITOPS_H__
39554+
39555+/*
39556+ * Copyright 1992, Linus Torvalds.
39557+ * Heavily modified to provide guaranteed strong synchronisation
39558+ * when communicating with Xen or other guest OSes running on other CPUs.
39559+ */
39560+
39561+#ifdef HAVE_XEN_PLATFORM_COMPAT_H
39562+#include <xen/platform-compat.h>
39563+#endif
39564+
39565+#define ADDR (*(volatile long *) addr)
39566+
39567+static __inline__ void synch_set_bit(int nr, volatile void * addr)
39568+{
39569+ __asm__ __volatile__ (
39570+ "lock btsl %1,%0"
39571+ : "+m" (ADDR) : "Ir" (nr) : "memory" );
39572+}
39573+
39574+static __inline__ void synch_clear_bit(int nr, volatile void * addr)
39575+{
39576+ __asm__ __volatile__ (
39577+ "lock btrl %1,%0"
39578+ : "+m" (ADDR) : "Ir" (nr) : "memory" );
39579+}
39580+
39581+static __inline__ void synch_change_bit(int nr, volatile void * addr)
39582+{
39583+ __asm__ __volatile__ (
39584+ "lock btcl %1,%0"
39585+ : "+m" (ADDR) : "Ir" (nr) : "memory" );
39586+}
39587+
39588+static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
39589+{
39590+ int oldbit;
39591+ __asm__ __volatile__ (
39592+ "lock btsl %2,%1\n\tsbbl %0,%0"
39593+ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
39594+ return oldbit;
39595+}
39596+
39597+static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
39598+{
39599+ int oldbit;
39600+ __asm__ __volatile__ (
39601+ "lock btrl %2,%1\n\tsbbl %0,%0"
39602+ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
39603+ return oldbit;
39604+}
39605+
39606+static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
39607+{
39608+ int oldbit;
39609+
39610+ __asm__ __volatile__ (
39611+ "lock btcl %2,%1\n\tsbbl %0,%0"
39612+ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
39613+ return oldbit;
39614+}
39615+
39616+struct __synch_xchg_dummy { unsigned long a[100]; };
39617+#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
39618+
39619+#define synch_cmpxchg(ptr, old, new) \
39620+((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
39621+ (unsigned long)(old), \
39622+ (unsigned long)(new), \
39623+ sizeof(*(ptr))))
39624+
39625+static inline unsigned long __synch_cmpxchg(volatile void *ptr,
39626+ unsigned long old,
39627+ unsigned long new, int size)
39628+{
39629+ unsigned long prev;
39630+ switch (size) {
39631+ case 1:
39632+ __asm__ __volatile__("lock; cmpxchgb %b1,%2"
39633+ : "=a"(prev)
39634+ : "q"(new), "m"(*__synch_xg(ptr)),
39635+ "0"(old)
39636+ : "memory");
39637+ return prev;
39638+ case 2:
39639+ __asm__ __volatile__("lock; cmpxchgw %w1,%2"
39640+ : "=a"(prev)
39641+ : "r"(new), "m"(*__synch_xg(ptr)),
39642+ "0"(old)
39643+ : "memory");
39644+ return prev;
39645+#ifdef CONFIG_X86_64
39646+ case 4:
39647+ __asm__ __volatile__("lock; cmpxchgl %k1,%2"
39648+ : "=a"(prev)
39649+ : "r"(new), "m"(*__synch_xg(ptr)),
39650+ "0"(old)
39651+ : "memory");
39652+ return prev;
39653+ case 8:
39654+ __asm__ __volatile__("lock; cmpxchgq %1,%2"
39655+ : "=a"(prev)
39656+ : "r"(new), "m"(*__synch_xg(ptr)),
39657+ "0"(old)
39658+ : "memory");
39659+ return prev;
39660+#else
39661+ case 4:
39662+ __asm__ __volatile__("lock; cmpxchgl %1,%2"
39663+ : "=a"(prev)
39664+ : "r"(new), "m"(*__synch_xg(ptr)),
39665+ "0"(old)
39666+ : "memory");
39667+ return prev;
39668+#endif
39669+ }
39670+ return old;
39671+}
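/*
 * Usage sketch (illustrative only; `status' is a hypothetical 16-bit word
 * shared with another domain): the synch_cmpxchg() wrapper above picks the
 * operand size from the pointer type, here the 16-bit cmpxchgw path.
 */
static inline void example_set_shared_flag(volatile u16 *status, u16 flag)
{
	u16 old, new;

	do {
		old = *status;
		new = old | flag;
	} while (synch_cmpxchg(status, old, new) != old);
}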
39672+
39673+#define synch_test_bit test_bit
39674+
39675+#define synch_cmpxchg_subword synch_cmpxchg
39676+
39677+#endif /* __XEN_SYNCH_BITOPS_H__ */
39678Index: head-2008-11-25/include/asm-x86/mach-xen/asm/system_32.h
39679===================================================================
39680--- /dev/null 1970-01-01 00:00:00.000000000 +0000
39681+++ head-2008-11-25/include/asm-x86/mach-xen/asm/system_32.h 2007-06-12 13:14:02.000000000 +0200
39682@@ -0,0 +1,488 @@
39683+#ifndef __ASM_SYSTEM_H
39684+#define __ASM_SYSTEM_H
39685+
39686+#include <linux/kernel.h>
39687+#include <asm/segment.h>
39688+#include <asm/cpufeature.h>
39689+#include <linux/bitops.h> /* for LOCK_PREFIX */
39690+#include <asm/synch_bitops.h>
39691+#include <asm/hypervisor.h>
39692+
39693+#ifdef __KERNEL__
39694+
39695+struct task_struct; /* one of the stranger aspects of C forward declarations.. */
39696+extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
39697+
39698+/*
39699+ * Saving eflags is important. It switches not only IOPL between tasks,
39700+ * it also protects other tasks from NT leaking through sysenter etc.
39701+ */
39702+#define switch_to(prev,next,last) do { \
39703+ unsigned long esi,edi; \
39704+ asm volatile("pushfl\n\t" /* Save flags */ \
39705+ "pushl %%ebp\n\t" \
39706+ "movl %%esp,%0\n\t" /* save ESP */ \
39707+ "movl %5,%%esp\n\t" /* restore ESP */ \
39708+ "movl $1f,%1\n\t" /* save EIP */ \
39709+ "pushl %6\n\t" /* restore EIP */ \
39710+ "jmp __switch_to\n" \
39711+ "1:\t" \
39712+ "popl %%ebp\n\t" \
39713+ "popfl" \
39714+ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
39715+ "=a" (last),"=S" (esi),"=D" (edi) \
39716+ :"m" (next->thread.esp),"m" (next->thread.eip), \
39717+ "2" (prev), "d" (next)); \
39718+} while (0)
39719+
39720+#define _set_base(addr,base) do { unsigned long __pr; \
39721+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
39722+ "rorl $16,%%edx\n\t" \
39723+ "movb %%dl,%2\n\t" \
39724+ "movb %%dh,%3" \
39725+ :"=&d" (__pr) \
39726+ :"m" (*((addr)+2)), \
39727+ "m" (*((addr)+4)), \
39728+ "m" (*((addr)+7)), \
39729+ "0" (base) \
39730+ ); } while(0)
39731+
39732+#define _set_limit(addr,limit) do { unsigned long __lr; \
39733+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
39734+ "rorl $16,%%edx\n\t" \
39735+ "movb %2,%%dh\n\t" \
39736+ "andb $0xf0,%%dh\n\t" \
39737+ "orb %%dh,%%dl\n\t" \
39738+ "movb %%dl,%2" \
39739+ :"=&d" (__lr) \
39740+ :"m" (*(addr)), \
39741+ "m" (*((addr)+6)), \
39742+ "0" (limit) \
39743+ ); } while(0)
39744+
39745+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
39746+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
39747+
39748+/*
39749+ * Load a segment. Fall back on loading the zero
39750+ * segment if something goes wrong..
39751+ */
39752+#define loadsegment(seg,value) \
39753+ asm volatile("\n" \
39754+ "1:\t" \
39755+ "mov %0,%%" #seg "\n" \
39756+ "2:\n" \
39757+ ".section .fixup,\"ax\"\n" \
39758+ "3:\t" \
39759+ "pushl $0\n\t" \
39760+ "popl %%" #seg "\n\t" \
39761+ "jmp 2b\n" \
39762+ ".previous\n" \
39763+ ".section __ex_table,\"a\"\n\t" \
39764+ ".align 4\n\t" \
39765+ ".long 1b,3b\n" \
39766+ ".previous" \
39767+ : :"rm" (value))
39768+
39769+/*
39770+ * Save a segment register away
39771+ */
39772+#define savesegment(seg, value) \
39773+ asm volatile("mov %%" #seg ",%0":"=rm" (value))
39774+
39775+#define read_cr0() ({ \
39776+ unsigned int __dummy; \
39777+ __asm__ __volatile__( \
39778+ "movl %%cr0,%0\n\t" \
39779+ :"=r" (__dummy)); \
39780+ __dummy; \
39781+})
39782+#define write_cr0(x) \
39783+ __asm__ __volatile__("movl %0,%%cr0": :"r" (x))
39784+
39785+#define read_cr2() (current_vcpu_info()->arch.cr2)
39786+#define write_cr2(x) \
39787+ __asm__ __volatile__("movl %0,%%cr2": :"r" (x))
39788+
39789+#define read_cr3() ({ \
39790+ unsigned int __dummy; \
39791+ __asm__ ( \
39792+ "movl %%cr3,%0\n\t" \
39793+ :"=r" (__dummy)); \
39794+ __dummy = xen_cr3_to_pfn(__dummy); \
39795+ mfn_to_pfn(__dummy) << PAGE_SHIFT; \
39796+})
39797+#define write_cr3(x) ({ \
39798+ unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT); \
39799+ __dummy = xen_pfn_to_cr3(__dummy); \
39800+ __asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy)); \
39801+})
39802+#define read_cr4() ({ \
39803+ unsigned int __dummy; \
39804+ __asm__( \
39805+ "movl %%cr4,%0\n\t" \
39806+ :"=r" (__dummy)); \
39807+ __dummy; \
39808+})
39809+#define read_cr4_safe() ({ \
39810+ unsigned int __dummy; \
39811+ /* This could fault if %cr4 does not exist */ \
39812+ __asm__("1: movl %%cr4, %0 \n" \
39813+ "2: \n" \
39814+ ".section __ex_table,\"a\" \n" \
39815+ ".long 1b,2b \n" \
39816+ ".previous \n" \
39817+ : "=r" (__dummy): "0" (0)); \
39818+ __dummy; \
39819+})
39820+
39821+#define write_cr4(x) \
39822+ __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
39823+
39824+/*
39825+ * Clear and set 'TS' bit respectively
39826+ */
39827+#define clts() (HYPERVISOR_fpu_taskswitch(0))
39828+#define stts() (HYPERVISOR_fpu_taskswitch(1))
39829+
39830+#endif /* __KERNEL__ */
39831+
39832+#define wbinvd() \
39833+ __asm__ __volatile__ ("wbinvd": : :"memory")
39834+
39835+static inline unsigned long get_limit(unsigned long segment)
39836+{
39837+ unsigned long __limit;
39838+ __asm__("lsll %1,%0"
39839+ :"=r" (__limit):"r" (segment));
39840+ return __limit+1;
39841+}
39842+
39843+#define nop() __asm__ __volatile__ ("nop")
39844+
39845+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
39846+
39847+#define tas(ptr) (xchg((ptr),1))
39848+
39849+struct __xchg_dummy { unsigned long a[100]; };
39850+#define __xg(x) ((struct __xchg_dummy *)(x))
39851+
39852+
39853+#ifdef CONFIG_X86_CMPXCHG64
39854+
39855+/*
39856+ * The semantics of CMPXCHG8B are a bit strange, which is why
39857+ * there is a loop and the loading of %%eax and %%edx has to
39858+ * be inside. This inlines well in most cases, the cached
39859+ * cost is around ~38 cycles. (in the future we might want
39860+ * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
39861+ * might have an implicit FPU-save as a cost, so it's not
39862+ * clear which path to take.)
39863+ *
39864+ * cmpxchg8b must be used with the lock prefix here to allow
39865+ * the instruction to be executed atomically, see page 3-102
39866+ * of the instruction set reference 24319102.pdf. We need
39867+ * the reader side to see the coherent 64bit value.
39868+ */
39869+static inline void __set_64bit (unsigned long long * ptr,
39870+ unsigned int low, unsigned int high)
39871+{
39872+ __asm__ __volatile__ (
39873+ "\n1:\t"
39874+ "movl (%0), %%eax\n\t"
39875+ "movl 4(%0), %%edx\n\t"
39876+ "lock cmpxchg8b (%0)\n\t"
39877+ "jnz 1b"
39878+ : /* no outputs */
39879+ : "D"(ptr),
39880+ "b"(low),
39881+ "c"(high)
39882+ : "ax","dx","memory");
39883+}
39884+
39885+static inline void __set_64bit_constant (unsigned long long *ptr,
39886+ unsigned long long value)
39887+{
39888+ __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
39889+}
39890+#define ll_low(x) *(((unsigned int*)&(x))+0)
39891+#define ll_high(x) *(((unsigned int*)&(x))+1)
39892+
39893+static inline void __set_64bit_var (unsigned long long *ptr,
39894+ unsigned long long value)
39895+{
39896+ __set_64bit(ptr,ll_low(value), ll_high(value));
39897+}
39898+
39899+#define set_64bit(ptr,value) \
39900+(__builtin_constant_p(value) ? \
39901+ __set_64bit_constant(ptr, value) : \
39902+ __set_64bit_var(ptr, value) )
39903+
39904+#define _set_64bit(ptr,value) \
39905+(__builtin_constant_p(value) ? \
39906+ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
39907+ __set_64bit(ptr, ll_low(value), ll_high(value)) )
39908+
39909+#endif
39910+
39911+/*
39912+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
39913+ * Note 2: xchg has side effect, so that attribute volatile is necessary,
39914+ * but generally the primitive is invalid, *ptr is output argument. --ANK
39915+ */
39916+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
39917+{
39918+ switch (size) {
39919+ case 1:
39920+ __asm__ __volatile__("xchgb %b0,%1"
39921+ :"=q" (x)
39922+ :"m" (*__xg(ptr)), "0" (x)
39923+ :"memory");
39924+ break;
39925+ case 2:
39926+ __asm__ __volatile__("xchgw %w0,%1"
39927+ :"=r" (x)
39928+ :"m" (*__xg(ptr)), "0" (x)
39929+ :"memory");
39930+ break;
39931+ case 4:
39932+ __asm__ __volatile__("xchgl %0,%1"
39933+ :"=r" (x)
39934+ :"m" (*__xg(ptr)), "0" (x)
39935+ :"memory");
39936+ break;
39937+ }
39938+ return x;
39939+}
39940+
39941+/*
39942+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
39943+ * store NEW in MEM. Return the initial value in MEM. Success is
39944+ * indicated by comparing RETURN with OLD.
39945+ */
39946+
39947+#ifdef CONFIG_X86_CMPXCHG
39948+#define __HAVE_ARCH_CMPXCHG 1
39949+#define cmpxchg(ptr,o,n)\
39950+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
39951+ (unsigned long)(n),sizeof(*(ptr))))
39952+#endif
39953+
39954+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
39955+ unsigned long new, int size)
39956+{
39957+ unsigned long prev;
39958+ switch (size) {
39959+ case 1:
39960+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
39961+ : "=a"(prev)
39962+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
39963+ : "memory");
39964+ return prev;
39965+ case 2:
39966+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
39967+ : "=a"(prev)
39968+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
39969+ : "memory");
39970+ return prev;
39971+ case 4:
39972+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
39973+ : "=a"(prev)
39974+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
39975+ : "memory");
39976+ return prev;
39977+ }
39978+ return old;
39979+}
39980+
39981+#ifndef CONFIG_X86_CMPXCHG
39982+/*
39983+ * Building a kernel capable of running on an 80386. It may be necessary to
39984+ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
39985+ * a function for each of the sizes we support.
39986+ */
39987+
39988+extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
39989+extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
39990+extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
39991+
39992+static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
39993+ unsigned long new, int size)
39994+{
39995+ switch (size) {
39996+ case 1:
39997+ return cmpxchg_386_u8(ptr, old, new);
39998+ case 2:
39999+ return cmpxchg_386_u16(ptr, old, new);
40000+ case 4:
40001+ return cmpxchg_386_u32(ptr, old, new);
40002+ }
40003+ return old;
40004+}
40005+
40006+#define cmpxchg(ptr,o,n) \
40007+({ \
40008+ __typeof__(*(ptr)) __ret; \
40009+ if (likely(boot_cpu_data.x86 > 3)) \
40010+ __ret = __cmpxchg((ptr), (unsigned long)(o), \
40011+ (unsigned long)(n), sizeof(*(ptr))); \
40012+ else \
40013+ __ret = cmpxchg_386((ptr), (unsigned long)(o), \
40014+ (unsigned long)(n), sizeof(*(ptr))); \
40015+ __ret; \
40016+})
40017+#endif
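/*
 * Usage sketch (illustrative only; `counter' is a hypothetical variable):
 * the usual retry loop around cmpxchg(); when CONFIG_X86_CMPXCHG is not set
 * the macro above transparently falls back to cmpxchg_386() on a real 386.
 */
static inline void example_atomic_increment(unsigned long *counter)
{
	unsigned long old;

	do {
		old = *counter;
	} while (cmpxchg(counter, old, old + 1) != old);
}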
40018+
40019+#ifdef CONFIG_X86_CMPXCHG64
40020+
40021+static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
40022+ unsigned long long new)
40023+{
40024+ unsigned long long prev;
40025+ __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
40026+ : "=A"(prev)
40027+ : "b"((unsigned long)new),
40028+ "c"((unsigned long)(new >> 32)),
40029+ "m"(*__xg(ptr)),
40030+ "0"(old)
40031+ : "memory");
40032+ return prev;
40033+}
40034+
40035+#define cmpxchg64(ptr,o,n)\
40036+ ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
40037+ (unsigned long long)(n)))
40038+
40039+#endif
40040+
40041+/*
40042+ * Force strict CPU ordering.
40043+ * And yes, this is required on UP too when we're talking
40044+ * to devices.
40045+ *
40046+ * For now, "wmb()" doesn't actually do anything, as all
40047+ * Intel CPU's follow what Intel calls a *Processor Order*,
40048+ * in which all writes are seen in the program order even
40049+ * outside the CPU.
40050+ *
40051+ * I expect future Intel CPU's to have a weaker ordering,
40052+ * but I'd also expect them to finally get their act together
40053+ * and add some real memory barriers if so.
40054+ *
40055+ * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
40056+ * no-op for these.
40057+ */
40058+
40059+
40060+/*
40061+ * Actually only lfence would be needed for mb() because all stores done
40062+ * by the kernel should be already ordered. But keep a full barrier for now.
40063+ */
40064+
40065+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
40066+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
40067+
40068+/**
40069+ * read_barrier_depends - Flush all pending reads that subsequent reads
40070+ * depend on.
40071+ *
40072+ * No data-dependent reads from memory-like regions are ever reordered
40073+ * over this barrier. All reads preceding this primitive are guaranteed
40074+ * to access memory (but not necessarily other CPUs' caches) before any
40075+ * reads following this primitive that depend on the data returned by
40076+ * any of the preceding reads. This primitive is much lighter weight than
40077+ * rmb() on most CPUs, and is never heavier weight than is
40078+ * rmb().
40079+ *
40080+ * These ordering constraints are respected by both the local CPU
40081+ * and the compiler.
40082+ *
40083+ * Ordering is not guaranteed by anything other than these primitives,
40084+ * not even by data dependencies. See the documentation for
40085+ * memory_barrier() for examples and URLs to more information.
40086+ *
40087+ * For example, the following code would force ordering (the initial
40088+ * value of "a" is zero, "b" is one, and "p" is "&a"):
40089+ *
40090+ * <programlisting>
40091+ * CPU 0 CPU 1
40092+ *
40093+ * b = 2;
40094+ * memory_barrier();
40095+ * p = &b; q = p;
40096+ * read_barrier_depends();
40097+ * d = *q;
40098+ * </programlisting>
40099+ *
40100+ * because the read of "*q" depends on the read of "p" and these
40101+ * two reads are separated by a read_barrier_depends(). However,
40102+ * the following code, with the same initial values for "a" and "b":
40103+ *
40104+ * <programlisting>
40105+ * CPU 0 CPU 1
40106+ *
40107+ * a = 2;
40108+ * memory_barrier();
40109+ * b = 3; y = b;
40110+ * read_barrier_depends();
40111+ * x = a;
40112+ * </programlisting>
40113+ *
40114+ * does not enforce ordering, since there is no data dependency between
40115+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
40116+ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
40117+ * in cases like this where there are no data dependencies.
40118+ **/
40119+
40120+#define read_barrier_depends() do { } while(0)
40121+
40122+#ifdef CONFIG_X86_OOSTORE
40123+/* Actually there are no OOO store capable CPUs for now that do SSE,
40124+ but make it a possibility already. */
40125+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
40126+#else
40127+#define wmb() __asm__ __volatile__ ("": : :"memory")
40128+#endif
40129+
40130+#ifdef CONFIG_SMP
40131+#define smp_mb() mb()
40132+#define smp_rmb() rmb()
40133+#define smp_wmb() wmb()
40134+#define smp_read_barrier_depends() read_barrier_depends()
40135+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
40136+#else
40137+#define smp_mb() barrier()
40138+#define smp_rmb() barrier()
40139+#define smp_wmb() barrier()
40140+#define smp_read_barrier_depends() do { } while(0)
40141+#define set_mb(var, value) do { var = value; barrier(); } while (0)
40142+#endif
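
A minimal sketch of how the smp_wmb()/smp_rmb() pair defined above is typically used; the producer/consumer split, the two variables and the 42 payload are illustrative assumptions, not code from this patch.

/* Illustrative sketch: publish a payload, then a ready flag. */
static int example_data;
static int example_ready;

static void example_producer(void)
{
	example_data = 42;	/* write the payload first */
	smp_wmb();		/* order the payload store before the flag store */
	example_ready = 1;
}

static void example_consumer(void)
{
	if (example_ready) {
		smp_rmb();	/* order the flag load before the payload load */
		/* example_data is guaranteed to read as 42 here */
	}
}
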
40143+
40144+#include <linux/irqflags.h>
40145+
40146+/*
40147+ * disable hlt during certain critical i/o operations
40148+ */
40149+#define HAVE_DISABLE_HLT
40150+void disable_hlt(void);
40151+void enable_hlt(void);
40152+
40153+extern int es7000_plat;
40154+void cpu_idle_wait(void);
40155+
40156+/*
40157+ * On SMP systems, when the scheduler does migration-cost autodetection,
40158+ * it needs a way to flush as much of the CPU's caches as possible:
40159+ */
40160+static inline void sched_cacheflush(void)
40161+{
40162+ wbinvd();
40163+}
40164+
40165+extern unsigned long arch_align_stack(unsigned long sp);
40166+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
40167+
40168+void default_idle(void);
40169+
40170+#endif
40171Index: head-2008-11-25/include/asm-x86/mach-xen/asm/tlbflush_32.h
40172===================================================================
40173--- /dev/null 1970-01-01 00:00:00.000000000 +0000
40174+++ head-2008-11-25/include/asm-x86/mach-xen/asm/tlbflush_32.h 2007-11-26 16:59:25.000000000 +0100
40175@@ -0,0 +1,101 @@
40176+#ifndef _I386_TLBFLUSH_H
40177+#define _I386_TLBFLUSH_H
40178+
40179+#include <linux/mm.h>
40180+#include <asm/processor.h>
40181+
40182+#define __flush_tlb() xen_tlb_flush()
40183+#define __flush_tlb_global() xen_tlb_flush()
40184+#define __flush_tlb_all() xen_tlb_flush()
40185+
40186+extern unsigned long pgkern_mask;
40187+
40188+#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
40189+
40190+#define __flush_tlb_single(addr) xen_invlpg(addr)
40191+
40192+#define __flush_tlb_one(addr) __flush_tlb_single(addr)
40193+
40194+/*
40195+ * TLB flushing:
40196+ *
40197+ * - flush_tlb() flushes the current mm struct TLBs
40198+ * - flush_tlb_all() flushes all processes TLBs
40199+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
40200+ * - flush_tlb_page(vma, vmaddr) flushes one page
40201+ * - flush_tlb_range(vma, start, end) flushes a range of pages
40202+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
40203+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
40204+ *
40205+ * ..but the i386 has somewhat limited tlb flushing capabilities,
40206+ * and page-granular flushes are available only on i486 and up.
40207+ */
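
As a hedged usage sketch of the API listed in the comment above: after page table entries for a range are modified, the stale translations must be flushed, which on this Xen port is routed through xen_tlb_flush()/xen_tlb_flush_mask() as defined earlier. The function name and its "..." body are placeholders.

/* Illustrative sketch only. */
static void example_update_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	/* ... update the page table entries covering [start, end) ... */
	flush_tlb_range(vma, start, end);
}
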
40208+
40209+#ifndef CONFIG_SMP
40210+
40211+#define flush_tlb() __flush_tlb()
40212+#define flush_tlb_all() __flush_tlb_all()
40213+#define local_flush_tlb() __flush_tlb()
40214+
40215+static inline void flush_tlb_mm(struct mm_struct *mm)
40216+{
40217+ if (mm == current->active_mm)
40218+ __flush_tlb();
40219+}
40220+
40221+static inline void flush_tlb_page(struct vm_area_struct *vma,
40222+ unsigned long addr)
40223+{
40224+ if (vma->vm_mm == current->active_mm)
40225+ __flush_tlb_one(addr);
40226+}
40227+
40228+static inline void flush_tlb_range(struct vm_area_struct *vma,
40229+ unsigned long start, unsigned long end)
40230+{
40231+ if (vma->vm_mm == current->active_mm)
40232+ __flush_tlb();
40233+}
40234+
40235+#else
40236+
40237+#include <asm/smp.h>
40238+
40239+#define local_flush_tlb() \
40240+ __flush_tlb()
40241+
40242+#define flush_tlb_all xen_tlb_flush_all
40243+#define flush_tlb_current_task() xen_tlb_flush_mask(&current->mm->cpu_vm_mask)
40244+#define flush_tlb_mm(mm) xen_tlb_flush_mask(&(mm)->cpu_vm_mask)
40245+#define flush_tlb_page(vma, va) xen_invlpg_mask(&(vma)->vm_mm->cpu_vm_mask, va)
40246+
40247+#define flush_tlb() flush_tlb_current_task()
40248+
40249+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
40250+{
40251+ flush_tlb_mm(vma->vm_mm);
40252+}
40253+
40254+#define TLBSTATE_OK 1
40255+#define TLBSTATE_LAZY 2
40256+
40257+struct tlb_state
40258+{
40259+ struct mm_struct *active_mm;
40260+ int state;
40261+ char __cacheline_padding[L1_CACHE_BYTES-8];
40262+};
40263+DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
40264+
40265+
40266+#endif
40267+
40268+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
40269+
40270+static inline void flush_tlb_pgtables(struct mm_struct *mm,
40271+ unsigned long start, unsigned long end)
40272+{
40273+ /* i386 does not keep any page table caches in TLB */
40274+}
40275+
40276+#endif /* _I386_TLBFLUSH_H */
40277Index: head-2008-11-25/include/asm-x86/mach-xen/asm/vga.h
40278===================================================================
40279--- /dev/null 1970-01-01 00:00:00.000000000 +0000
40280+++ head-2008-11-25/include/asm-x86/mach-xen/asm/vga.h 2007-06-12 13:14:02.000000000 +0200
40281@@ -0,0 +1,20 @@
40282+/*
40283+ * Access to VGA videoram
40284+ *
40285+ * (c) 1998 Martin Mares <mj@ucw.cz>
40286+ */
40287+
40288+#ifndef _LINUX_ASM_VGA_H_
40289+#define _LINUX_ASM_VGA_H_
40290+
40291+/*
40292+ * On the PC, we can just recalculate addresses and then
40293+ * access the videoram directly without any black magic.
40294+ */
40295+
40296+#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
40297+
40298+#define vga_readb(x) (*(x))
40299+#define vga_writeb(x,y) (*(y) = (x))
40300+
40301+#endif
40302Index: head-2008-11-25/include/asm-x86/mach-xen/asm/xenoprof.h
40303===================================================================
40304--- /dev/null 1970-01-01 00:00:00.000000000 +0000
40305+++ head-2008-11-25/include/asm-x86/mach-xen/asm/xenoprof.h 2007-06-12 13:14:02.000000000 +0200
40306@@ -0,0 +1,48 @@
40307+/******************************************************************************
40308+ * asm-i386/mach-xen/asm/xenoprof.h
40309+ *
40310+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
40311+ * VA Linux Systems Japan K.K.
40312+ *
40313+ * This program is free software; you can redistribute it and/or modify
40314+ * it under the terms of the GNU General Public License as published by
40315+ * the Free Software Foundation; either version 2 of the License, or
40316+ * (at your option) any later version.
40317+ *
40318+ * This program is distributed in the hope that it will be useful,
40319+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
40320+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
40321+ * GNU General Public License for more details.
40322+ *
40323+ * You should have received a copy of the GNU General Public License
40324+ * along with this program; if not, write to the Free Software
40325+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
40326+ *
40327+ */
40328+#ifndef __ASM_XENOPROF_H__
40329+#define __ASM_XENOPROF_H__
40330+#ifdef CONFIG_XEN
40331+
40332+struct super_block;
40333+struct dentry;
40334+int xenoprof_create_files(struct super_block * sb, struct dentry * root);
40335+#define HAVE_XENOPROF_CREATE_FILES
40336+
40337+struct xenoprof_init;
40338+void xenoprof_arch_init_counter(struct xenoprof_init *init);
40339+void xenoprof_arch_counter(void);
40340+void xenoprof_arch_start(void);
40341+void xenoprof_arch_stop(void);
40342+
40343+struct xenoprof_arch_shared_buffer {
40344+ /* nothing */
40345+};
40346+struct xenoprof_shared_buffer;
40347+void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
40348+struct xenoprof_get_buffer;
40349+int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer, struct xenoprof_shared_buffer* sbuf);
40350+struct xenoprof_passive;
40351+int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain, struct xenoprof_shared_buffer* sbuf);
40352+
40353+#endif /* CONFIG_XEN */
40354+#endif /* __ASM_XENOPROF_H__ */
40355Index: head-2008-11-25/include/asm-x86/mach-xen/irq_vectors.h
40356===================================================================
40357--- /dev/null 1970-01-01 00:00:00.000000000 +0000
40358+++ head-2008-11-25/include/asm-x86/mach-xen/irq_vectors.h 2008-09-25 13:55:32.000000000 +0200
40359@@ -0,0 +1,125 @@
40360+/*
40361+ * This file should contain #defines for all of the interrupt vector
40362+ * numbers used by this architecture.
40363+ *
40364+ * In addition, there are some standard defines:
40365+ *
40366+ * FIRST_EXTERNAL_VECTOR:
40367+ * The first free place for external interrupts
40368+ *
40369+ * SYSCALL_VECTOR:
40370+ * The IRQ vector used by a syscall for the user-to-kernel
40371+ * transition.
40372+ *
40373+ * TIMER_IRQ:
40374+ * The IRQ number the timer interrupt comes in at.
40375+ *
40376+ * NR_IRQS:
40377+ * The total number of interrupt vectors (including all the
40378+ * architecture specific interrupts) needed.
40379+ *
40380+ */
40381+#ifndef _ASM_IRQ_VECTORS_H
40382+#define _ASM_IRQ_VECTORS_H
40383+
40384+/*
40385+ * IDT vectors usable for external interrupt sources start
40386+ * at 0x20:
40387+ */
40388+#define FIRST_EXTERNAL_VECTOR 0x20
40389+
40390+#define SYSCALL_VECTOR 0x80
40391+
40392+/*
40393+ * Vectors 0x20-0x2f are used for ISA interrupts.
40394+ */
40395+
40396+#if 0
40397+/*
40398+ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
40399+ *
40400+ * some of the following vectors are 'rare', they are merged
40401+ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
40402+ * TLB, reschedule and local APIC vectors are performance-critical.
40403+ *
40404+ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
40405+ */
40406+#define SPURIOUS_APIC_VECTOR 0xff
40407+#define ERROR_APIC_VECTOR 0xfe
40408+#define INVALIDATE_TLB_VECTOR 0xfd
40409+#define RESCHEDULE_VECTOR 0xfc
40410+#define CALL_FUNCTION_VECTOR 0xfb
40411+
40412+#define THERMAL_APIC_VECTOR 0xf0
40413+/*
40414+ * Local APIC timer IRQ vector is on a different priority level,
40415+ * to work around the 'lost local interrupt if more than 2 IRQ
40416+ * sources per level' errata.
40417+ */
40418+#define LOCAL_TIMER_VECTOR 0xef
40419+#endif
40420+
40421+#define SPURIOUS_APIC_VECTOR 0xff
40422+#define ERROR_APIC_VECTOR 0xfe
40423+
40424+/*
40425+ * First APIC vector available to drivers: (vectors 0x30-0xee)
40426+ * we start at 0x31 to spread out vectors evenly between priority
40427+ * levels. (0x80 is the syscall vector)
40428+ */
40429+#define FIRST_DEVICE_VECTOR 0x31
40430+#define FIRST_SYSTEM_VECTOR 0xef
40431+
40432+/*
40433+ * 16 8259A IRQ's, 208 potential APIC interrupt sources.
40434+ * Right now the APIC is mostly only used for SMP.
40435+ * 256 vectors is an architectural limit. (we can have
40436+ * more than 256 devices theoretically, but they will
40437+ * have to use shared interrupts)
40438+ * Since vectors 0x00-0x1f are used/reserved for the CPU,
40439+ * the usable vector space is 0x20-0xff (224 vectors)
40440+ */
40441+
40442+#define RESCHEDULE_VECTOR 0
40443+#define CALL_FUNCTION_VECTOR 1
40444+#define NR_IPIS 2
40445+
40446+/*
40447+ * The maximum number of vectors supported by i386 processors
40448+ * is limited to 256. For processors other than i386, NR_VECTORS
40449+ * should be changed accordingly.
40450+ */
40451+#define NR_VECTORS 256
40452+
40453+#define FPU_IRQ 13
40454+
40455+#define FIRST_VM86_IRQ 3
40456+#define LAST_VM86_IRQ 15
40457+#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
40458+
40459+/*
40460+ * The flat IRQ space is divided into two regions:
40461+ * 1. A one-to-one mapping of real physical IRQs. This space is only used
40462+ * if we have physical device-access privilege. This region is at the
40463+ * start of the IRQ space so that existing device drivers do not need
40464+ * to be modified to translate physical IRQ numbers into our IRQ space.
40465+ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
40466+ * are bound using the provided bind/unbind functions.
40467+ */
40468+
40469+#define PIRQ_BASE 0
40470+#if !defined(MAX_IO_APICS)
40471+# define NR_PIRQS (NR_VECTORS + 32 * NR_CPUS)
40472+#elif NR_CPUS < MAX_IO_APICS
40473+# define NR_PIRQS (NR_VECTORS + 32 * NR_CPUS)
40474+#else
40475+# define NR_PIRQS (NR_VECTORS + 32 * MAX_IO_APICS)
40476+#endif
40477+
40478+#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
40479+#define NR_DYNIRQS 256
40480+
40481+#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
40482+#define NR_IRQ_VECTORS NR_IRQS
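
A small worked example of the flat IRQ layout defined above: physical IRQs occupy [PIRQ_BASE, PIRQ_BASE + NR_PIRQS) and dynamically bound event channels occupy [DYNIRQ_BASE, DYNIRQ_BASE + NR_DYNIRQS). The two helpers are hypothetical and only show the arithmetic; they are not part of the patch.

/* Illustrative sketch only. */
static inline int example_pirq_to_irq(unsigned int pirq)
{
	return PIRQ_BASE + pirq;	/* one-to-one physical IRQ region */
}

static inline int example_dynirq_to_irq(unsigned int dynirq)
{
	return DYNIRQ_BASE + dynirq;	/* dynamically bound event channels */
}
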
40483+
40484+#endif /* _ASM_IRQ_VECTORS_H */
40485Index: head-2008-11-25/include/asm-x86/mach-xen/mach_traps.h
40486===================================================================
40487--- /dev/null 1970-01-01 00:00:00.000000000 +0000
40488+++ head-2008-11-25/include/asm-x86/mach-xen/mach_traps.h 2007-06-12 13:14:02.000000000 +0200
40489@@ -0,0 +1,33 @@
40490+/*
40491+ * include/asm-xen/asm-i386/mach-xen/mach_traps.h
40492+ *
40493+ * Machine specific NMI handling for Xen
40494+ */
40495+#ifndef _MACH_TRAPS_H
40496+#define _MACH_TRAPS_H
40497+
40498+#include <linux/bitops.h>
40499+#include <xen/interface/nmi.h>
40500+
40501+static inline void clear_mem_error(unsigned char reason) {}
40502+static inline void clear_io_check_error(unsigned char reason) {}
40503+
40504+static inline unsigned char get_nmi_reason(void)
40505+{
40506+ shared_info_t *s = HYPERVISOR_shared_info;
40507+ unsigned char reason = 0;
40508+
40509+ /* construct a value which looks like it came from
40510+ * port 0x61.
40511+ */
40512+ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
40513+ reason |= 0x40;
40514+ if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
40515+ reason |= 0x80;
40516+
40517+ return reason;
40518+}
40519+
40520+static inline void reassert_nmi(void) {}
40521+
40522+#endif /* !_MACH_TRAPS_H */
40523Index: head-2008-11-25/include/asm-x86/mach-xen/setup_arch.h
40524===================================================================
40525--- /dev/null 1970-01-01 00:00:00.000000000 +0000
40526+++ head-2008-11-25/include/asm-x86/mach-xen/setup_arch.h 2007-06-12 13:14:02.000000000 +0200
40527@@ -0,0 +1,5 @@
40528+/* Hook to call BIOS initialisation function */
40529+
40530+#define ARCH_SETUP machine_specific_arch_setup();
40531+
40532+void __init machine_specific_arch_setup(void);
40533Index: head-2008-11-25/include/asm-x86/mach-xen/asm/desc_64.h
40534===================================================================
40535--- /dev/null 1970-01-01 00:00:00.000000000 +0000
40536+++ head-2008-11-25/include/asm-x86/mach-xen/asm/desc_64.h 2008-01-28 12:24:19.000000000 +0100
40537@@ -0,0 +1,265 @@
40538+/* Written 2000 by Andi Kleen */
40539+#ifndef __ARCH_DESC_H
40540+#define __ARCH_DESC_H
40541+
40542+#include <linux/threads.h>
40543+#include <asm/ldt.h>
40544+
40545+#ifndef __ASSEMBLY__
40546+
40547+#include <linux/string.h>
40548+#include <linux/smp.h>
40549+
40550+#include <asm/segment.h>
40551+#include <asm/mmu.h>
40552+
40553+// 8 byte segment descriptor
40554+struct desc_struct {
40555+ u16 limit0;
40556+ u16 base0;
40557+ unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
40558+ unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
40559+} __attribute__((packed));
40560+
40561+struct n_desc_struct {
40562+ unsigned int a,b;
40563+};
40564+
40565+enum {
40566+ GATE_INTERRUPT = 0xE,
40567+ GATE_TRAP = 0xF,
40568+ GATE_CALL = 0xC,
40569+};
40570+
40571+// 16byte gate
40572+struct gate_struct {
40573+ u16 offset_low;
40574+ u16 segment;
40575+ unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
40576+ u16 offset_middle;
40577+ u32 offset_high;
40578+ u32 zero1;
40579+} __attribute__((packed));
40580+
40581+#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
40582+#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
40583+#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
40584+
40585+enum {
40586+ DESC_TSS = 0x9,
40587+ DESC_LDT = 0x2,
40588+};
40589+
40590+// LDT or TSS descriptor in the GDT. 16 bytes.
40591+struct ldttss_desc {
40592+ u16 limit0;
40593+ u16 base0;
40594+ unsigned base1 : 8, type : 5, dpl : 2, p : 1;
40595+ unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
40596+ u32 base3;
40597+ u32 zero1;
40598+} __attribute__((packed));
40599+
40600+struct desc_ptr {
40601+ unsigned short size;
40602+ unsigned long address;
40603+} __attribute__((packed)) ;
40604+
40605+extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
40606+
40607+extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
40608+
40609+#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
40610+#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
40611+
40612+static inline void clear_LDT(void)
40613+{
40614+ int cpu = get_cpu();
40615+
40616+ /*
40617+ * NB. We load the default_ldt for lcall7/27 handling on demand, as
40618+ * it slows down context switching. No one uses it anyway.
40619+ */
40620+ cpu = cpu; /* XXX avoid compiler warning */
40621+ xen_set_ldt(NULL, 0);
40622+ put_cpu();
40623+}
40624+
40625+/*
40626+ * This is the ldt that every process will get unless we need
40627+ * something other than this.
40628+ */
40629+extern struct desc_struct default_ldt[];
40630+#ifndef CONFIG_X86_NO_IDT
40631+extern struct gate_struct idt_table[];
40632+#endif
40633+extern struct desc_ptr cpu_gdt_descr[];
40634+
40635+/* the cpu gdt accessor */
40636+#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
40637+
40638+static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
40639+{
40640+ struct gate_struct s;
40641+ s.offset_low = PTR_LOW(func);
40642+ s.segment = __KERNEL_CS;
40643+ s.ist = ist;
40644+ s.p = 1;
40645+ s.dpl = dpl;
40646+ s.zero0 = 0;
40647+ s.zero1 = 0;
40648+ s.type = type;
40649+ s.offset_middle = PTR_MIDDLE(func);
40650+ s.offset_high = PTR_HIGH(func);
40651+ /* does not need to be atomic because it is only done once at setup time */
40652+ memcpy(adr, &s, 16);
40653+}
40654+
40655+#ifndef CONFIG_X86_NO_IDT
40656+static inline void set_intr_gate(int nr, void *func)
40657+{
40658+ BUG_ON((unsigned)nr > 0xFF);
40659+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
40660+}
40661+
40662+static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
40663+{
40664+ BUG_ON((unsigned)nr > 0xFF);
40665+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
40666+}
40667+
40668+static inline void set_system_gate(int nr, void *func)
40669+{
40670+ BUG_ON((unsigned)nr > 0xFF);
40671+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
40672+}
40673+
40674+static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
40675+{
40676+ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
40677+}
40678+#endif
40679+
40680+static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
40681+ unsigned size)
40682+{
40683+ struct ldttss_desc d;
40684+ memset(&d,0,sizeof(d));
40685+ d.limit0 = size & 0xFFFF;
40686+ d.base0 = PTR_LOW(tss);
40687+ d.base1 = PTR_MIDDLE(tss) & 0xFF;
40688+ d.type = type;
40689+ d.p = 1;
40690+ d.limit1 = (size >> 16) & 0xF;
40691+ d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
40692+ d.base3 = PTR_HIGH(tss);
40693+ memcpy(ptr, &d, 16);
40694+}
40695+
40696+#ifndef CONFIG_X86_NO_TSS
40697+static inline void set_tss_desc(unsigned cpu, void *addr)
40698+{
40699+ /*
40700+ * sizeof(unsigned long) coming from an extra "long" at the end
40701+ * of the iobitmap. See tss_struct definition in processor.h
40702+ *
40703+ * -1? seg base+limit should be pointing to the address of the
40704+ * last valid byte
40705+ */
40706+ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
40707+ (unsigned long)addr, DESC_TSS,
40708+ IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
40709+}
40710+#endif
40711+
40712+static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
40713+{
40714+ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
40715+ DESC_LDT, size * 8 - 1);
40716+}
40717+
40718+static inline void set_seg_base(unsigned cpu, int entry, void *base)
40719+{
40720+ struct desc_struct *d = &cpu_gdt(cpu)[entry];
40721+ u32 addr = (u32)(u64)base;
40722+ BUG_ON((u64)base >> 32);
40723+ d->base0 = addr & 0xffff;
40724+ d->base1 = (addr >> 16) & 0xff;
40725+ d->base2 = (addr >> 24) & 0xff;
40726+}
40727+
40728+#define LDT_entry_a(info) \
40729+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
40730+/* Don't allow setting of the lm bit. It is useless anyway because
40731+ 64-bit system calls require __USER_CS. */
40732+#define LDT_entry_b(info) \
40733+ (((info)->base_addr & 0xff000000) | \
40734+ (((info)->base_addr & 0x00ff0000) >> 16) | \
40735+ ((info)->limit & 0xf0000) | \
40736+ (((info)->read_exec_only ^ 1) << 9) | \
40737+ ((info)->contents << 10) | \
40738+ (((info)->seg_not_present ^ 1) << 15) | \
40739+ ((info)->seg_32bit << 22) | \
40740+ ((info)->limit_in_pages << 23) | \
40741+ ((info)->useable << 20) | \
40742+ /* ((info)->lm << 21) | */ \
40743+ 0x7000)
40744+
40745+#define LDT_empty(info) (\
40746+ (info)->base_addr == 0 && \
40747+ (info)->limit == 0 && \
40748+ (info)->contents == 0 && \
40749+ (info)->read_exec_only == 1 && \
40750+ (info)->seg_32bit == 0 && \
40751+ (info)->limit_in_pages == 0 && \
40752+ (info)->seg_not_present == 1 && \
40753+ (info)->useable == 0 && \
40754+ (info)->lm == 0)
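
A hedged sketch of how LDT_entry_a()/LDT_entry_b() above would be used: they pack a struct user_desc (from <asm/ldt.h>) into the two 32-bit descriptor words, which are then written through the hypervisor rather than directly. The 'ldt' array and 'idx' are placeholders, and real callers also validate the descriptor (e.g. with LDT_empty()) and handle errors.

/* Illustrative sketch only. */
static inline int example_install_ldt_entry(struct desc_struct *ldt, int idx,
					    const struct user_desc *info)
{
	u64 entry = ((u64)LDT_entry_b(info) << 32) | LDT_entry_a(info);

	/* the LDT page must be writable by Xen; placeholder error handling */
	return HYPERVISOR_update_descriptor(virt_to_machine(&ldt[idx]), entry);
}
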
40755+
40756+#if TLS_SIZE != 24
40757+# error update this code.
40758+#endif
40759+
40760+static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
40761+{
40762+#if 0
40763+ u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
40764+ gdt[0] = t->tls_array[0];
40765+ gdt[1] = t->tls_array[1];
40766+ gdt[2] = t->tls_array[2];
40767+#endif
40768+#define C(i) \
40769+ if (HYPERVISOR_update_descriptor(virt_to_machine(&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]), \
40770+ t->tls_array[i])) \
40771+ BUG();
40772+
40773+ C(0); C(1); C(2);
40774+#undef C
40775+}
40776+
40777+/*
40778+ * load one particular LDT into the current CPU
40779+ */
40780+static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
40781+{
40782+ void *segments = pc->ldt;
40783+ int count = pc->size;
40784+
40785+ if (likely(!count))
40786+ segments = NULL;
40787+
40788+ xen_set_ldt(segments, count);
40789+}
40790+
40791+static inline void load_LDT(mm_context_t *pc)
40792+{
40793+ int cpu = get_cpu();
40794+ load_LDT_nolock(pc, cpu);
40795+ put_cpu();
40796+}
40797+
40798+extern struct desc_ptr idt_descr;
40799+
40800+#endif /* !__ASSEMBLY__ */
40801+
40802+#endif
40803Index: head-2008-11-25/include/asm-x86/mach-xen/asm/dma-mapping_64.h
40804===================================================================
40805--- /dev/null 1970-01-01 00:00:00.000000000 +0000
40806+++ head-2008-11-25/include/asm-x86/mach-xen/asm/dma-mapping_64.h 2007-06-12 13:14:13.000000000 +0200
40807@@ -0,0 +1,207 @@
40808+#ifndef _X8664_DMA_MAPPING_H
40809+#define _X8664_DMA_MAPPING_H 1
40810+
40811+/*
40812+ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
40813+ * documentation.
40814+ */
40815+
40816+
40817+#include <asm/scatterlist.h>
40818+#include <asm/io.h>
40819+#include <asm/swiotlb.h>
40820+
40821+struct dma_mapping_ops {
40822+ int (*mapping_error)(dma_addr_t dma_addr);
40823+ void* (*alloc_coherent)(struct device *dev, size_t size,
40824+ dma_addr_t *dma_handle, gfp_t gfp);
40825+ void (*free_coherent)(struct device *dev, size_t size,
40826+ void *vaddr, dma_addr_t dma_handle);
40827+ dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
40828+ size_t size, int direction);
40829+ /* like map_single, but doesn't check the device mask */
40830+ dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
40831+ size_t size, int direction);
40832+ void (*unmap_single)(struct device *dev, dma_addr_t addr,
40833+ size_t size, int direction);
40834+ void (*sync_single_for_cpu)(struct device *hwdev,
40835+ dma_addr_t dma_handle, size_t size,
40836+ int direction);
40837+ void (*sync_single_for_device)(struct device *hwdev,
40838+ dma_addr_t dma_handle, size_t size,
40839+ int direction);
40840+ void (*sync_single_range_for_cpu)(struct device *hwdev,
40841+ dma_addr_t dma_handle, unsigned long offset,
40842+ size_t size, int direction);
40843+ void (*sync_single_range_for_device)(struct device *hwdev,
40844+ dma_addr_t dma_handle, unsigned long offset,
40845+ size_t size, int direction);
40846+ void (*sync_sg_for_cpu)(struct device *hwdev,
40847+ struct scatterlist *sg, int nelems,
40848+ int direction);
40849+ void (*sync_sg_for_device)(struct device *hwdev,
40850+ struct scatterlist *sg, int nelems,
40851+ int direction);
40852+ int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
40853+ int nents, int direction);
40854+ void (*unmap_sg)(struct device *hwdev,
40855+ struct scatterlist *sg, int nents,
40856+ int direction);
40857+ int (*dma_supported)(struct device *hwdev, u64 mask);
40858+ int is_phys;
40859+};
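
As an illustrative sketch of the indirection this ops table provides: drivers do not call dma_ops directly but go through the generic dma_map_single()/dma_unmap_single() wrappers (declared further below and in the shared i386 header this file includes), which dispatch through whichever dma_mapping_ops implementation is active (nommu, swiotlb, ...). The device, buffer and length here are placeholders.

/* Illustrative sketch only. */
static int example_dma_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(handle))
		return -EIO;
	/* ... program the device with 'handle' and wait for completion ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
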
40860+
40861+extern dma_addr_t bad_dma_address;
40862+extern struct dma_mapping_ops* dma_ops;
40863+extern int iommu_merge;
40864+
40865+static inline int valid_dma_direction(int dma_direction)
40866+{
40867+ return ((dma_direction == DMA_BIDIRECTIONAL) ||
40868+ (dma_direction == DMA_TO_DEVICE) ||
40869+ (dma_direction == DMA_FROM_DEVICE));
40870+}
40871+
40872+#if 0
40873+static inline int dma_mapping_error(dma_addr_t dma_addr)
40874+{
40875+ if (dma_ops->mapping_error)
40876+ return dma_ops->mapping_error(dma_addr);
40877+
40878+ return (dma_addr == bad_dma_address);
40879+}
40880+
40881+extern void *dma_alloc_coherent(struct device *dev, size_t size,
40882+ dma_addr_t *dma_handle, gfp_t gfp);
40883+extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
40884+ dma_addr_t dma_handle);
40885+
40886+static inline dma_addr_t
40887+dma_map_single(struct device *hwdev, void *ptr, size_t size,
40888+ int direction)
40889+{
40890+ BUG_ON(!valid_dma_direction(direction));
40891+ return dma_ops->map_single(hwdev, ptr, size, direction);
40892+}
40893+
40894+static inline void
40895+dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
40896+ int direction)
40897+{
40898+ BUG_ON(!valid_dma_direction(direction));
40899+ dma_ops->unmap_single(dev, addr, size, direction);
40900+}
40901+
40902+#define dma_map_page(dev,page,offset,size,dir) \
40903+ dma_map_single((dev), page_address(page)+(offset), (size), (dir))
40904+
40905+#define dma_unmap_page dma_unmap_single
40906+
40907+static inline void
40908+dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
40909+ size_t size, int direction)
40910+{
40911+ BUG_ON(!valid_dma_direction(direction));
40912+ if (dma_ops->sync_single_for_cpu)
40913+ dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
40914+ direction);
40915+ flush_write_buffers();
40916+}
40917+
40918+static inline void
40919+dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
40920+ size_t size, int direction)
40921+{
40922+ BUG_ON(!valid_dma_direction(direction));
40923+ if (dma_ops->sync_single_for_device)
40924+ dma_ops->sync_single_for_device(hwdev, dma_handle, size,
40925+ direction);
40926+ flush_write_buffers();
40927+}
40928+
40929+static inline void
40930+dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
40931+ unsigned long offset, size_t size, int direction)
40932+{
40933+ BUG_ON(!valid_dma_direction(direction));
40934+ if (dma_ops->sync_single_range_for_cpu) {
40935+ dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
40936+ }
40937+
40938+ flush_write_buffers();
40939+}
40940+
40941+static inline void
40942+dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
40943+ unsigned long offset, size_t size, int direction)
40944+{
40945+ BUG_ON(!valid_dma_direction(direction));
40946+ if (dma_ops->sync_single_range_for_device)
40947+ dma_ops->sync_single_range_for_device(hwdev, dma_handle,
40948+ offset, size, direction);
40949+
40950+ flush_write_buffers();
40951+}
40952+
40953+static inline void
40954+dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
40955+ int nelems, int direction)
40956+{
40957+ BUG_ON(!valid_dma_direction(direction));
40958+ if (dma_ops->sync_sg_for_cpu)
40959+ dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
40960+ flush_write_buffers();
40961+}
40962+
40963+static inline void
40964+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
40965+ int nelems, int direction)
40966+{
40967+ BUG_ON(!valid_dma_direction(direction));
40968+ if (dma_ops->sync_sg_for_device) {
40969+ dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
40970+ }
40971+
40972+ flush_write_buffers();
40973+}
40974+
40975+static inline int
40976+dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
40977+{
40978+ BUG_ON(!valid_dma_direction(direction));
40979+ return dma_ops->map_sg(hwdev, sg, nents, direction);
40980+}
40981+
40982+static inline void
40983+dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
40984+ int direction)
40985+{
40986+ BUG_ON(!valid_dma_direction(direction));
40987+ dma_ops->unmap_sg(hwdev, sg, nents, direction);
40988+}
40989+
40990+extern int dma_supported(struct device *hwdev, u64 mask);
40991+
40992+/* same for gart, swiotlb, and nommu */
40993+static inline int dma_get_cache_alignment(void)
40994+{
40995+ return boot_cpu_data.x86_clflush_size;
40996+}
40997+
40998+#define dma_is_consistent(h) 1
40999+
41000+extern int dma_set_mask(struct device *dev, u64 mask);
41001+
41002+static inline void
41003+dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
41004+{
41005+ flush_write_buffers();
41006+}
41007+
41008+extern struct device fallback_dev;
41009+extern int panic_on_overflow;
41010+#endif
41011+
41012+#endif /* _X8664_DMA_MAPPING_H */
41013+
41014+#include <asm-i386/mach-xen/asm/dma-mapping.h>
41015Index: head-2008-11-25/include/asm-x86/mach-xen/asm/fixmap_64.h
41016===================================================================
41017--- /dev/null 1970-01-01 00:00:00.000000000 +0000
41018+++ head-2008-11-25/include/asm-x86/mach-xen/asm/fixmap_64.h 2007-06-12 13:14:13.000000000 +0200
41019@@ -0,0 +1,112 @@
41020+/*
41021+ * fixmap.h: compile-time virtual memory allocation
41022+ *
41023+ * This file is subject to the terms and conditions of the GNU General Public
41024+ * License. See the file "COPYING" in the main directory of this archive
41025+ * for more details.
41026+ *
41027+ * Copyright (C) 1998 Ingo Molnar
41028+ */
41029+
41030+#ifndef _ASM_FIXMAP_H
41031+#define _ASM_FIXMAP_H
41032+
41033+#include <linux/kernel.h>
41034+#include <asm/apicdef.h>
41035+#include <asm/page.h>
41036+#include <asm/vsyscall.h>
41037+#include <asm/vsyscall32.h>
41038+#include <asm/acpi.h>
41039+
41040+/*
41041+ * Here we define all the compile-time 'special' virtual
41042+ * addresses. The point is to have a constant address at
41043+ * compile time, but to set the physical address only
41044+ * in the boot process.
41045+ *
41046+ * these 'compile-time allocated' memory buffers are
41047+ * fixed-size 4k pages (or larger if used with an increment
41048+ * higher than 1). Use set_fixmap(idx, phys) to associate
41049+ * physical memory with fixmap indices.
41050+ *
41051+ * TLB entries of such buffers will not be flushed across
41052+ * task switches.
41053+ */
41054+
41055+enum fixed_addresses {
41056+ VSYSCALL_LAST_PAGE,
41057+ VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
41058+ VSYSCALL_HPET,
41059+ FIX_HPET_BASE,
41060+#ifdef CONFIG_X86_LOCAL_APIC
41061+ FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
41062+#endif
41063+#ifdef CONFIG_X86_IO_APIC
41064+ FIX_IO_APIC_BASE_0,
41065+ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
41066+#endif
41067+#ifdef CONFIG_ACPI
41068+ FIX_ACPI_BEGIN,
41069+ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
41070+#endif
41071+ FIX_SHARED_INFO,
41072+#define NR_FIX_ISAMAPS 256
41073+ FIX_ISAMAP_END,
41074+ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
41075+ __end_of_permanent_fixed_addresses,
41076+ /* temporary boot-time mappings, used before ioremap() is functional */
41077+#define NR_FIX_BTMAPS 16
41078+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
41079+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
41080+ __end_of_fixed_addresses
41081+};
41082+
41083+extern void __set_fixmap (enum fixed_addresses idx,
41084+ unsigned long phys, pgprot_t flags);
41085+
41086+#define set_fixmap(idx, phys) \
41087+ __set_fixmap(idx, phys, PAGE_KERNEL)
41088+/*
41089+ * Some hardware wants to get fixmapped without caching.
41090+ */
41091+#define set_fixmap_nocache(idx, phys) \
41092+ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
41093+
41094+#define clear_fixmap(idx) \
41095+ __set_fixmap(idx, 0, __pgprot(0))
41096+
41097+#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
41098+#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
41099+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
41100+
41101+/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
41102+#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
41103+#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
41104+
41105+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
41106+
41107+extern void __this_fixmap_does_not_exist(void);
41108+
41109+/*
41110+ * 'index to address' translation. If anyone tries to use the idx
41111+ * directly without translation, we catch the bug with a NULL-dereference
41112+ * kernel oops. Illegal ranges of incoming indices are caught too.
41113+ */
41114+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
41115+{
41116+ /*
41117+ * this branch gets completely eliminated after inlining,
41118+ * except when someone tries to use fixaddr indices in an
41119+ * illegal way. (such as mixing up address types or using
41120+ * out-of-range indices).
41121+ *
41122+ * If it doesn't get removed, the linker will complain
41123+ * loudly with a reasonably clear error message..
41124+ */
41125+ if (idx >= __end_of_fixed_addresses)
41126+ __this_fixmap_does_not_exist();
41127+
41128+ return __fix_to_virt(idx);
41129+}
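
A hedged usage sketch of the fixmap API above: a machine page is bound to one of the fixed slots and then accessed through its compile-time-constant virtual address. FIX_SHARED_INFO is used here only as an example slot (it is the unconditional slot in the enum above) and 'phys' is a placeholder.

/* Illustrative sketch only. */
static void __init example_map_shared_info(unsigned long phys)
{
	unsigned long virt;

	set_fixmap(FIX_SHARED_INFO, phys);
	virt = fix_to_virt(FIX_SHARED_INFO);	/* constant after inlining */
	/* ... access the page through 'virt' ... */
	(void)virt;
}
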
41130+
41131+#endif
41132Index: head-2008-11-25/include/asm-x86/mach-xen/asm/hypercall_64.h
41133===================================================================
41134--- /dev/null 1970-01-01 00:00:00.000000000 +0000
41135+++ head-2008-11-25/include/asm-x86/mach-xen/asm/hypercall_64.h 2008-11-25 12:22:34.000000000 +0100
41136@@ -0,0 +1,408 @@
41137+/******************************************************************************
41138+ * hypercall.h
41139+ *
41140+ * Linux-specific hypervisor handling.
41141+ *
41142+ * Copyright (c) 2002-2004, K A Fraser
41143+ *
41144+ * 64-bit updates:
41145+ * Benjamin Liu <benjamin.liu@intel.com>
41146+ * Jun Nakajima <jun.nakajima@intel.com>
41147+ *
41148+ * This program is free software; you can redistribute it and/or
41149+ * modify it under the terms of the GNU General Public License version 2
41150+ * as published by the Free Software Foundation; or, when distributed
41151+ * separately from the Linux kernel or incorporated into other
41152+ * software packages, subject to the following license:
41153+ *
41154+ * Permission is hereby granted, free of charge, to any person obtaining a copy
41155+ * of this source file (the "Software"), to deal in the Software without
41156+ * restriction, including without limitation the rights to use, copy, modify,
41157+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
41158+ * and to permit persons to whom the Software is furnished to do so, subject to
41159+ * the following conditions:
41160+ *
41161+ * The above copyright notice and this permission notice shall be included in
41162+ * all copies or substantial portions of the Software.
41163+ *
41164+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
41165+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
41166+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
41167+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
41168+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
41169+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
41170+ * IN THE SOFTWARE.
41171+ */
41172+
41173+#ifndef __HYPERCALL_H__
41174+#define __HYPERCALL_H__
41175+
41176+#include <linux/string.h> /* memcpy() */
41177+#include <linux/stringify.h>
41178+
41179+#ifndef __HYPERVISOR_H__
41180+# error "please don't include this file directly"
41181+#endif
41182+
41183+#ifdef CONFIG_XEN
41184+#define HYPERCALL_STR(name) \
41185+ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)"
41186+#else
41187+#define HYPERCALL_STR(name) \
41188+ "mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\
41189+ "add hypercall_stubs(%%rip),%%rax; " \
41190+ "call *%%rax"
41191+#endif
41192+
41193+#define _hypercall0(type, name) \
41194+({ \
41195+ type __res; \
41196+ asm volatile ( \
41197+ HYPERCALL_STR(name) \
41198+ : "=a" (__res) \
41199+ : \
41200+ : "memory" ); \
41201+ __res; \
41202+})
41203+
41204+#define _hypercall1(type, name, a1) \
41205+({ \
41206+ type __res; \
41207+ long __ign1; \
41208+ asm volatile ( \
41209+ HYPERCALL_STR(name) \
41210+ : "=a" (__res), "=D" (__ign1) \
41211+ : "1" ((long)(a1)) \
41212+ : "memory" ); \
41213+ __res; \
41214+})
41215+
41216+#define _hypercall2(type, name, a1, a2) \
41217+({ \
41218+ type __res; \
41219+ long __ign1, __ign2; \
41220+ asm volatile ( \
41221+ HYPERCALL_STR(name) \
41222+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \
41223+ : "1" ((long)(a1)), "2" ((long)(a2)) \
41224+ : "memory" ); \
41225+ __res; \
41226+})
41227+
41228+#define _hypercall3(type, name, a1, a2, a3) \
41229+({ \
41230+ type __res; \
41231+ long __ign1, __ign2, __ign3; \
41232+ asm volatile ( \
41233+ HYPERCALL_STR(name) \
41234+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
41235+ "=d" (__ign3) \
41236+ : "1" ((long)(a1)), "2" ((long)(a2)), \
41237+ "3" ((long)(a3)) \
41238+ : "memory" ); \
41239+ __res; \
41240+})
41241+
41242+#define _hypercall4(type, name, a1, a2, a3, a4) \
41243+({ \
41244+ type __res; \
41245+ long __ign1, __ign2, __ign3; \
41246+ register long __arg4 asm("r10") = (long)(a4); \
41247+ asm volatile ( \
41248+ HYPERCALL_STR(name) \
41249+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
41250+ "=d" (__ign3), "+r" (__arg4) \
41251+ : "1" ((long)(a1)), "2" ((long)(a2)), \
41252+ "3" ((long)(a3)) \
41253+ : "memory" ); \
41254+ __res; \
41255+})
41256+
41257+#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
41258+({ \
41259+ type __res; \
41260+ long __ign1, __ign2, __ign3; \
41261+ register long __arg4 asm("r10") = (long)(a4); \
41262+ register long __arg5 asm("r8") = (long)(a5); \
41263+ asm volatile ( \
41264+ HYPERCALL_STR(name) \
41265+ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
41266+ "=d" (__ign3), "+r" (__arg4), "+r" (__arg5) \
41267+ : "1" ((long)(a1)), "2" ((long)(a2)), \
41268+ "3" ((long)(a3)) \
41269+ : "memory" ); \
41270+ __res; \
41271+})
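
As a minimal sketch of what these macros encode: arguments are placed in %rdi, %rsi, %rdx, %r10 and %r8 (see the output/clobber lists above) with the result returned in %rax, and the typed wrappers that follow hide this. Yielding the VCPU is a simple two-argument example; SCHEDOP_yield comes from xen/interface/sched.h and the helper name is an assumption.

/* Illustrative sketch only. */
static inline int example_yield_vcpu(void)
{
	return HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
}
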
41272+
41273+static inline int __must_check
41274+HYPERVISOR_set_trap_table(
41275+ const trap_info_t *table)
41276+{
41277+ return _hypercall1(int, set_trap_table, table);
41278+}
41279+
41280+static inline int __must_check
41281+HYPERVISOR_mmu_update(
41282+ mmu_update_t *req, unsigned int count, unsigned int *success_count,
41283+ domid_t domid)
41284+{
41285+ return _hypercall4(int, mmu_update, req, count, success_count, domid);
41286+}
41287+
41288+static inline int __must_check
41289+HYPERVISOR_mmuext_op(
41290+ struct mmuext_op *op, unsigned int count, unsigned int *success_count,
41291+ domid_t domid)
41292+{
41293+ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
41294+}
41295+
41296+static inline int __must_check
41297+HYPERVISOR_set_gdt(
41298+ unsigned long *frame_list, unsigned int entries)
41299+{
41300+ return _hypercall2(int, set_gdt, frame_list, entries);
41301+}
41302+
41303+static inline int __must_check
41304+HYPERVISOR_stack_switch(
41305+ unsigned long ss, unsigned long esp)
41306+{
41307+ return _hypercall2(int, stack_switch, ss, esp);
41308+}
41309+
41310+static inline int __must_check
41311+HYPERVISOR_set_callbacks(
41312+ unsigned long event_address, unsigned long failsafe_address,
41313+ unsigned long syscall_address)
41314+{
41315+ return _hypercall3(int, set_callbacks,
41316+ event_address, failsafe_address, syscall_address);
41317+}
41318+
41319+static inline int
41320+HYPERVISOR_fpu_taskswitch(
41321+ int set)
41322+{
41323+ return _hypercall1(int, fpu_taskswitch, set);
41324+}
41325+
41326+static inline int __must_check
41327+HYPERVISOR_sched_op_compat(
41328+ int cmd, unsigned long arg)
41329+{
41330+ return _hypercall2(int, sched_op_compat, cmd, arg);
41331+}
41332+
41333+static inline int __must_check
41334+HYPERVISOR_sched_op(
41335+ int cmd, void *arg)
41336+{
41337+ return _hypercall2(int, sched_op, cmd, arg);
41338+}
41339+
41340+static inline long __must_check
41341+HYPERVISOR_set_timer_op(
41342+ u64 timeout)
41343+{
41344+ return _hypercall1(long, set_timer_op, timeout);
41345+}
41346+
41347+static inline int __must_check
41348+HYPERVISOR_platform_op(
41349+ struct xen_platform_op *platform_op)
41350+{
41351+ platform_op->interface_version = XENPF_INTERFACE_VERSION;
41352+ return _hypercall1(int, platform_op, platform_op);
41353+}
41354+
41355+static inline int __must_check
41356+HYPERVISOR_set_debugreg(
41357+ unsigned int reg, unsigned long value)
41358+{
41359+ return _hypercall2(int, set_debugreg, reg, value);
41360+}
41361+
41362+static inline unsigned long __must_check
41363+HYPERVISOR_get_debugreg(
41364+ unsigned int reg)
41365+{
41366+ return _hypercall1(unsigned long, get_debugreg, reg);
41367+}
41368+
41369+static inline int __must_check
41370+HYPERVISOR_update_descriptor(
41371+ unsigned long ma, unsigned long word)
41372+{
41373+ return _hypercall2(int, update_descriptor, ma, word);
41374+}
41375+
41376+static inline int __must_check
41377+HYPERVISOR_memory_op(
41378+ unsigned int cmd, void *arg)
41379+{
41380+ return _hypercall2(int, memory_op, cmd, arg);
41381+}
41382+
41383+static inline int __must_check
41384+HYPERVISOR_multicall(
41385+ multicall_entry_t *call_list, unsigned int nr_calls)
41386+{
41387+ return _hypercall2(int, multicall, call_list, nr_calls);
41388+}
41389+
41390+static inline int __must_check
41391+HYPERVISOR_update_va_mapping(
41392+ unsigned long va, pte_t new_val, unsigned long flags)
41393+{
41394+ return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
41395+}
41396+
41397+static inline int __must_check
41398+HYPERVISOR_event_channel_op(
41399+ int cmd, void *arg)
41400+{
41401+ int rc = _hypercall2(int, event_channel_op, cmd, arg);
41402+
41403+#if CONFIG_XEN_COMPAT <= 0x030002
41404+ if (unlikely(rc == -ENOSYS)) {
41405+ struct evtchn_op op;
41406+ op.cmd = cmd;
41407+ memcpy(&op.u, arg, sizeof(op.u));
41408+ rc = _hypercall1(int, event_channel_op_compat, &op);
41409+ memcpy(arg, &op.u, sizeof(op.u));
41410+ }
41411+#endif
41412+
41413+ return rc;
41414+}
41415+
41416+static inline int __must_check
41417+HYPERVISOR_xen_version(
41418+ int cmd, void *arg)
41419+{
41420+ return _hypercall2(int, xen_version, cmd, arg);
41421+}
41422+
41423+static inline int __must_check
41424+HYPERVISOR_console_io(
41425+ int cmd, unsigned int count, char *str)
41426+{
41427+ return _hypercall3(int, console_io, cmd, count, str);
41428+}
41429+
41430+static inline int __must_check
41431+HYPERVISOR_physdev_op(
41432+ int cmd, void *arg)
41433+{
41434+ int rc = _hypercall2(int, physdev_op, cmd, arg);
41435+
41436+#if CONFIG_XEN_COMPAT <= 0x030002
41437+ if (unlikely(rc == -ENOSYS)) {
41438+ struct physdev_op op;
41439+ op.cmd = cmd;
41440+ memcpy(&op.u, arg, sizeof(op.u));
41441+ rc = _hypercall1(int, physdev_op_compat, &op);
41442+ memcpy(arg, &op.u, sizeof(op.u));
41443+ }
41444+#endif
41445+
41446+ return rc;
41447+}
41448+
41449+static inline int __must_check
41450+HYPERVISOR_grant_table_op(
41451+ unsigned int cmd, void *uop, unsigned int count)
41452+{
41453+ return _hypercall3(int, grant_table_op, cmd, uop, count);
41454+}
41455+
41456+static inline int __must_check
41457+HYPERVISOR_update_va_mapping_otherdomain(
41458+ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
41459+{
41460+ return _hypercall4(int, update_va_mapping_otherdomain, va,
41461+ new_val.pte, flags, domid);
41462+}
41463+
41464+static inline int __must_check
41465+HYPERVISOR_vm_assist(
41466+ unsigned int cmd, unsigned int type)
41467+{
41468+ return _hypercall2(int, vm_assist, cmd, type);
41469+}
41470+
41471+static inline int __must_check
41472+HYPERVISOR_vcpu_op(
41473+ int cmd, unsigned int vcpuid, void *extra_args)
41474+{
41475+ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
41476+}
41477+
41478+static inline int __must_check
41479+HYPERVISOR_set_segment_base(
41480+ int reg, unsigned long value)
41481+{
41482+ return _hypercall2(int, set_segment_base, reg, value);
41483+}
41484+
41485+static inline int __must_check
41486+HYPERVISOR_suspend(
41487+ unsigned long srec)
41488+{
41489+ struct sched_shutdown sched_shutdown = {
41490+ .reason = SHUTDOWN_suspend
41491+ };
41492+
41493+ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
41494+ &sched_shutdown, srec);
41495+
41496+#if CONFIG_XEN_COMPAT <= 0x030002
41497+ if (rc == -ENOSYS)
41498+ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
41499+ SHUTDOWN_suspend, srec);
41500+#endif
41501+
41502+ return rc;
41503+}
41504+
41505+#if CONFIG_XEN_COMPAT <= 0x030002
41506+static inline int
41507+HYPERVISOR_nmi_op(
41508+ unsigned long op, void *arg)
41509+{
41510+ return _hypercall2(int, nmi_op, op, arg);
41511+}
41512+#endif
41513+
41514+#ifndef CONFIG_XEN
41515+static inline unsigned long __must_check
41516+HYPERVISOR_hvm_op(
41517+ int op, void *arg)
41518+{
41519+ return _hypercall2(unsigned long, hvm_op, op, arg);
41520+}
41521+#endif
41522+
41523+static inline int __must_check
41524+HYPERVISOR_callback_op(
41525+ int cmd, const void *arg)
41526+{
41527+ return _hypercall2(int, callback_op, cmd, arg);
41528+}
41529+
41530+static inline int __must_check
41531+HYPERVISOR_xenoprof_op(
41532+ int op, void *arg)
41533+{
41534+ return _hypercall2(int, xenoprof_op, op, arg);
41535+}
41536+
41537+static inline int __must_check
41538+HYPERVISOR_kexec_op(
41539+ unsigned long op, void *args)
41540+{
41541+ return _hypercall2(int, kexec_op, op, args);
41542+}
41543+
41544+#endif /* __HYPERCALL_H__ */
41545Index: head-2008-11-25/include/asm-x86/mach-xen/asm/irqflags_64.h
41546===================================================================
41547--- /dev/null 1970-01-01 00:00:00.000000000 +0000
41548+++ head-2008-11-25/include/asm-x86/mach-xen/asm/irqflags_64.h 2007-06-12 13:14:13.000000000 +0200
41549@@ -0,0 +1,139 @@
41550+/*
41551+ * include/asm-x86_64/irqflags.h
41552+ *
41553+ * IRQ flags handling
41554+ *
41555+ * This file gets included from lowlevel asm headers too, to provide
41556+ * wrapped versions of the local_irq_*() APIs, based on the
41557+ * raw_local_irq_*() functions from the lowlevel headers.
41558+ */
41559+#ifndef _ASM_IRQFLAGS_H
41560+#define _ASM_IRQFLAGS_H
41561+
41562+#ifndef __ASSEMBLY__
41563+/*
41564+ * Interrupt control:
41565+ */
41566+
41567+/*
41568+ * The use of 'barrier' in the following reflects their use as local-lock
41569+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
41570+ * critical operations are executed. All critical operations must complete
41571+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
41572+ * includes these barriers, for example.
41573+ */
41574+
41575+#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
41576+
41577+#define raw_local_save_flags(flags) \
41578+ do { (flags) = __raw_local_save_flags(); } while (0)
41579+
41580+#define raw_local_irq_restore(x) \
41581+do { \
41582+ vcpu_info_t *_vcpu; \
41583+ barrier(); \
41584+ _vcpu = current_vcpu_info(); \
41585+ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
41586+ barrier(); /* unmask then check (avoid races) */ \
41587+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
41588+ force_evtchn_callback(); \
41589+ } \
41590+} while (0)
41591+
41592+#ifdef CONFIG_X86_VSMP
41593+
41594+/*
41595+ * Interrupt control for the VSMP architecture:
41596+ */
41597+
41598+static inline void raw_local_irq_disable(void)
41599+{
41600+ unsigned long flags = __raw_local_save_flags();
41601+
41602+ raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
41603+}
41604+
41605+static inline void raw_local_irq_enable(void)
41606+{
41607+ unsigned long flags = __raw_local_save_flags();
41608+
41609+ raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
41610+}
41611+
41612+static inline int raw_irqs_disabled_flags(unsigned long flags)
41613+{
41614+ return !(flags & (1<<9)) || (flags & (1 << 18));
41615+}
41616+
41617+#else /* CONFIG_X86_VSMP */
41618+
41619+#define raw_local_irq_disable() \
41620+do { \
41621+ current_vcpu_info()->evtchn_upcall_mask = 1; \
41622+ barrier(); \
41623+} while (0)
41624+
41625+#define raw_local_irq_enable() \
41626+do { \
41627+ vcpu_info_t *_vcpu; \
41628+ barrier(); \
41629+ _vcpu = current_vcpu_info(); \
41630+ _vcpu->evtchn_upcall_mask = 0; \
41631+ barrier(); /* unmask then check (avoid races) */ \
41632+ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
41633+ force_evtchn_callback(); \
41634+} while (0)
41635+
41636+static inline int raw_irqs_disabled_flags(unsigned long flags)
41637+{
41638+ return (flags != 0);
41639+}
41640+
41641+#endif
41642+
41643+/*
41644+ * For spinlocks, etc.:
41645+ */
41646+
41647+#define __raw_local_irq_save() \
41648+({ \
41649+ unsigned long flags = __raw_local_save_flags(); \
41650+ \
41651+ raw_local_irq_disable(); \
41652+ \
41653+ flags; \
41654+})
41655+
41656+#define raw_local_irq_save(flags) \
41657+ do { (flags) = __raw_local_irq_save(); } while (0)
41658+
41659+#define raw_irqs_disabled() \
41660+({ \
41661+ unsigned long flags = __raw_local_save_flags(); \
41662+ \
41663+ raw_irqs_disabled_flags(flags); \
41664+})
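
A hedged sketch of the usual save/disable/restore pattern built on the macros above; under Xen this masks event delivery on the local VCPU by toggling evtchn_upcall_mask rather than executing cli/sti, and restore replays any pending events. The function body is a placeholder.

/* Illustrative sketch only. */
static inline void example_critical_section(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* mask events, remember previous state */
	/* ... per-CPU work that must not race with event upcalls ... */
	raw_local_irq_restore(flags);	/* restore mask, run pending callbacks */
}
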
41665+
41666+/*
41667+ * Used in the idle loop; sti takes one instruction cycle
41668+ * to complete:
41669+ */
41670+void raw_safe_halt(void);
41671+
41672+/*
41673+ * Used when interrupts are already enabled or to
41674+ * shutdown the processor:
41675+ */
41676+void halt(void);
41677+
41678+#else /* __ASSEMBLY__: */
41679+# ifdef CONFIG_TRACE_IRQFLAGS
41680+# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
41681+# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
41682+# else
41683+# define TRACE_IRQS_ON
41684+# define TRACE_IRQS_OFF
41685+# endif
41686+#endif
41687+
41688+#endif
41689Index: head-2008-11-25/include/asm-x86/mach-xen/asm/maddr_64.h
41690===================================================================
41691--- /dev/null 1970-01-01 00:00:00.000000000 +0000
41692+++ head-2008-11-25/include/asm-x86/mach-xen/asm/maddr_64.h 2007-06-12 13:14:13.000000000 +0200
41693@@ -0,0 +1,161 @@
41694+#ifndef _X86_64_MADDR_H
41695+#define _X86_64_MADDR_H
41696+
41697+#include <xen/features.h>
41698+#include <xen/interface/xen.h>
41699+
41700+/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
41701+#define INVALID_P2M_ENTRY (~0UL)
41702+#define FOREIGN_FRAME_BIT (1UL<<63)
41703+#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
41704+
41705+/* Definitions for machine and pseudophysical addresses. */
41706+typedef unsigned long paddr_t;
41707+typedef unsigned long maddr_t;
41708+
41709+#ifdef CONFIG_XEN
41710+
41711+extern unsigned long *phys_to_machine_mapping;
41712+
41713+#undef machine_to_phys_mapping
41714+extern unsigned long *machine_to_phys_mapping;
41715+extern unsigned int machine_to_phys_order;
41716+
41717+static inline unsigned long pfn_to_mfn(unsigned long pfn)
41718+{
41719+ if (xen_feature(XENFEAT_auto_translated_physmap))
41720+ return pfn;
41721+ BUG_ON(end_pfn && pfn >= end_pfn);
41722+ return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
41723+}
41724+
41725+static inline int phys_to_machine_mapping_valid(unsigned long pfn)
41726+{
41727+ if (xen_feature(XENFEAT_auto_translated_physmap))
41728+ return 1;
41729+ BUG_ON(end_pfn && pfn >= end_pfn);
41730+ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
41731+}
41732+
41733+static inline unsigned long mfn_to_pfn(unsigned long mfn)
41734+{
41735+ unsigned long pfn;
41736+
41737+ if (xen_feature(XENFEAT_auto_translated_physmap))
41738+ return mfn;
41739+
41740+ if (unlikely((mfn >> machine_to_phys_order) != 0))
41741+ return end_pfn;
41742+
41743+ /* The array access can fail (e.g., device space beyond end of RAM). */
41744+ asm (
41745+ "1: movq %1,%0\n"
41746+ "2:\n"
41747+ ".section .fixup,\"ax\"\n"
41748+ "3: movq %2,%0\n"
41749+ " jmp 2b\n"
41750+ ".previous\n"
41751+ ".section __ex_table,\"a\"\n"
41752+ " .align 8\n"
41753+ " .quad 1b,3b\n"
41754+ ".previous"
41755+ : "=r" (pfn)
41756+ : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) );
41757+
41758+ return pfn;
41759+}
41760+
41761+/*
41762+ * We detect special mappings in one of two ways:
41763+ * 1. If the MFN is an I/O page then Xen will set the m2p entry
41764+ * to be outside our maximum possible pseudophys range.
41765+ * 2. If the MFN belongs to a different domain then we will certainly
41766+ * not have MFN in our p2m table. Conversely, if the page is ours,
41767+ * then we'll have p2m(m2p(MFN))==MFN.
41768+ * If we detect a special mapping then it doesn't have a 'struct page'.
41769+ * We force !pfn_valid() by returning an out-of-range pointer.
41770+ *
41771+ * NB. These checks require that, for any MFN that is not in our reservation,
41772+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
41773+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
41774+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
41775+ *
41776+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
41777+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
41778+ * require. In all the cases we care about, the FOREIGN_FRAME bit is
41779+ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
41780+ */
41781+static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
41782+{
41783+ unsigned long pfn = mfn_to_pfn(mfn);
41784+ if ((pfn < end_pfn)
41785+ && !xen_feature(XENFEAT_auto_translated_physmap)
41786+ && (phys_to_machine_mapping[pfn] != mfn))
41787+ return end_pfn; /* force !pfn_valid() */
41788+ return pfn;
41789+}
41790+
41791+static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
41792+{
41793+ BUG_ON(end_pfn && pfn >= end_pfn);
41794+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
41795+ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
41796+ return;
41797+ }
41798+ phys_to_machine_mapping[pfn] = mfn;
41799+}
41800+
41801+static inline maddr_t phys_to_machine(paddr_t phys)
41802+{
41803+ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
41804+ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
41805+ return machine;
41806+}
41807+
41808+static inline paddr_t machine_to_phys(maddr_t machine)
41809+{
41810+ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
41811+ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
41812+ return phys;
41813+}
41814+
41815+static inline paddr_t pte_phys_to_machine(paddr_t phys)
41816+{
41817+ maddr_t machine;
41818+ machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
41819+ machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
41820+ return machine;
41821+}
41822+
41823+static inline paddr_t pte_machine_to_phys(maddr_t machine)
41824+{
41825+ paddr_t phys;
41826+ phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
41827+ phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
41828+ return phys;
41829+}
41830+
41831+#define __pte_ma(x) ((pte_t) { (x) } )
41832+#define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
41833+
41834+#else /* !CONFIG_XEN */
41835+
41836+#define pfn_to_mfn(pfn) (pfn)
41837+#define mfn_to_pfn(mfn) (mfn)
41838+#define mfn_to_local_pfn(mfn) (mfn)
41839+#define set_phys_to_machine(pfn, mfn) ((void)0)
41840+#define phys_to_machine_mapping_valid(pfn) (1)
41841+#define phys_to_machine(phys) ((maddr_t)(phys))
41842+#define machine_to_phys(mach) ((paddr_t)(mach))
41843+#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
41844+#define __pte_ma(x) __pte(x)
41845+
41846+#endif /* !CONFIG_XEN */
41847+
41848+/* VIRT <-> MACHINE conversion */
41849+#define virt_to_machine(v) (phys_to_machine(__pa(v)))
41850+#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
41851+#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
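/*
 * Usage sketch, assuming a directly mapped kernel allocation (the variable
 * names are illustrative): the hypervisor works in machine frames, so code
 * that hands a frame to Xen wants virt_to_mfn(), not a raw
 * __pa(...) >> PAGE_SHIFT pseudo-physical frame number.
 *
 *	unsigned long vaddr = get_zeroed_page(GFP_KERNEL);
 *	unsigned long frame = virt_to_mfn(vaddr);
 */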
41852+
41853+#endif /* _X86_64_MADDR_H */
41854+
41855Index: head-2008-11-25/include/asm-x86/mach-xen/asm/mmu_context_64.h
41856===================================================================
41857--- /dev/null 1970-01-01 00:00:00.000000000 +0000
41858+++ head-2008-11-25/include/asm-x86/mach-xen/asm/mmu_context_64.h 2007-06-12 13:14:13.000000000 +0200
41859@@ -0,0 +1,136 @@
41860+#ifndef __X86_64_MMU_CONTEXT_H
41861+#define __X86_64_MMU_CONTEXT_H
41862+
41863+#include <asm/desc.h>
41864+#include <asm/atomic.h>
41865+#include <asm/pgalloc.h>
41866+#include <asm/page.h>
41867+#include <asm/pda.h>
41868+#include <asm/pgtable.h>
41869+#include <asm/tlbflush.h>
41870+
41871+/*
41872+ * possibly do the LDT unload here?
41873+ */
41874+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
41875+void destroy_context(struct mm_struct *mm);
41876+
41877+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
41878+{
41879+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
41880+ if (read_pda(mmu_state) == TLBSTATE_OK)
41881+ write_pda(mmu_state, TLBSTATE_LAZY);
41882+#endif
41883+}
41884+
41885+#define prepare_arch_switch(next) __prepare_arch_switch()
41886+
41887+static inline void __prepare_arch_switch(void)
41888+{
41889+ /*
41890+ * Save away %es, %ds, %fs and %gs. Must happen before reload
41891+ * of cr3/ldt (i.e., not in __switch_to).
41892+ */
41893+ __asm__ __volatile__ (
41894+ "mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
41895+ : "=m" (current->thread.es),
41896+ "=m" (current->thread.ds),
41897+ "=m" (current->thread.fsindex),
41898+ "=m" (current->thread.gsindex) );
41899+
41900+ if (current->thread.ds)
41901+ __asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
41902+
41903+ if (current->thread.es)
41904+ __asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
41905+
41906+ if (current->thread.fsindex) {
41907+ __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
41908+ current->thread.fs = 0;
41909+ }
41910+
41911+ if (current->thread.gsindex) {
41912+ load_gs_index(0);
41913+ current->thread.gs = 0;
41914+ }
41915+}
41916+
41917+extern void mm_pin(struct mm_struct *mm);
41918+extern void mm_unpin(struct mm_struct *mm);
41919+void mm_pin_all(void);
41920+
41921+static inline void load_cr3(pgd_t *pgd)
41922+{
41923+ asm volatile("movq %0,%%cr3" :: "r" (phys_to_machine(__pa(pgd))) :
41924+ "memory");
41925+}
41926+
41927+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
41928+ struct task_struct *tsk)
41929+{
41930+ unsigned cpu = smp_processor_id();
41931+ struct mmuext_op _op[3], *op = _op;
41932+
41933+ if (likely(prev != next)) {
41934+ BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
41935+ !next->context.pinned);
41936+
41937+ /* stop flush ipis for the previous mm */
41938+ cpu_clear(cpu, prev->cpu_vm_mask);
41939+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
41940+ write_pda(mmu_state, TLBSTATE_OK);
41941+ write_pda(active_mm, next);
41942+#endif
41943+ cpu_set(cpu, next->cpu_vm_mask);
41944+
41945+ /* load_cr3(next->pgd) */
41946+ op->cmd = MMUEXT_NEW_BASEPTR;
41947+ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
41948+ op++;
41949+
41950+ /* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
41951+ op->cmd = MMUEXT_NEW_USER_BASEPTR;
41952+ op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
41953+ op++;
41954+
41955+ if (unlikely(next->context.ldt != prev->context.ldt)) {
41956+ /* load_LDT_nolock(&next->context, cpu) */
41957+ op->cmd = MMUEXT_SET_LDT;
41958+ op->arg1.linear_addr = (unsigned long)next->context.ldt;
41959+ op->arg2.nr_ents = next->context.size;
41960+ op++;
41961+ }
41962+
41963+ BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
41964+ }
41965+#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
41966+ else {
41967+ write_pda(mmu_state, TLBSTATE_OK);
41968+ if (read_pda(active_mm) != next)
41969+ out_of_line_bug();
41970+ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
41971+ /* We were in lazy tlb mode and leave_mm disabled
41972+ * tlb flush IPI delivery. We must reload CR3
41973+ * to make sure to use no freed page tables.
41974+ */
41975+ load_cr3(next->pgd);
41976+ xen_new_user_pt(__pa(__user_pgd(next->pgd)));
41977+ load_LDT_nolock(&next->context, cpu);
41978+ }
41979+ }
41980+#endif
41981+}
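/*
 * switch_mm() above batches up to three MMU extended operations into one
 * HYPERVISOR_mmuext_op() call. The same pattern in isolation, for a single
 * operation ('pgd' stands for the new top-level table):
 *
 *	struct mmuext_op op;
 *	op.cmd      = MMUEXT_NEW_BASEPTR;
 *	op.arg1.mfn = pfn_to_mfn(__pa(pgd) >> PAGE_SHIFT);
 *	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF));
 *
 * Batching keeps the number of guest/hypervisor transitions on the context
 * switch path down to one.
 */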
41982+
41983+#define deactivate_mm(tsk,mm) do { \
41984+ load_gs_index(0); \
41985+ asm volatile("movl %0,%%fs"::"r"(0)); \
41986+} while(0)
41987+
41988+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
41989+{
41990+ if (!next->context.pinned)
41991+ mm_pin(next);
41992+ switch_mm(prev, next, NULL);
41993+}
41994+
41995+#endif
41996Index: head-2008-11-25/include/asm-x86/mach-xen/asm/page_64.h
41997===================================================================
41998--- /dev/null 1970-01-01 00:00:00.000000000 +0000
41999+++ head-2008-11-25/include/asm-x86/mach-xen/asm/page_64.h 2008-04-02 12:34:02.000000000 +0200
42000@@ -0,0 +1,212 @@
42001+#ifndef _X86_64_PAGE_H
42002+#define _X86_64_PAGE_H
42003+
42004+/* #include <linux/string.h> */
42005+#ifndef __ASSEMBLY__
42006+#include <linux/kernel.h>
42007+#include <linux/types.h>
42008+#include <asm/bug.h>
42009+#endif
42010+#include <xen/interface/xen.h>
42011+
42012+/*
42013+ * Need to repeat this here in order to not include pgtable.h (which in turn
42014+ * depends on definitions made here), but to be able to use the symbolic names
42015+ * below. The preprocessor will warn if the two definitions aren't identical.
42016+ */
42017+#define _PAGE_PRESENT 0x001
42018+#define _PAGE_IO 0x200
42019+
42020+/* PAGE_SHIFT determines the page size */
42021+#define PAGE_SHIFT 12
42022+#ifdef __ASSEMBLY__
42023+#define PAGE_SIZE (0x1 << PAGE_SHIFT)
42024+#else
42025+#define PAGE_SIZE (1UL << PAGE_SHIFT)
42026+#endif
42027+#define PAGE_MASK (~(PAGE_SIZE-1))
42028+
42029+/* See Documentation/x86_64/mm.txt for a description of the memory map. */
42030+#define __PHYSICAL_MASK_SHIFT 46
42031+#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
42032+#define __VIRTUAL_MASK_SHIFT 48
42033+#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
42034+
42035+#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
42036+
42037+#define THREAD_ORDER 1
42038+#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
42039+#define CURRENT_MASK (~(THREAD_SIZE-1))
42040+
42041+#define EXCEPTION_STACK_ORDER 0
42042+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
42043+
42044+#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
42045+#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
42046+
42047+#define IRQSTACK_ORDER 2
42048+#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
42049+
42050+#define STACKFAULT_STACK 1
42051+#define DOUBLEFAULT_STACK 2
42052+#define NMI_STACK 3
42053+#define DEBUG_STACK 4
42054+#define MCE_STACK 5
42055+#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
42056+
42057+#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
42058+#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
42059+
42060+#define HPAGE_SHIFT PMD_SHIFT
42061+#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
42062+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
42063+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
42064+
42065+#ifdef __KERNEL__
42066+#ifndef __ASSEMBLY__
42067+
42068+extern unsigned long end_pfn;
42069+
42070+#include <asm/maddr.h>
42071+
42072+void clear_page(void *);
42073+void copy_page(void *, void *);
42074+
42075+#define clear_user_page(page, vaddr, pg) clear_page(page)
42076+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
42077+
42078+#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
42079+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
42080+
42081+/*
42082+ * These are used to make use of C type-checking..
42083+ */
42084+typedef struct { unsigned long pte; } pte_t;
42085+typedef struct { unsigned long pmd; } pmd_t;
42086+typedef struct { unsigned long pud; } pud_t;
42087+typedef struct { unsigned long pgd; } pgd_t;
42088+#define PTE_MASK PHYSICAL_PAGE_MASK
42089+
42090+typedef struct { unsigned long pgprot; } pgprot_t;
42091+
42092+#define __pte_val(x) ((x).pte)
42093+#define pte_val(x) ((__pte_val(x) & (_PAGE_PRESENT|_PAGE_IO)) \
42094+ == _PAGE_PRESENT ? \
42095+ pte_machine_to_phys(__pte_val(x)) : \
42096+ __pte_val(x))
42097+
42098+#define __pmd_val(x) ((x).pmd)
42099+static inline unsigned long pmd_val(pmd_t x)
42100+{
42101+ unsigned long ret = __pmd_val(x);
42102+#if CONFIG_XEN_COMPAT <= 0x030002
42103+ if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
42104+#else
42105+ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
42106+#endif
42107+ return ret;
42108+}
42109+
42110+#define __pud_val(x) ((x).pud)
42111+static inline unsigned long pud_val(pud_t x)
42112+{
42113+ unsigned long ret = __pud_val(x);
42114+ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
42115+ return ret;
42116+}
42117+
42118+#define __pgd_val(x) ((x).pgd)
42119+static inline unsigned long pgd_val(pgd_t x)
42120+{
42121+ unsigned long ret = __pgd_val(x);
42122+ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
42123+ return ret;
42124+}
42125+
42126+#define pgprot_val(x) ((x).pgprot)
42127+
42128+static inline pte_t __pte(unsigned long x)
42129+{
42130+ if ((x & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT)
42131+ x = pte_phys_to_machine(x);
42132+ return ((pte_t) { (x) });
42133+}
42134+
42135+static inline pmd_t __pmd(unsigned long x)
42136+{
42137+ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
42138+ return ((pmd_t) { (x) });
42139+}
42140+
42141+static inline pud_t __pud(unsigned long x)
42142+{
42143+ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
42144+ return ((pud_t) { (x) });
42145+}
42146+
42147+static inline pgd_t __pgd(unsigned long x)
42148+{
42149+ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
42150+ return ((pgd_t) { (x) });
42151+}
42152+
42153+#define __pgprot(x) ((pgprot_t) { (x) } )
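/*
 * Round-trip sketch for the constructors and accessors above ('phys' stands
 * for an illustrative pseudo-physical address): __pte() converts to a machine
 * address on the way in, pte_val() converts back on the way out, and neither
 * touches entries that are not present or that carry _PAGE_IO.
 *
 *	pte_t pte = __pte(phys | _PAGE_PRESENT | _PAGE_RW);
 *	// __pte_val(pte) now holds the machine address plus flags
 *	// pte_val(pte) == phys | _PAGE_PRESENT | _PAGE_RW again
 */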
42154+
42155+#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
42156+#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
42157+#define __START_KERNEL_map 0xffffffff80000000UL
42158+#define __PAGE_OFFSET 0xffff880000000000UL
42159+
42160+#else
42161+#define __PHYSICAL_START CONFIG_PHYSICAL_START
42162+#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
42163+#define __START_KERNEL_map 0xffffffff80000000
42164+#define __PAGE_OFFSET 0xffff880000000000
42165+#endif /* !__ASSEMBLY__ */
42166+
42167+#if CONFIG_XEN_COMPAT <= 0x030002
42168+#undef LOAD_OFFSET
42169+#define LOAD_OFFSET 0
42170+#endif
42171+
42172+/* to align the pointer to the (next) page boundary */
42173+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
42174+
42175+#define KERNEL_TEXT_SIZE (40UL*1024*1024)
42176+#define KERNEL_TEXT_START 0xffffffff80000000UL
42177+
42178+#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
42179+
42180+/* Note: __pa(&symbol_visible_to_c) should always be replaced with __pa_symbol.
42181+ Otherwise you risk miscompilation. */
42182+#define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
42183+/* __pa_symbol should be used for C visible symbols.
42184+ This seems to be the official gcc blessed way to do such arithmetic. */
42185+#define __pa_symbol(x) \
42186+ ({unsigned long v; \
42187+ asm("" : "=r" (v) : "0" (x)); \
42188+ __pa(v); })
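/*
 * Usage sketch for the note above ('init_level4_pgt' is only an example of a
 * C-visible symbol, 'some_pointer' of a runtime value): the empty asm hides
 * the value's origin, so the compiler cannot apply symbol-based assumptions
 * to the arithmetic in __pa(), which is the miscompilation risk mentioned.
 *
 *	unsigned long pa  = __pa_symbol(init_level4_pgt);  // for symbols
 *	unsigned long pa2 = __pa(some_pointer);            // for plain pointers
 */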
42189+
42190+#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
42191+#define __boot_va(x) __va(x)
42192+#define __boot_pa(x) __pa(x)
42193+#ifdef CONFIG_FLATMEM
42194+#define pfn_valid(pfn) ((pfn) < end_pfn)
42195+#endif
42196+
42197+#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
42198+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
42199+#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
42200+
42201+#define VM_DATA_DEFAULT_FLAGS \
42202+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
42203+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
42204+
42205+#define __HAVE_ARCH_GATE_AREA 1
42206+
42207+#include <asm-generic/memory_model.h>
42208+#include <asm-generic/page.h>
42209+
42210+#endif /* __KERNEL__ */
42211+
42212+#endif /* _X86_64_PAGE_H */
42213Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgalloc_64.h
42214===================================================================
42215--- /dev/null 1970-01-01 00:00:00.000000000 +0000
42216+++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgalloc_64.h 2007-06-18 08:38:13.000000000 +0200
42217@@ -0,0 +1,204 @@
42218+#ifndef _X86_64_PGALLOC_H
42219+#define _X86_64_PGALLOC_H
42220+
42221+#include <asm/fixmap.h>
42222+#include <asm/pda.h>
42223+#include <linux/threads.h>
42224+#include <linux/mm.h>
42225+#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
42226+
42227+#include <xen/features.h>
42228+void make_page_readonly(void *va, unsigned int feature);
42229+void make_page_writable(void *va, unsigned int feature);
42230+void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
42231+void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
42232+
42233+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
42234+
42235+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
42236+{
42237+ set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
42238+}
42239+
42240+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
42241+{
42242+ if (unlikely((mm)->context.pinned)) {
42243+ BUG_ON(HYPERVISOR_update_va_mapping(
42244+ (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
42245+ pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
42246+ set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
42247+ } else {
42248+ *(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
42249+ }
42250+}
42251+
42252+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
42253+{
42254+ if (unlikely((mm)->context.pinned)) {
42255+ BUG_ON(HYPERVISOR_update_va_mapping(
42256+ (unsigned long)pmd,
42257+ pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT,
42258+ PAGE_KERNEL_RO), 0));
42259+ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
42260+ } else {
42261+ *(pud) = __pud(_PAGE_TABLE | __pa(pmd));
42262+ }
42263+}
42264+
42265+/*
42266+ * We need to use the batch mode here, but pgd_populate() won't
42267+ * be called frequently.
42268+ */
42269+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
42270+{
42271+ if (unlikely((mm)->context.pinned)) {
42272+ BUG_ON(HYPERVISOR_update_va_mapping(
42273+ (unsigned long)pud,
42274+ pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT,
42275+ PAGE_KERNEL_RO), 0));
42276+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
42277+ set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
42278+ } else {
42279+ *(pgd) = __pgd(_PAGE_TABLE | __pa(pud));
42280+ *(__user_pgd(pgd)) = *(pgd);
42281+ }
42282+}
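/*
 * All three populate helpers above follow the same pattern for the pinned
 * case, sketched here once ('table_va' is an illustrative name for the new
 * lower-level table): the table page is remapped read-only before a
 * validated entry may point at it, since Xen will not validate the entry
 * while writable mappings of that page exist.
 *
 *	BUG_ON(HYPERVISOR_update_va_mapping((unsigned long)table_va,
 *		pfn_pte(virt_to_phys(table_va) >> PAGE_SHIFT, PAGE_KERNEL_RO),
 *		0));
 *	set_pud(pud, __pud(_PAGE_TABLE | __pa(table_va)));
 */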
42283+
42284+extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
42285+extern void pte_free(struct page *pte);
42286+
42287+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
42288+{
42289+ struct page *pg;
42290+
42291+ pg = pte_alloc_one(mm, addr);
42292+ return pg ? page_address(pg) : NULL;
42293+}
42294+
42295+static inline void pmd_free(pmd_t *pmd)
42296+{
42297+ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
42298+ pte_free(virt_to_page(pmd));
42299+}
42300+
42301+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
42302+{
42303+ struct page *pg;
42304+
42305+ pg = pte_alloc_one(mm, addr);
42306+ return pg ? page_address(pg) : NULL;
42307+}
42308+
42309+static inline void pud_free(pud_t *pud)
42310+{
42311+ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
42312+ pte_free(virt_to_page(pud));
42313+}
42314+
42315+static inline void pgd_list_add(pgd_t *pgd)
42316+{
42317+ struct page *page = virt_to_page(pgd);
42318+
42319+ spin_lock(&pgd_lock);
42320+ page->index = (pgoff_t)pgd_list;
42321+ if (pgd_list)
42322+ pgd_list->private = (unsigned long)&page->index;
42323+ pgd_list = page;
42324+ page->private = (unsigned long)&pgd_list;
42325+ spin_unlock(&pgd_lock);
42326+}
42327+
42328+static inline void pgd_list_del(pgd_t *pgd)
42329+{
42330+ struct page *next, **pprev, *page = virt_to_page(pgd);
42331+
42332+ spin_lock(&pgd_lock);
42333+ next = (struct page *)page->index;
42334+ pprev = (struct page **)page->private;
42335+ *pprev = next;
42336+ if (next)
42337+ next->private = (unsigned long)pprev;
42338+ spin_unlock(&pgd_lock);
42339+}
42340+
42341+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
42342+{
42343+ /*
42344+ * We allocate two contiguous pages for kernel and user.
42345+ */
42346+ unsigned boundary;
42347+ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
42348+ if (!pgd)
42349+ return NULL;
42350+ pgd_list_add(pgd);
42351+ /*
42352+ * Copy kernel pointers in from init.
42353+ * Could keep a freelist or slab cache of those because the kernel
42354+ * part never changes.
42355+ */
42356+ boundary = pgd_index(__PAGE_OFFSET);
42357+ memset(pgd, 0, boundary * sizeof(pgd_t));
42358+ memcpy(pgd + boundary,
42359+ init_level4_pgt + boundary,
42360+ (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
42361+
42362+ memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
42363+ /*
42364+ * Set level3_user_pgt for vsyscall area
42365+ */
42366+ __user_pgd(pgd)[pgd_index(VSYSCALL_START)] =
42367+ __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
42368+ return pgd;
42369+}
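/*
 * Resulting layout of the order-1 allocation made by pgd_alloc() above:
 * page 0 is the kernel PGD (user slots zeroed, kernel slots copied from
 * init_level4_pgt); page 1 is the user PGD (zeroed except for the vsyscall
 * slot) and is reached via __user_pgd(pgd), i.e. pgd + PTRS_PER_PGD.
 */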
42370+
42371+static inline void pgd_free(pgd_t *pgd)
42372+{
42373+ pte_t *ptep = virt_to_ptep(pgd);
42374+
42375+ if (!pte_write(*ptep)) {
42376+ xen_pgd_unpin(__pa(pgd));
42377+ BUG_ON(HYPERVISOR_update_va_mapping(
42378+ (unsigned long)pgd,
42379+ pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
42380+ 0));
42381+ }
42382+
42383+ ptep = virt_to_ptep(__user_pgd(pgd));
42384+
42385+ if (!pte_write(*ptep)) {
42386+ xen_pgd_unpin(__pa(__user_pgd(pgd)));
42387+ BUG_ON(HYPERVISOR_update_va_mapping(
42388+ (unsigned long)__user_pgd(pgd),
42389+ pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT,
42390+ PAGE_KERNEL),
42391+ 0));
42392+ }
42393+
42394+ pgd_list_del(pgd);
42395+ free_pages((unsigned long)pgd, 1);
42396+}
42397+
42398+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
42399+{
42400+ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
42401+ if (pte)
42402+ make_page_readonly(pte, XENFEAT_writable_page_tables);
42403+
42404+ return pte;
42405+}
42406+
42407+/* Should really implement gc for free page table pages. This could be
42408+ done with a reference count in struct page. */
42409+
42410+static inline void pte_free_kernel(pte_t *pte)
42411+{
42412+ BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
42413+ make_page_writable(pte, XENFEAT_writable_page_tables);
42414+ free_page((unsigned long)pte);
42415+}
42416+
42417+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
42418+#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
42419+#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
42420+
42421+#endif /* _X86_64_PGALLOC_H */
42422Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable_64.h
42423===================================================================
42424--- /dev/null 1970-01-01 00:00:00.000000000 +0000
42425+++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable_64.h 2008-07-21 11:00:33.000000000 +0200
42426@@ -0,0 +1,583 @@
42427+#ifndef _X86_64_PGTABLE_H
42428+#define _X86_64_PGTABLE_H
42429+
42430+/*
42431+ * This file contains the functions and defines necessary to modify and use
42432+ * the x86-64 page table tree.
42433+ */
42434+#include <asm/processor.h>
42435+#include <asm/fixmap.h>
42436+#include <asm/bitops.h>
42437+#include <linux/threads.h>
42438+#include <linux/sched.h>
42439+#include <asm/pda.h>
42440+#ifdef CONFIG_XEN
42441+#include <asm/hypervisor.h>
42442+
42443+extern pud_t level3_user_pgt[512];
42444+
42445+extern void xen_init_pt(void);
42446+
42447+extern pte_t *lookup_address(unsigned long address);
42448+
42449+#define virt_to_ptep(va) \
42450+({ \
42451+ pte_t *__ptep = lookup_address((unsigned long)(va)); \
42452+ BUG_ON(!__ptep || !pte_present(*__ptep)); \
42453+ __ptep; \
42454+})
42455+
42456+#define arbitrary_virt_to_machine(va) \
42457+ (((maddr_t)pte_mfn(*virt_to_ptep(va)) << PAGE_SHIFT) \
42458+ | ((unsigned long)(va) & (PAGE_SIZE - 1)))
42459+#endif
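/*
 * Usage sketch for the helpers above ('p' is any kernel virtual address with
 * a present mapping, e.g. from vmalloc): virt_to_machine() is only valid for
 * the direct mapping, since it goes through __pa(); for other regions the
 * page tables have to be walked, which is what arbitrary_virt_to_machine()
 * does via lookup_address().
 *
 *	maddr_t m = arbitrary_virt_to_machine(p);
 */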
42460+
42461+extern pud_t level3_kernel_pgt[512];
42462+extern pud_t level3_physmem_pgt[512];
42463+extern pud_t level3_ident_pgt[512];
42464+extern pmd_t level2_kernel_pgt[512];
42465+extern pgd_t init_level4_pgt[];
42466+extern pgd_t boot_level4_pgt[];
42467+extern unsigned long __supported_pte_mask;
42468+
42469+#define swapper_pg_dir init_level4_pgt
42470+
42471+extern int nonx_setup(char *str);
42472+extern void paging_init(void);
42473+extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
42474+
42475+extern unsigned long pgkern_mask;
42476+
42477+/*
42478+ * ZERO_PAGE is a global shared page that is always zero: used
42479+ * for zero-mapped memory areas etc..
42480+ */
42481+extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
42482+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
42483+
42484+/*
42485+ * PGDIR_SHIFT determines what a top-level page table entry can map
42486+ */
42487+#define PGDIR_SHIFT 39
42488+#define PTRS_PER_PGD 512
42489+
42490+/*
42491+ * 3rd level page
42492+ */
42493+#define PUD_SHIFT 30
42494+#define PTRS_PER_PUD 512
42495+
42496+/*
42497+ * PMD_SHIFT determines the size of the area a middle-level
42498+ * page table can map
42499+ */
42500+#define PMD_SHIFT 21
42501+#define PTRS_PER_PMD 512
42502+
42503+/*
42504+ * entries per page directory level
42505+ */
42506+#define PTRS_PER_PTE 512
42507+
42508+#define pte_ERROR(e) \
42509+ printk("%s:%d: bad pte %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
42510+ &(e), __pte_val(e), pte_pfn(e))
42511+#define pmd_ERROR(e) \
42512+ printk("%s:%d: bad pmd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
42513+ &(e), __pmd_val(e), pmd_pfn(e))
42514+#define pud_ERROR(e) \
42515+ printk("%s:%d: bad pud %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
42516+ &(e), __pud_val(e), (pud_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
42517+#define pgd_ERROR(e) \
42518+ printk("%s:%d: bad pgd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
42519+ &(e), __pgd_val(e), (pgd_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
42520+
42521+#define pgd_none(x) (!__pgd_val(x))
42522+#define pud_none(x) (!__pud_val(x))
42523+
42524+static inline void set_pte(pte_t *dst, pte_t val)
42525+{
42526+ *dst = val;
42527+}
42528+
42529+#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
42530+#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
42531+#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
42532+
42533+static inline void pud_clear (pud_t * pud)
42534+{
42535+ set_pud(pud, __pud(0));
42536+}
42537+
42538+#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
42539+
42540+static inline void pgd_clear (pgd_t * pgd)
42541+{
42542+ set_pgd(pgd, __pgd(0));
42543+ set_pgd(__user_pgd(pgd), __pgd(0));
42544+}
42545+
42546+#define pud_page(pud) \
42547+ ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
42548+
42549+#define pte_same(a, b) ((a).pte == (b).pte)
42550+
42551+#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
42552+
42553+#define PMD_SIZE (1UL << PMD_SHIFT)
42554+#define PMD_MASK (~(PMD_SIZE-1))
42555+#define PUD_SIZE (1UL << PUD_SHIFT)
42556+#define PUD_MASK (~(PUD_SIZE-1))
42557+#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
42558+#define PGDIR_MASK (~(PGDIR_SIZE-1))
42559+
42560+#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
42561+#define FIRST_USER_ADDRESS 0
42562+
42563+#ifndef __ASSEMBLY__
42564+#define MAXMEM 0x3fffffffffffUL
42565+#define VMALLOC_START 0xffffc20000000000UL
42566+#define VMALLOC_END 0xffffe1ffffffffffUL
42567+#define MODULES_VADDR 0xffffffff88000000UL
42568+#define MODULES_END 0xfffffffffff00000UL
42569+#define MODULES_LEN (MODULES_END - MODULES_VADDR)
42570+
42571+#define _PAGE_BIT_PRESENT 0
42572+#define _PAGE_BIT_RW 1
42573+#define _PAGE_BIT_USER 2
42574+#define _PAGE_BIT_PWT 3
42575+#define _PAGE_BIT_PCD 4
42576+#define _PAGE_BIT_ACCESSED 5
42577+#define _PAGE_BIT_DIRTY 6
42578+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
42579+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
42580+#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
42581+
42582+#define _PAGE_PRESENT 0x001
42583+#define _PAGE_RW 0x002
42584+#define _PAGE_USER 0x004
42585+#define _PAGE_PWT 0x008
42586+#define _PAGE_PCD 0x010
42587+#define _PAGE_ACCESSED 0x020
42588+#define _PAGE_DIRTY 0x040
42589+#define _PAGE_PSE 0x080 /* 2MB page */
42590+#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
42591+#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
42592+
42593+#define _PAGE_PROTNONE 0x080 /* If not present */
42594+#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
42595+
42596+/* Mapped page is I/O or foreign and has no associated page struct. */
42597+#define _PAGE_IO 0x200
42598+
42599+#if CONFIG_XEN_COMPAT <= 0x030002
42600+extern unsigned int __kernel_page_user;
42601+#else
42602+#define __kernel_page_user 0
42603+#endif
42604+
42605+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
42606+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user)
42607+
42608+#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_IO)
42609+
42610+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
42611+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
42612+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
42613+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
42614+#define PAGE_COPY PAGE_COPY_NOEXEC
42615+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
42616+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
42617+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
42618+#define __PAGE_KERNEL \
42619+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
42620+#define __PAGE_KERNEL_EXEC \
42621+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user)
42622+#define __PAGE_KERNEL_NOCACHE \
42623+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
42624+#define __PAGE_KERNEL_RO \
42625+ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
42626+#define __PAGE_KERNEL_VSYSCALL \
42627+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
42628+#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
42629+ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
42630+#define __PAGE_KERNEL_LARGE \
42631+ (__PAGE_KERNEL | _PAGE_PSE)
42632+#define __PAGE_KERNEL_LARGE_EXEC \
42633+ (__PAGE_KERNEL_EXEC | _PAGE_PSE)
42634+
42635+/*
42636+ * We don't support GLOBAL pages in xenolinux64
42637+ */
42638+#define MAKE_GLOBAL(x) __pgprot((x))
42639+
42640+#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
42641+#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
42642+#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
42643+#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
42644+#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
42645+#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
42646+#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
42647+#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
42648+
42649+/* xwr */
42650+#define __P000 PAGE_NONE
42651+#define __P001 PAGE_READONLY
42652+#define __P010 PAGE_COPY
42653+#define __P011 PAGE_COPY
42654+#define __P100 PAGE_READONLY_EXEC
42655+#define __P101 PAGE_READONLY_EXEC
42656+#define __P110 PAGE_COPY_EXEC
42657+#define __P111 PAGE_COPY_EXEC
42658+
42659+#define __S000 PAGE_NONE
42660+#define __S001 PAGE_READONLY
42661+#define __S010 PAGE_SHARED
42662+#define __S011 PAGE_SHARED
42663+#define __S100 PAGE_READONLY_EXEC
42664+#define __S101 PAGE_READONLY_EXEC
42665+#define __S110 PAGE_SHARED_EXEC
42666+#define __S111 PAGE_SHARED_EXEC
42667+
42668+static inline unsigned long pgd_bad(pgd_t pgd)
42669+{
42670+ unsigned long val = __pgd_val(pgd);
42671+ val &= ~PTE_MASK;
42672+ val &= ~(_PAGE_USER | _PAGE_DIRTY);
42673+ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
42674+}
42675+
42676+static inline unsigned long pud_bad(pud_t pud)
42677+{
42678+ unsigned long val = __pud_val(pud);
42679+ val &= ~PTE_MASK;
42680+ val &= ~(_PAGE_USER | _PAGE_DIRTY);
42681+ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
42682+}
42683+
42684+#define set_pte_at(_mm,addr,ptep,pteval) do { \
42685+ if (((_mm) != current->mm && (_mm) != &init_mm) || \
42686+ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
42687+ set_pte((ptep), (pteval)); \
42688+} while (0)
42689+
42690+#define pte_none(x) (!(x).pte)
42691+#define pte_present(x) ((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
42692+#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
42693+
42694+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
42695+
42696+#define __pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
42697+#define pte_mfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
42698+ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
42699+#define pte_pfn(_pte) ((_pte).pte & _PAGE_IO ? end_pfn : \
42700+ (_pte).pte & _PAGE_PRESENT ? \
42701+ mfn_to_local_pfn(__pte_mfn(_pte)) : \
42702+ __pte_mfn(_pte))
42703+
42704+#define pte_page(x) pfn_to_page(pte_pfn(x))
42705+
42706+static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
42707+{
42708+ unsigned long pte = page_nr << PAGE_SHIFT;
42709+ pte |= pgprot_val(pgprot);
42710+ pte &= __supported_pte_mask;
42711+ return __pte(pte);
42712+}
42713+
42714+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
42715+{
42716+ pte_t pte = *ptep;
42717+ if (!pte_none(pte)) {
42718+ if ((mm != &init_mm) ||
42719+ HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
42720+ pte = __pte_ma(xchg(&ptep->pte, 0));
42721+ }
42722+ return pte;
42723+}
42724+
42725+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
42726+{
42727+ if (full) {
42728+ pte_t pte = *ptep;
42729+ if (mm->context.pinned)
42730+ xen_l1_entry_update(ptep, __pte(0));
42731+ else
42732+ *ptep = __pte(0);
42733+ return pte;
42734+ }
42735+ return ptep_get_and_clear(mm, addr, ptep);
42736+}
42737+
42738+#define ptep_clear_flush(vma, addr, ptep) \
42739+({ \
42740+ pte_t *__ptep = (ptep); \
42741+ pte_t __res = *__ptep; \
42742+ if (!pte_none(__res) && \
42743+ ((vma)->vm_mm != current->mm || \
42744+ HYPERVISOR_update_va_mapping(addr, __pte(0), \
42745+ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
42746+ UVMF_INVLPG|UVMF_MULTI))) { \
42747+ __ptep->pte = 0; \
42748+ flush_tlb_page(vma, addr); \
42749+ } \
42750+ __res; \
42751+})
42752+
42753+/*
42754+ * The following only work if pte_present() is true.
42755+ * Undefined behaviour if not.
42756+ */
42757+#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
42758+static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
42759+static inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
42760+static inline int pte_exec(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
42761+static inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; }
42762+static inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; }
42763+static inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; }
42764+static inline int pte_file(pte_t pte) { return __pte_val(pte) & _PAGE_FILE; }
42765+static inline int pte_huge(pte_t pte) { return __pte_val(pte) & _PAGE_PSE; }
42766+
42767+static inline pte_t pte_rdprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
42768+static inline pte_t pte_exprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
42769+static inline pte_t pte_mkclean(pte_t pte) { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
42770+static inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
42771+static inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; }
42772+static inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
42773+static inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
42774+static inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
42775+static inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
42776+static inline pte_t pte_mkwrite(pte_t pte) { __pte_val(pte) |= _PAGE_RW; return pte; }
42777+static inline pte_t pte_mkhuge(pte_t pte) { __pte_val(pte) |= _PAGE_PSE; return pte; }
42778+
42779+#define ptep_test_and_clear_dirty(vma, addr, ptep) \
42780+({ \
42781+ pte_t __pte = *(ptep); \
42782+ int __ret = pte_dirty(__pte); \
42783+ if (__ret) \
42784+ set_pte_at((vma)->vm_mm, addr, ptep, pte_mkclean(__pte)); \
42785+ __ret; \
42786+})
42787+
42788+#define ptep_test_and_clear_young(vma, addr, ptep) \
42789+({ \
42790+ pte_t __pte = *(ptep); \
42791+ int __ret = pte_young(__pte); \
42792+ if (__ret) \
42793+ set_pte_at((vma)->vm_mm, addr, ptep, pte_mkold(__pte)); \
42794+ __ret; \
42795+})
42796+
42797+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
42798+{
42799+ pte_t pte = *ptep;
42800+ if (pte_write(pte))
42801+ set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
42802+}
42803+
42804+/*
42805+ * Macro to mark a page protection value as "uncacheable".
42806+ */
42807+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
42808+
42809+static inline int pmd_large(pmd_t pte) {
42810+ return (__pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
42811+}
42812+
42813+
42814+/*
42815+ * Conversion functions: convert a page and protection to a page entry,
42816+ * and a page entry and page directory to the page they refer to.
42817+ */
42818+
42819+/*
42820+ * Level 4 access.
42821+ * Never use these in the common code.
42822+ */
42823+#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
42824+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
42825+#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
42826+#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
42827+#define pgd_present(pgd) (__pgd_val(pgd) & _PAGE_PRESENT)
42828+#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
42829+
42830+/* PUD - Level3 access */
42831+/* to find an entry in a page-table-directory. */
42832+#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
42833+#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
42834+#define pud_present(pud) (__pud_val(pud) & _PAGE_PRESENT)
42835+
42836+/* PMD - Level 2 access */
42837+#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
42838+#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
42839+
42840+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
42841+#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
42842+ pmd_index(address))
42843+#define pmd_none(x) (!__pmd_val(x))
42844+#if CONFIG_XEN_COMPAT <= 0x030002
42845+/* pmd_present doesn't just test the _PAGE_PRESENT bit since wr.p.t.
42846+ can temporarily clear it. */
42847+#define pmd_present(x) (__pmd_val(x))
42848+#else
42849+#define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT)
42850+#endif
42851+#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
42852+#define pmd_bad(x) ((__pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \
42853+ != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT)))
42854+#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
42855+#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
42856+
42857+#define pte_to_pgoff(pte) ((__pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
42858+#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
42859+#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
42860+
42861+/* PTE - Level 1 access. */
42862+
42863+/* page, protection -> pte */
42864+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
42865+#define mk_pte_huge(entry) (__pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
42866+
42867+/* physical address -> PTE */
42868+static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
42869+{
42870+ unsigned long pteval;
42871+ pteval = physpage | pgprot_val(pgprot);
42872+ return __pte(pteval);
42873+}
42874+
42875+/* Change flags of a PTE */
42876+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
42877+{
42878+ /*
42879+ * Since this might change the present bit (which controls whether
42880+ * a pte_t object has undergone p2m translation), we must use
42881+ * pte_val() on the input pte and __pte() for the return value.
42882+ */
42883+ unsigned long pteval = pte_val(pte);
42884+
42885+ pteval &= _PAGE_CHG_MASK;
42886+ pteval |= pgprot_val(newprot);
42887+ pteval &= __supported_pte_mask;
42888+ return __pte(pteval);
42889+}
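/*
 * Sketch of the rule stated above ('pte' is illustrative): helpers that only
 * flip flag bits (pte_wrprotect() etc.) may poke __pte_val() directly, but a
 * transformation that can change _PAGE_PRESENT has to decode with pte_val()
 * and re-encode with __pte(), because the present bit decides whether the
 * stored frame is a machine or a pseudo-physical one.
 *
 *	pte = pte_modify(pte, PAGE_NONE);	// may clear _PAGE_PRESENT: safe here
 */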
42890+
42891+#define pte_index(address) \
42892+ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
42893+#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
42894+ pte_index(address))
42895+
42896+/* x86-64 always has all page tables mapped. */
42897+#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
42898+#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
42899+#define pte_unmap(pte) /* NOP */
42900+#define pte_unmap_nested(pte) /* NOP */
42901+
42902+#define update_mmu_cache(vma,address,pte) do { } while (0)
42903+
42904+/*
42905+ * Rules for using ptep_establish: the pte MUST be a user pte, and
42906+ * must be a present->present transition.
42907+ */
42908+#define __HAVE_ARCH_PTEP_ESTABLISH
42909+#define ptep_establish(vma, address, ptep, pteval) \
42910+ do { \
42911+ if ( likely((vma)->vm_mm == current->mm) ) { \
42912+ BUG_ON(HYPERVISOR_update_va_mapping(address, \
42913+ pteval, \
42914+ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
42915+ UVMF_INVLPG|UVMF_MULTI)); \
42916+ } else { \
42917+ xen_l1_entry_update(ptep, pteval); \
42918+ flush_tlb_page(vma, address); \
42919+ } \
42920+ } while (0)
42921+
42922+/* We only update the dirty/accessed state if we set
42923+ * the dirty bit by hand in the kernel, since the hardware
42924+ * will do the accessed bit for us, and we don't want to
42925+ * race with other CPU's that might be updating the dirty
42926+ * bit at the same time. */
42927+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
42928+#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
42929+ do { \
42930+ if (dirty) \
42931+ ptep_establish(vma, address, ptep, entry); \
42932+ } while (0)
42933+
42934+/* Encode and de-code a swap entry */
42935+#define __swp_type(x) (((x).val >> 1) & 0x3f)
42936+#define __swp_offset(x) ((x).val >> 8)
42937+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
42938+#define __pte_to_swp_entry(pte) ((swp_entry_t) { __pte_val(pte) })
42939+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
42940+
42941+extern spinlock_t pgd_lock;
42942+extern struct page *pgd_list;
42943+void vmalloc_sync_all(void);
42944+
42945+#endif /* !__ASSEMBLY__ */
42946+
42947+extern int kern_addr_valid(unsigned long addr);
42948+
42949+#define DOMID_LOCAL (0xFFFFU)
42950+
42951+struct vm_area_struct;
42952+
42953+int direct_remap_pfn_range(struct vm_area_struct *vma,
42954+ unsigned long address,
42955+ unsigned long mfn,
42956+ unsigned long size,
42957+ pgprot_t prot,
42958+ domid_t domid);
42959+
42960+int direct_kernel_remap_pfn_range(unsigned long address,
42961+ unsigned long mfn,
42962+ unsigned long size,
42963+ pgprot_t prot,
42964+ domid_t domid);
42965+
42966+int create_lookup_pte_addr(struct mm_struct *mm,
42967+ unsigned long address,
42968+ uint64_t *ptep);
42969+
42970+int touch_pte_range(struct mm_struct *mm,
42971+ unsigned long address,
42972+ unsigned long size);
42973+
42974+int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
42975+ unsigned long addr, unsigned long end, pgprot_t newprot);
42976+
42977+#define arch_change_pte_range(mm, pmd, addr, end, newprot) \
42978+ xen_change_pte_range(mm, pmd, addr, end, newprot)
42979+
42980+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
42981+ direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
42982+
42983+#define MK_IOSPACE_PFN(space, pfn) (pfn)
42984+#define GET_IOSPACE(pfn) 0
42985+#define GET_PFN(pfn) (pfn)
42986+
42987+#define HAVE_ARCH_UNMAPPED_AREA
42988+
42989+#define pgtable_cache_init() do { } while (0)
42990+#define check_pgt_cache() do { } while (0)
42991+
42992+#define PAGE_AGP PAGE_KERNEL_NOCACHE
42993+#define HAVE_PAGE_AGP 1
42994+
42995+/* fs/proc/kcore.c */
42996+#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
42997+#define kc_offset_to_vaddr(o) \
42998+ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
42999+
43000+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
43001+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
43002+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
43003+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
43004+#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
43005+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
43006+#define __HAVE_ARCH_PTE_SAME
43007+#include <asm-generic/pgtable.h>
43008+
43009+#endif /* _X86_64_PGTABLE_H */
43010Index: head-2008-11-25/include/asm-x86/mach-xen/asm/processor_64.h
43011===================================================================
43012--- /dev/null 1970-01-01 00:00:00.000000000 +0000
43013+++ head-2008-11-25/include/asm-x86/mach-xen/asm/processor_64.h 2008-03-06 08:54:32.000000000 +0100
43014@@ -0,0 +1,502 @@
43015+/*
43016+ * include/asm-x86_64/processor.h
43017+ *
43018+ * Copyright (C) 1994 Linus Torvalds
43019+ */
43020+
43021+#ifndef __ASM_X86_64_PROCESSOR_H
43022+#define __ASM_X86_64_PROCESSOR_H
43023+
43024+#include <asm/segment.h>
43025+#include <asm/page.h>
43026+#include <asm/types.h>
43027+#include <asm/sigcontext.h>
43028+#include <asm/cpufeature.h>
43029+#include <linux/threads.h>
43030+#include <asm/msr.h>
43031+#include <asm/current.h>
43032+#include <asm/system.h>
43033+#include <asm/mmsegment.h>
43034+#include <asm/percpu.h>
43035+#include <linux/personality.h>
43036+#include <linux/cpumask.h>
43037+
43038+#define TF_MASK 0x00000100
43039+#define IF_MASK 0x00000200
43040+#define IOPL_MASK 0x00003000
43041+#define NT_MASK 0x00004000
43042+#define VM_MASK 0x00020000
43043+#define AC_MASK 0x00040000
43044+#define VIF_MASK 0x00080000 /* virtual interrupt flag */
43045+#define VIP_MASK 0x00100000 /* virtual interrupt pending */
43046+#define ID_MASK 0x00200000
43047+
43048+#define desc_empty(desc) \
43049+ (!((desc)->a | (desc)->b))
43050+
43051+#define desc_equal(desc1, desc2) \
43052+ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
43053+
43054+/*
43055+ * Default implementation of macro that returns current
43056+ * instruction pointer ("program counter").
43057+ */
43058+#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
43059+
43060+/*
43061+ * CPU type and hardware bug flags. Kept separately for each CPU.
43062+ */
43063+
43064+struct cpuinfo_x86 {
43065+ __u8 x86; /* CPU family */
43066+ __u8 x86_vendor; /* CPU vendor */
43067+ __u8 x86_model;
43068+ __u8 x86_mask;
43069+ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
43070+ __u32 x86_capability[NCAPINTS];
43071+ char x86_vendor_id[16];
43072+ char x86_model_id[64];
43073+ int x86_cache_size; /* in KB */
43074+ int x86_clflush_size;
43075+ int x86_cache_alignment;
43076+ int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined(in pages)*/
43077+ __u8 x86_virt_bits, x86_phys_bits;
43078+ __u8 x86_max_cores; /* cpuid returned max cores value */
43079+ __u32 x86_power;
43080+ __u32 extended_cpuid_level; /* Max extended CPUID function supported */
43081+ unsigned long loops_per_jiffy;
43082+#ifdef CONFIG_SMP
43083+ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
43084+#endif
43085+ __u8 apicid;
43086+#ifdef CONFIG_SMP
43087+ __u8 booted_cores; /* number of cores as seen by OS */
43088+ __u8 phys_proc_id; /* Physical Processor id. */
43089+ __u8 cpu_core_id; /* Core id. */
43090+#endif
43091+} ____cacheline_aligned;
43092+
43093+#define X86_VENDOR_INTEL 0
43094+#define X86_VENDOR_CYRIX 1
43095+#define X86_VENDOR_AMD 2
43096+#define X86_VENDOR_UMC 3
43097+#define X86_VENDOR_NEXGEN 4
43098+#define X86_VENDOR_CENTAUR 5
43099+#define X86_VENDOR_RISE 6
43100+#define X86_VENDOR_TRANSMETA 7
43101+#define X86_VENDOR_NUM 8
43102+#define X86_VENDOR_UNKNOWN 0xff
43103+
43104+#ifdef CONFIG_SMP
43105+extern struct cpuinfo_x86 cpu_data[];
43106+#define current_cpu_data cpu_data[smp_processor_id()]
43107+#else
43108+#define cpu_data (&boot_cpu_data)
43109+#define current_cpu_data boot_cpu_data
43110+#endif
43111+
43112+extern char ignore_irq13;
43113+
43114+extern void identify_cpu(struct cpuinfo_x86 *);
43115+extern void print_cpu_info(struct cpuinfo_x86 *);
43116+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
43117+extern unsigned short num_cache_leaves;
43118+
43119+/*
43120+ * EFLAGS bits
43121+ */
43122+#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
43123+#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
43124+#define X86_EFLAGS_AF	0x00000010 /* Auxiliary carry Flag */
43125+#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
43126+#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
43127+#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
43128+#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
43129+#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
43130+#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
43131+#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
43132+#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
43133+#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
43134+#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
43135+#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
43136+#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
43137+#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
43138+#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
43139+
43140+/*
43141+ * Intel CPU features in CR4
43142+ */
43143+#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
43144+#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
43145+#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
43146+#define X86_CR4_DE 0x0008 /* enable debugging extensions */
43147+#define X86_CR4_PSE 0x0010 /* enable page size extensions */
43148+#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
43149+#define X86_CR4_MCE 0x0040 /* Machine check enable */
43150+#define X86_CR4_PGE 0x0080 /* enable global pages */
43151+#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
43152+#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
43153+#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
43154+
43155+/*
43156+ * Save the cr4 feature set we're using (ie
43157+ * Pentium 4MB enable and PPro Global page
43158+ * enable), so that any CPU's that boot up
43159+ * after us can get the correct flags.
43160+ */
43161+extern unsigned long mmu_cr4_features;
43162+
43163+static inline void set_in_cr4 (unsigned long mask)
43164+{
43165+ mmu_cr4_features |= mask;
43166+ __asm__("movq %%cr4,%%rax\n\t"
43167+ "orq %0,%%rax\n\t"
43168+ "movq %%rax,%%cr4\n"
43169+ : : "irg" (mask)
43170+ :"ax");
43171+}
43172+
43173+static inline void clear_in_cr4 (unsigned long mask)
43174+{
43175+ mmu_cr4_features &= ~mask;
43176+ __asm__("movq %%cr4,%%rax\n\t"
43177+ "andq %0,%%rax\n\t"
43178+ "movq %%rax,%%cr4\n"
43179+ : : "irg" (~mask)
43180+ :"ax");
43181+}
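/*
 * Usage sketch (the chosen bit is only an example): callers flip CR4
 * features during CPU setup, and the helpers above also record the bit in
 * mmu_cr4_features so that CPUs brought up later apply the same value.
 *
 *	set_in_cr4(X86_CR4_OSFXSR);	// enable fast FPU save/restore
 */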
43182+
43183+
43184+/*
43185+ * User space process size. 47bits minus one guard page.
43186+ */
43187+#define TASK_SIZE64 (0x800000000000UL - 4096)
43188+
43189+/* This decides where the kernel will search for a free chunk of vm
43190+ * space during mmap's.
43191+ */
43192+#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
43193+
43194+#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
43195+#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
43196+
43197+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
43198+
43199+/*
43200+ * Size of io_bitmap.
43201+ */
43202+#define IO_BITMAP_BITS 65536
43203+#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
43204+#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
43205+#ifndef CONFIG_X86_NO_TSS
43206+#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
43207+#endif
43208+#define INVALID_IO_BITMAP_OFFSET 0x8000
43209+
43210+struct i387_fxsave_struct {
43211+ u16 cwd;
43212+ u16 swd;
43213+ u16 twd;
43214+ u16 fop;
43215+ u64 rip;
43216+ u64 rdp;
43217+ u32 mxcsr;
43218+ u32 mxcsr_mask;
43219+ u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
43220+ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 128 bytes */
43221+ u32 padding[24];
43222+} __attribute__ ((aligned (16)));
43223+
43224+union i387_union {
43225+ struct i387_fxsave_struct fxsave;
43226+};
43227+
43228+#ifndef CONFIG_X86_NO_TSS
43229+struct tss_struct {
43230+ u32 reserved1;
43231+ u64 rsp0;
43232+ u64 rsp1;
43233+ u64 rsp2;
43234+ u64 reserved2;
43235+ u64 ist[7];
43236+ u32 reserved3;
43237+ u32 reserved4;
43238+ u16 reserved5;
43239+ u16 io_bitmap_base;
43240+ /*
43241+ * The extra 1 is there because the CPU will access an
43242+ * additional byte beyond the end of the IO permission
43243+ * bitmap. The extra byte must be all 1 bits, and must
43244+ * be within the limit. Thus we have:
43245+ *
43246+ * 128 bytes, the bitmap itself, for ports 0..0x3ff
43247+ * 8 bytes, for an extra "long" of ~0UL
43248+ */
43249+ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
43250+} __attribute__((packed)) ____cacheline_aligned;
43251+
43252+DECLARE_PER_CPU(struct tss_struct,init_tss);
43253+#endif
43254+
43255+
43256+extern struct cpuinfo_x86 boot_cpu_data;
43257+#ifndef CONFIG_X86_NO_TSS
43258+/* Save the original ist values for checking stack pointers during debugging */
43259+struct orig_ist {
43260+ unsigned long ist[7];
43261+};
43262+DECLARE_PER_CPU(struct orig_ist, orig_ist);
43263+#endif
43264+
43265+#ifdef CONFIG_X86_VSMP
43266+#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
43267+#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
43268+#else
43269+#define ARCH_MIN_TASKALIGN 16
43270+#define ARCH_MIN_MMSTRUCT_ALIGN 0
43271+#endif
43272+
43273+struct thread_struct {
43274+ unsigned long rsp0;
43275+ unsigned long rsp;
43276+ unsigned long userrsp; /* Copy from PDA */
43277+ unsigned long fs;
43278+ unsigned long gs;
43279+ unsigned short es, ds, fsindex, gsindex;
43280+/* Hardware debugging registers */
43281+ unsigned long debugreg0;
43282+ unsigned long debugreg1;
43283+ unsigned long debugreg2;
43284+ unsigned long debugreg3;
43285+ unsigned long debugreg6;
43286+ unsigned long debugreg7;
43287+/* fault info */
43288+ unsigned long cr2, trap_no, error_code;
43289+/* floating point info */
43290+ union i387_union i387 __attribute__((aligned(16)));
43291+/* IO permissions. The bitmap could be moved into the GDT; that would make
43292+ the switch faster for a limited number of ioperm-using tasks. -AK */
43293+ int ioperm;
43294+ unsigned long *io_bitmap_ptr;
43295+ unsigned io_bitmap_max;
43296+/* cached TLS descriptors. */
43297+ u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
43298+ unsigned int iopl;
43299+} __attribute__((aligned(16)));
43300+
43301+#define INIT_THREAD { \
43302+ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
43303+}
43304+
43305+#ifndef CONFIG_X86_NO_TSS
43306+#define INIT_TSS { \
43307+ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
43308+}
43309+#endif
43310+
43311+#define INIT_MMAP \
43312+{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
43313+
43314+#define start_thread(regs,new_rip,new_rsp) do { \
43315+ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
43316+ load_gs_index(0); \
43317+ (regs)->rip = (new_rip); \
43318+ (regs)->rsp = (new_rsp); \
43319+ write_pda(oldrsp, (new_rsp)); \
43320+ (regs)->cs = __USER_CS; \
43321+ (regs)->ss = __USER_DS; \
43322+ (regs)->eflags = 0x200; \
43323+ set_fs(USER_DS); \
43324+} while(0)
43325+
43326+#define get_debugreg(var, register) \
43327+ var = HYPERVISOR_get_debugreg(register)
43328+#define set_debugreg(value, register) do { \
43329+ if (HYPERVISOR_set_debugreg(register, value)) \
43330+ BUG(); \
43331+} while (0)
43332+
43333+struct task_struct;
43334+struct mm_struct;
43335+
43336+/* Free all resources held by a thread. */
43337+extern void release_thread(struct task_struct *);
43338+
43339+/* Prepare to copy thread state - unlazy all lazy status */
43340+extern void prepare_to_copy(struct task_struct *tsk);
43341+
43342+/*
43343+ * create a kernel thread without removing it from tasklists
43344+ */
43345+extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
43346+
43347+/*
43348+ * Return saved PC of a blocked thread.
43349+ * What is this good for? It will always be the scheduler or ret_from_fork.
43350+ */
43351+#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
43352+
43353+extern unsigned long get_wchan(struct task_struct *p);
43354+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
43355+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
43356+#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
43357+
43358+
43359+struct microcode_header {
43360+ unsigned int hdrver;
43361+ unsigned int rev;
43362+ unsigned int date;
43363+ unsigned int sig;
43364+ unsigned int cksum;
43365+ unsigned int ldrver;
43366+ unsigned int pf;
43367+ unsigned int datasize;
43368+ unsigned int totalsize;
43369+ unsigned int reserved[3];
43370+};
43371+
43372+struct microcode {
43373+ struct microcode_header hdr;
43374+ unsigned int bits[0];
43375+};
43376+
43377+typedef struct microcode microcode_t;
43378+typedef struct microcode_header microcode_header_t;
43379+
43380+/* microcode format is extended from prescott processors */
43381+struct extended_signature {
43382+ unsigned int sig;
43383+ unsigned int pf;
43384+ unsigned int cksum;
43385+};
43386+
43387+struct extended_sigtable {
43388+ unsigned int count;
43389+ unsigned int cksum;
43390+ unsigned int reserved[3];
43391+ struct extended_signature sigs[0];
43392+};
43393+
43394+
43395+#define ASM_NOP1 K8_NOP1
43396+#define ASM_NOP2 K8_NOP2
43397+#define ASM_NOP3 K8_NOP3
43398+#define ASM_NOP4 K8_NOP4
43399+#define ASM_NOP5 K8_NOP5
43400+#define ASM_NOP6 K8_NOP6
43401+#define ASM_NOP7 K8_NOP7
43402+#define ASM_NOP8 K8_NOP8
43403+
43404+/* Opteron nops */
43405+#define K8_NOP1 ".byte 0x90\n"
43406+#define K8_NOP2 ".byte 0x66,0x90\n"
43407+#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
43408+#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
43409+#define K8_NOP5 K8_NOP3 K8_NOP2
43410+#define K8_NOP6 K8_NOP3 K8_NOP3
43411+#define K8_NOP7 K8_NOP4 K8_NOP3
43412+#define K8_NOP8 K8_NOP4 K8_NOP4
43413+
43414+#define ASM_NOP_MAX 8
43415+
43416+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
43417+static inline void rep_nop(void)
43418+{
43419+ __asm__ __volatile__("rep;nop": : :"memory");
43420+}
43421+
43422+/* Stop speculative execution */
43423+static inline void sync_core(void)
43424+{
43425+ int tmp;
43426+ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
43427+}
43428+
43429+#define cpu_has_fpu 1
43430+
43431+#define ARCH_HAS_PREFETCH
43432+static inline void prefetch(void *x)
43433+{
43434+ asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
43435+}
43436+
43437+#define ARCH_HAS_PREFETCHW 1
43438+static inline void prefetchw(void *x)
43439+{
43440+ alternative_input("prefetcht0 (%1)",
43441+ "prefetchw (%1)",
43442+ X86_FEATURE_3DNOW,
43443+ "r" (x));
43444+}
43445+
43446+#define ARCH_HAS_SPINLOCK_PREFETCH 1
43447+
43448+#define spin_lock_prefetch(x) prefetchw(x)
43449+
43450+#define cpu_relax() rep_nop()
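/*
 * Illustrative sketch added by the editor: cpu_relax() is what a busy-wait
 * loop should execute while polling shared state. "example_wait_for_flag"
 * and "flag" are made-up names for the example.
 */
static inline void example_wait_for_flag(volatile int *flag)
{
	while (!*flag)
		cpu_relax();	/* PAUSE: be polite to the sibling hyperthread */
}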
43451+
43452+/*
43453+ * NSC/Cyrix CPU configuration register indexes
43454+ */
43455+#define CX86_CCR0 0xc0
43456+#define CX86_CCR1 0xc1
43457+#define CX86_CCR2 0xc2
43458+#define CX86_CCR3 0xc3
43459+#define CX86_CCR4 0xe8
43460+#define CX86_CCR5 0xe9
43461+#define CX86_CCR6 0xea
43462+#define CX86_CCR7 0xeb
43463+#define CX86_DIR0 0xfe
43464+#define CX86_DIR1 0xff
43465+#define CX86_ARR_BASE 0xc4
43466+#define CX86_RCR_BASE 0xdc
43467+
43468+/*
43469+ * NSC/Cyrix CPU indexed register access macros
43470+ */
43471+
43472+#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
43473+
43474+#define setCx86(reg, data) do { \
43475+ outb((reg), 0x22); \
43476+ outb((data), 0x23); \
43477+} while (0)
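/*
 * Illustrative sketch added by the editor: the Cyrix configuration
 * registers are reached indirectly through I/O ports 0x22/0x23, so a
 * read-modify-write goes through getCx86()/setCx86(). The function name
 * and the bit argument are made up for the example.
 */
static inline void example_set_ccr3_bit(unsigned char bit)
{
	unsigned char ccr3 = getCx86(CX86_CCR3);

	setCx86(CX86_CCR3, ccr3 | bit);
}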
43478+
43479+static inline void serialize_cpu(void)
43480+{
43481+ __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
43482+}
43483+
43484+static inline void __monitor(const void *eax, unsigned long ecx,
43485+ unsigned long edx)
43486+{
43487+ /* "monitor %eax,%ecx,%edx;" */
43488+ asm volatile(
43489+ ".byte 0x0f,0x01,0xc8;"
43490+ : :"a" (eax), "c" (ecx), "d"(edx));
43491+}
43492+
43493+static inline void __mwait(unsigned long eax, unsigned long ecx)
43494+{
43495+ /* "mwait %eax,%ecx;" */
43496+ asm volatile(
43497+ ".byte 0x0f,0x01,0xc9;"
43498+ : :"a" (eax), "c" (ecx));
43499+}
43500+
43501+#define stack_current() \
43502+({ \
43503+ struct thread_info *ti; \
43504+ asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
43505+ ti->task; \
43506+})
43507+
43508+#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
43509+
43510+extern unsigned long boot_option_idle_override;
43511+/* Boot loader type from the setup header */
43512+extern int bootloader_type;
43513+
43514+#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
43515+
43516+#endif /* __ASM_X86_64_PROCESSOR_H */
43517Index: head-2008-11-25/include/asm-x86/mach-xen/asm/smp_64.h
43518===================================================================
43519--- /dev/null 1970-01-01 00:00:00.000000000 +0000
43520+++ head-2008-11-25/include/asm-x86/mach-xen/asm/smp_64.h 2007-06-12 13:14:13.000000000 +0200
43521@@ -0,0 +1,150 @@
43522+#ifndef __ASM_SMP_H
43523+#define __ASM_SMP_H
43524+
43525+/*
43526+ * We need the APIC definitions automatically as part of 'smp.h'
43527+ */
43528+#ifndef __ASSEMBLY__
43529+#include <linux/threads.h>
43530+#include <linux/cpumask.h>
43531+#include <linux/bitops.h>
43532+extern int disable_apic;
43533+#endif
43534+
43535+#ifdef CONFIG_X86_LOCAL_APIC
43536+#ifndef __ASSEMBLY__
43537+#include <asm/fixmap.h>
43538+#include <asm/mpspec.h>
43539+#ifdef CONFIG_X86_IO_APIC
43540+#include <asm/io_apic.h>
43541+#endif
43542+#include <asm/apic.h>
43543+#include <asm/thread_info.h>
43544+#endif
43545+#endif
43546+
43547+#ifdef CONFIG_SMP
43548+#ifndef ASSEMBLY
43549+
43550+#include <asm/pda.h>
43551+
43552+struct pt_regs;
43553+
43554+extern cpumask_t cpu_present_mask;
43555+extern cpumask_t cpu_possible_map;
43556+extern cpumask_t cpu_online_map;
43557+extern cpumask_t cpu_initialized;
43558+
43559+/*
43560+ * Private routines/data
43561+ */
43562+
43563+extern void smp_alloc_memory(void);
43564+extern volatile unsigned long smp_invalidate_needed;
43565+extern int pic_mode;
43566+extern void lock_ipi_call_lock(void);
43567+extern void unlock_ipi_call_lock(void);
43568+extern int smp_num_siblings;
43569+extern void smp_send_reschedule(int cpu);
43570+void smp_stop_cpu(void);
43571+extern int smp_call_function_single(int cpuid, void (*func) (void *info),
43572+ void *info, int retry, int wait);
43573+
43574+extern cpumask_t cpu_sibling_map[NR_CPUS];
43575+extern cpumask_t cpu_core_map[NR_CPUS];
43576+extern u8 cpu_llc_id[NR_CPUS];
43577+
43578+#define SMP_TRAMPOLINE_BASE 0x6000
43579+
43580+/*
43581+ * On x86 all CPUs are mapped 1:1 to the APIC space.
43582+ * This simplifies scheduling and IPI sending and
43583+ * compresses data structures.
43584+ */
43585+
43586+static inline int num_booting_cpus(void)
43587+{
43588+ return cpus_weight(cpu_possible_map);
43589+}
43590+
43591+#define raw_smp_processor_id() read_pda(cpunumber)
43592+
43593+#ifdef CONFIG_X86_LOCAL_APIC
43594+static inline int hard_smp_processor_id(void)
43595+{
43596+ /* we don't want to mark this access volatile - bad code generation */
43597+ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
43598+}
43599+#endif
43600+
43601+extern int safe_smp_processor_id(void);
43602+extern int __cpu_disable(void);
43603+extern void __cpu_die(unsigned int cpu);
43604+extern void prefill_possible_map(void);
43605+extern unsigned num_processors;
43606+extern unsigned disabled_cpus;
43607+
43608+#endif /* !ASSEMBLY */
43609+
43610+#define NO_PROC_ID 0xFF /* No processor magic marker */
43611+
43612+#endif
43613+
43614+#ifndef ASSEMBLY
43615+/*
43616+ * Some lowlevel functions might want to know about
43617+ * the real APIC ID <-> CPU # mapping.
43618+ */
43619+extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
43620+extern u8 x86_cpu_to_log_apicid[NR_CPUS];
43621+extern u8 bios_cpu_apicid[];
43622+
43623+#ifdef CONFIG_X86_LOCAL_APIC
43624+static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
43625+{
43626+ return cpus_addr(cpumask)[0];
43627+}
43628+
43629+static inline int cpu_present_to_apicid(int mps_cpu)
43630+{
43631+ if (mps_cpu < NR_CPUS)
43632+ return (int)bios_cpu_apicid[mps_cpu];
43633+ else
43634+ return BAD_APICID;
43635+}
43636+#endif
43637+
43638+#endif /* !ASSEMBLY */
43639+
43640+#ifndef CONFIG_SMP
43641+#define stack_smp_processor_id() 0
43642+#define safe_smp_processor_id() 0
43643+#define cpu_logical_map(x) (x)
43644+#else
43645+#include <asm/thread_info.h>
43646+#define stack_smp_processor_id() \
43647+({ \
43648+ struct thread_info *ti; \
43649+ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
43650+ ti->cpu; \
43651+})
43652+#endif
43653+
43654+#ifndef __ASSEMBLY__
43655+#ifdef CONFIG_X86_LOCAL_APIC
43656+static __inline int logical_smp_processor_id(void)
43657+{
43658+ /* we don't want to mark this access volatile - bad code generation */
43659+ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
43660+}
43661+#endif
43662+#endif
43663+
43664+#ifdef CONFIG_SMP
43665+#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
43666+#else
43667+#define cpu_physical_id(cpu) boot_cpu_id
43668+#endif
43669+
43670+#endif
43671+
43672Index: head-2008-11-25/include/asm-x86/mach-xen/asm/system_64.h
43673===================================================================
43674--- /dev/null 1970-01-01 00:00:00.000000000 +0000
43675+++ head-2008-11-25/include/asm-x86/mach-xen/asm/system_64.h 2007-11-26 16:59:25.000000000 +0100
43676@@ -0,0 +1,256 @@
43677+#ifndef __ASM_SYSTEM_H
43678+#define __ASM_SYSTEM_H
43679+
43680+#include <linux/kernel.h>
43681+#include <asm/segment.h>
43682+#include <asm/alternative.h>
43683+
43684+#include <asm/synch_bitops.h>
43685+#include <asm/hypervisor.h>
43686+#include <xen/interface/arch-x86_64.h>
43687+
43688+#ifdef __KERNEL__
43689+
43690+#define __STR(x) #x
43691+#define STR(x) __STR(x)
43692+
43693+#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
43694+#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
43695+
43696+/* frame pointer must be last for get_wchan */
43697+#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
43698+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\n\t"
43699+
43700+#define __EXTRA_CLOBBER \
43701+ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
43702+
43703+#define switch_to(prev,next,last) \
43704+ asm volatile(SAVE_CONTEXT \
43705+ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
43706+ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
43707+ "call __switch_to\n\t" \
43708+ ".globl thread_return\n" \
43709+ "thread_return:\n\t" \
43710+ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
43711+ "movq %P[thread_info](%%rsi),%%r8\n\t" \
43712+ LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
43713+ "movq %%rax,%%rdi\n\t" \
43714+ "jc ret_from_fork\n\t" \
43715+ RESTORE_CONTEXT \
43716+ : "=a" (last) \
43717+ : [next] "S" (next), [prev] "D" (prev), \
43718+ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
43719+ [ti_flags] "i" (offsetof(struct thread_info, flags)),\
43720+ [tif_fork] "i" (TIF_FORK), \
43721+ [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
43722+ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
43723+ : "memory", "cc" __EXTRA_CLOBBER)
43724+
43725+extern void load_gs_index(unsigned);
43726+
43727+/*
43728+ * Load a segment. Fall back on loading the zero
43729+ * segment if something goes wrong..
43730+ */
43731+#define loadsegment(seg,value) \
43732+ asm volatile("\n" \
43733+ "1:\t" \
43734+ "movl %k0,%%" #seg "\n" \
43735+ "2:\n" \
43736+ ".section .fixup,\"ax\"\n" \
43737+ "3:\t" \
43738+ "movl %1,%%" #seg "\n\t" \
43739+ "jmp 2b\n" \
43740+ ".previous\n" \
43741+ ".section __ex_table,\"a\"\n\t" \
43742+ ".align 8\n\t" \
43743+ ".quad 1b,3b\n" \
43744+ ".previous" \
43745+ : :"r" (value), "r" (0))
43746+
43747+/*
43748+ * Clear and set 'TS' bit respectively
43749+ */
43750+#define clts() (HYPERVISOR_fpu_taskswitch(0))
43751+
43752+static inline unsigned long read_cr0(void)
43753+{
43754+ unsigned long cr0;
43755+ asm volatile("movq %%cr0,%0" : "=r" (cr0));
43756+ return cr0;
43757+}
43758+
43759+static inline void write_cr0(unsigned long val)
43760+{
43761+ asm volatile("movq %0,%%cr0" :: "r" (val));
43762+}
43763+
43764+#define read_cr3() ({ \
43765+ unsigned long __dummy; \
43766+ asm("movq %%cr3,%0" : "=r" (__dummy)); \
43767+ machine_to_phys(__dummy); \
43768+})
43769+
43770+static inline unsigned long read_cr4(void)
43771+{
43772+ unsigned long cr4;
43773+ asm("movq %%cr4,%0" : "=r" (cr4));
43774+ return cr4;
43775+}
43776+
43777+static inline void write_cr4(unsigned long val)
43778+{
43779+ asm volatile("movq %0,%%cr4" :: "r" (val));
43780+}
43781+
43782+#define stts() (HYPERVISOR_fpu_taskswitch(1))
43783+
43784+#define wbinvd() \
43785+ __asm__ __volatile__ ("wbinvd": : :"memory");
43786+
43787+/*
43788+ * On SMP systems, when the scheduler does migration-cost autodetection,
43789+ * it needs a way to flush as much of the CPU's caches as possible.
43790+ */
43791+static inline void sched_cacheflush(void)
43792+{
43793+ wbinvd();
43794+}
43795+
43796+#endif /* __KERNEL__ */
43797+
43798+#define nop() __asm__ __volatile__ ("nop")
43799+
43800+#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
43801+
43802+#define tas(ptr) (xchg((ptr),1))
43803+
43804+#define __xg(x) ((volatile long *)(x))
43805+
43806+static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
43807+{
43808+ *ptr = val;
43809+}
43810+
43811+#define _set_64bit set_64bit
43812+
43813+/*
43814+ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
43815+ * Note 2: xchg has a side effect, so the volatile attribute is necessary;
43816+ * strictly speaking the constraints are incomplete, since *ptr is also an output argument. --ANK
43817+ */
43818+static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
43819+{
43820+ switch (size) {
43821+ case 1:
43822+ __asm__ __volatile__("xchgb %b0,%1"
43823+ :"=q" (x)
43824+ :"m" (*__xg(ptr)), "0" (x)
43825+ :"memory");
43826+ break;
43827+ case 2:
43828+ __asm__ __volatile__("xchgw %w0,%1"
43829+ :"=r" (x)
43830+ :"m" (*__xg(ptr)), "0" (x)
43831+ :"memory");
43832+ break;
43833+ case 4:
43834+ __asm__ __volatile__("xchgl %k0,%1"
43835+ :"=r" (x)
43836+ :"m" (*__xg(ptr)), "0" (x)
43837+ :"memory");
43838+ break;
43839+ case 8:
43840+ __asm__ __volatile__("xchgq %0,%1"
43841+ :"=r" (x)
43842+ :"m" (*__xg(ptr)), "0" (x)
43843+ :"memory");
43844+ break;
43845+ }
43846+ return x;
43847+}
43848+
43849+/*
43850+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
43851+ * store NEW in MEM. Return the initial value in MEM. Success is
43852+ * indicated by comparing RETURN with OLD.
43853+ */
43854+
43855+#define __HAVE_ARCH_CMPXCHG 1
43856+
43857+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
43858+ unsigned long new, int size)
43859+{
43860+ unsigned long prev;
43861+ switch (size) {
43862+ case 1:
43863+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
43864+ : "=a"(prev)
43865+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
43866+ : "memory");
43867+ return prev;
43868+ case 2:
43869+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
43870+ : "=a"(prev)
43871+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
43872+ : "memory");
43873+ return prev;
43874+ case 4:
43875+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
43876+ : "=a"(prev)
43877+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
43878+ : "memory");
43879+ return prev;
43880+ case 8:
43881+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
43882+ : "=a"(prev)
43883+ : "r"(new), "m"(*__xg(ptr)), "0"(old)
43884+ : "memory");
43885+ return prev;
43886+ }
43887+ return old;
43888+}
43889+
43890+#define cmpxchg(ptr,o,n)\
43891+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
43892+ (unsigned long)(n),sizeof(*(ptr))))
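/*
 * Illustrative sketch added by the editor: the canonical cmpxchg() usage
 * pattern is a read/compute/retry loop. "example_atomic_add" and "counter"
 * are made-up names for the example.
 */
static inline void example_atomic_add(volatile unsigned long *counter,
				      unsigned long delta)
{
	unsigned long old, new;

	do {
		old = *counter;
		new = old + delta;
		/* retry if another CPU changed *counter in the meantime */
	} while (cmpxchg(counter, old, new) != old);
}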
43893+
43894+#ifdef CONFIG_SMP
43895+#define smp_mb() mb()
43896+#define smp_rmb() rmb()
43897+#define smp_wmb() wmb()
43898+#define smp_read_barrier_depends() do {} while(0)
43899+#else
43900+#define smp_mb() barrier()
43901+#define smp_rmb() barrier()
43902+#define smp_wmb() barrier()
43903+#define smp_read_barrier_depends() do {} while(0)
43904+#endif
43905+
43906+
43907+/*
43908+ * Force strict CPU ordering.
43909+ * And yes, this is required on UP too when we're talking
43910+ * to devices.
43911+ */
43912+#define mb() asm volatile("mfence":::"memory")
43913+#define rmb() asm volatile("lfence":::"memory")
43914+
43915+#ifdef CONFIG_UNORDERED_IO
43916+#define wmb() asm volatile("sfence" ::: "memory")
43917+#else
43918+#define wmb() asm volatile("" ::: "memory")
43919+#endif
43920+#define read_barrier_depends() do {} while(0)
43921+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
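/*
 * Illustrative sketch added by the editor: the smp_wmb()/smp_rmb() pair
 * defined above is what keeps a payload-plus-flag handoff ordered between
 * CPUs. "data" and "ready" are made-up variables for the example.
 */
static inline void example_publish(int *data, int *ready, int value)
{
	*data = value;
	smp_wmb();		/* make the payload visible before the flag  */
	*ready = 1;
}

static inline int example_consume(int *data, int *ready)
{
	if (!*ready)
		return -1;
	smp_rmb();		/* order the flag read before the payload read */
	return *data;
}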
43922+
43923+#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
43924+
43925+#include <linux/irqflags.h>
43926+
43927+void cpu_idle_wait(void);
43928+
43929+extern unsigned long arch_align_stack(unsigned long sp);
43930+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
43931+
43932+#endif
43933Index: head-2008-11-25/include/asm-x86/mach-xen/asm/tlbflush_64.h
43934===================================================================
43935--- /dev/null 1970-01-01 00:00:00.000000000 +0000
43936+++ head-2008-11-25/include/asm-x86/mach-xen/asm/tlbflush_64.h 2007-11-26 16:59:25.000000000 +0100
43937@@ -0,0 +1,103 @@
43938+#ifndef _X8664_TLBFLUSH_H
43939+#define _X8664_TLBFLUSH_H
43940+
43941+#include <linux/mm.h>
43942+#include <asm/processor.h>
43943+
43944+#define __flush_tlb() xen_tlb_flush()
43945+
43946+/*
43947+ * Global pages have to be flushed a bit differently. Not a real
43948+ * performance problem because this does not happen often.
43949+ */
43950+#define __flush_tlb_global() xen_tlb_flush()
43951+
43952+
43953+extern unsigned long pgkern_mask;
43954+
43955+#define __flush_tlb_all() __flush_tlb_global()
43956+
43957+#define __flush_tlb_one(addr) xen_invlpg((unsigned long)addr)
43958+
43959+
43960+/*
43961+ * TLB flushing:
43962+ *
43963+ * - flush_tlb() flushes the current mm struct TLBs
43964+ * - flush_tlb_all() flushes all processes TLBs
43965+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
43966+ * - flush_tlb_page(vma, vmaddr) flushes one page
43967+ * - flush_tlb_range(vma, start, end) flushes a range of pages
43968+ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
43969+ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
43970+ *
43971+ * x86-64 can only flush individual pages or full VMs. For a range flush
43972+ * we always do the full VM. It might be worth testing whether a few
43973+ * INVLPGs in a row are a win for small ranges.
43974+ */
43975+
43976+#ifndef CONFIG_SMP
43977+
43978+#define flush_tlb() __flush_tlb()
43979+#define flush_tlb_all() __flush_tlb_all()
43980+#define local_flush_tlb() __flush_tlb()
43981+
43982+static inline void flush_tlb_mm(struct mm_struct *mm)
43983+{
43984+ if (mm == current->active_mm)
43985+ __flush_tlb();
43986+}
43987+
43988+static inline void flush_tlb_page(struct vm_area_struct *vma,
43989+ unsigned long addr)
43990+{
43991+ if (vma->vm_mm == current->active_mm)
43992+ __flush_tlb_one(addr);
43993+}
43994+
43995+static inline void flush_tlb_range(struct vm_area_struct *vma,
43996+ unsigned long start, unsigned long end)
43997+{
43998+ if (vma->vm_mm == current->active_mm)
43999+ __flush_tlb();
44000+}
44001+
44002+#else
44003+
44004+#include <asm/smp.h>
44005+
44006+#define local_flush_tlb() \
44007+ __flush_tlb()
44008+
44009+#define flush_tlb_all xen_tlb_flush_all
44010+#define flush_tlb_current_task() xen_tlb_flush_mask(&current->mm->cpu_vm_mask)
44011+#define flush_tlb_mm(mm) xen_tlb_flush_mask(&(mm)->cpu_vm_mask)
44012+#define flush_tlb_page(vma, va) xen_invlpg_mask(&(vma)->vm_mm->cpu_vm_mask, va)
44013+
44014+#define flush_tlb() flush_tlb_current_task()
44015+
44016+static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
44017+{
44018+ flush_tlb_mm(vma->vm_mm);
44019+}
44020+
44021+#define TLBSTATE_OK 1
44022+#define TLBSTATE_LAZY 2
44023+
44024+/* Roughly an IPI every 20MB with 4k pages for freeing page table
44025+ ranges. Cost is about 42k of memory for each CPU. */
44026+#define ARCH_FREE_PTE_NR 5350
44027+
44028+#endif
44029+
44030+#define flush_tlb_kernel_range(start, end) flush_tlb_all()
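/*
 * Illustrative sketch added by the editor: after changing a single user
 * PTE, the caller invalidates exactly that translation with
 * flush_tlb_page(), which on Xen becomes a hypervisor-mediated INVLPG.
 * "example_after_pte_update" and its arguments are made up for the example.
 */
static inline void example_after_pte_update(struct vm_area_struct *vma,
					    unsigned long addr)
{
	/* one page changed -> one page invalidated */
	flush_tlb_page(vma, addr);
}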
44031+
44032+static inline void flush_tlb_pgtables(struct mm_struct *mm,
44033+ unsigned long start, unsigned long end)
44034+{
44035+	/* x86_64 does not keep any page table caches in a software TLB.
44036+	   The CPUs do cache page table entries in their hardware TLBs, but
44037+	   those are handled by the normal TLB flushing algorithms. */
44038+}
44039+
44040+#endif /* _X8664_TLBFLUSH_H */
44041Index: head-2008-11-25/include/asm-x86/mach-xen/asm/xor_64.h
44042===================================================================
44043--- /dev/null 1970-01-01 00:00:00.000000000 +0000
44044+++ head-2008-11-25/include/asm-x86/mach-xen/asm/xor_64.h 2007-06-12 13:14:13.000000000 +0200
44045@@ -0,0 +1,328 @@
44046+/*
44047+ * x86-64 changes / gcc fixes from Andi Kleen.
44048+ * Copyright 2002 Andi Kleen, SuSE Labs.
44049+ *
44050+ * This hasn't been optimized for the Hammer (K8) yet, but there are
44051+ * likely no advantages to be gained from x86-64 here anyway.
44052+ */
44053+
44054+typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
44055+
44056+/* Doesn't use gcc to save the XMM registers, because there is no easy way to
44057+ tell it to do a clts before the register saving. */
44058+#define XMMS_SAVE do { \
44059+ preempt_disable(); \
44060+ if (!(current_thread_info()->status & TS_USEDFPU)) \
44061+ clts(); \
44062+ __asm__ __volatile__ ( \
44063+ "movups %%xmm0,(%1) ;\n\t" \
44064+ "movups %%xmm1,0x10(%1) ;\n\t" \
44065+ "movups %%xmm2,0x20(%1) ;\n\t" \
44066+ "movups %%xmm3,0x30(%1) ;\n\t" \
44067+ : "=&r" (cr0) \
44068+ : "r" (xmm_save) \
44069+ : "memory"); \
44070+} while(0)
44071+
44072+#define XMMS_RESTORE do { \
44073+ asm volatile ( \
44074+ "sfence ;\n\t" \
44075+ "movups (%1),%%xmm0 ;\n\t" \
44076+ "movups 0x10(%1),%%xmm1 ;\n\t" \
44077+ "movups 0x20(%1),%%xmm2 ;\n\t" \
44078+ "movups 0x30(%1),%%xmm3 ;\n\t" \
44079+ : \
44080+ : "r" (cr0), "r" (xmm_save) \
44081+ : "memory"); \
44082+ if (!(current_thread_info()->status & TS_USEDFPU)) \
44083+ stts(); \
44084+ preempt_enable(); \
44085+} while(0)
44086+
44087+#define OFFS(x) "16*("#x")"
44088+#define PF_OFFS(x) "256+16*("#x")"
44089+#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
44090+#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
44091+#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
44092+#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
44093+#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
44094+#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
44095+#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
44096+#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
44097+#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
44098+#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
44099+#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
44100+#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
44101+#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
44102+
44103+
44104+static void
44105+xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
44106+{
44107+ unsigned int lines = bytes >> 8;
44108+ unsigned long cr0;
44109+ xmm_store_t xmm_save[4];
44110+
44111+ XMMS_SAVE;
44112+
44113+ asm volatile (
44114+#undef BLOCK
44115+#define BLOCK(i) \
44116+ LD(i,0) \
44117+ LD(i+1,1) \
44118+ PF1(i) \
44119+ PF1(i+2) \
44120+ LD(i+2,2) \
44121+ LD(i+3,3) \
44122+ PF0(i+4) \
44123+ PF0(i+6) \
44124+ XO1(i,0) \
44125+ XO1(i+1,1) \
44126+ XO1(i+2,2) \
44127+ XO1(i+3,3) \
44128+ ST(i,0) \
44129+ ST(i+1,1) \
44130+ ST(i+2,2) \
44131+ ST(i+3,3) \
44132+
44133+
44134+ PF0(0)
44135+ PF0(2)
44136+
44137+ " .align 32 ;\n"
44138+ " 1: ;\n"
44139+
44140+ BLOCK(0)
44141+ BLOCK(4)
44142+ BLOCK(8)
44143+ BLOCK(12)
44144+
44145+ " addq %[inc], %[p1] ;\n"
44146+ " addq %[inc], %[p2] ;\n"
44147+ " decl %[cnt] ; jnz 1b"
44148+ : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
44149+ : [inc] "r" (256UL)
44150+ : "memory");
44151+
44152+ XMMS_RESTORE;
44153+}
44154+
44155+static void
44156+xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
44157+ unsigned long *p3)
44158+{
44159+ unsigned int lines = bytes >> 8;
44160+ xmm_store_t xmm_save[4];
44161+ unsigned long cr0;
44162+
44163+ XMMS_SAVE;
44164+
44165+ __asm__ __volatile__ (
44166+#undef BLOCK
44167+#define BLOCK(i) \
44168+ PF1(i) \
44169+ PF1(i+2) \
44170+ LD(i,0) \
44171+ LD(i+1,1) \
44172+ LD(i+2,2) \
44173+ LD(i+3,3) \
44174+ PF2(i) \
44175+ PF2(i+2) \
44176+ PF0(i+4) \
44177+ PF0(i+6) \
44178+ XO1(i,0) \
44179+ XO1(i+1,1) \
44180+ XO1(i+2,2) \
44181+ XO1(i+3,3) \
44182+ XO2(i,0) \
44183+ XO2(i+1,1) \
44184+ XO2(i+2,2) \
44185+ XO2(i+3,3) \
44186+ ST(i,0) \
44187+ ST(i+1,1) \
44188+ ST(i+2,2) \
44189+ ST(i+3,3) \
44190+
44191+
44192+ PF0(0)
44193+ PF0(2)
44194+
44195+ " .align 32 ;\n"
44196+ " 1: ;\n"
44197+
44198+ BLOCK(0)
44199+ BLOCK(4)
44200+ BLOCK(8)
44201+ BLOCK(12)
44202+
44203+ " addq %[inc], %[p1] ;\n"
44204+ " addq %[inc], %[p2] ;\n"
44205+ " addq %[inc], %[p3] ;\n"
44206+ " decl %[cnt] ; jnz 1b"
44207+ : [cnt] "+r" (lines),
44208+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
44209+ : [inc] "r" (256UL)
44210+ : "memory");
44211+ XMMS_RESTORE;
44212+}
44213+
44214+static void
44215+xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
44216+ unsigned long *p3, unsigned long *p4)
44217+{
44218+ unsigned int lines = bytes >> 8;
44219+ xmm_store_t xmm_save[4];
44220+ unsigned long cr0;
44221+
44222+ XMMS_SAVE;
44223+
44224+ __asm__ __volatile__ (
44225+#undef BLOCK
44226+#define BLOCK(i) \
44227+ PF1(i) \
44228+ PF1(i+2) \
44229+ LD(i,0) \
44230+ LD(i+1,1) \
44231+ LD(i+2,2) \
44232+ LD(i+3,3) \
44233+ PF2(i) \
44234+ PF2(i+2) \
44235+ XO1(i,0) \
44236+ XO1(i+1,1) \
44237+ XO1(i+2,2) \
44238+ XO1(i+3,3) \
44239+ PF3(i) \
44240+ PF3(i+2) \
44241+ PF0(i+4) \
44242+ PF0(i+6) \
44243+ XO2(i,0) \
44244+ XO2(i+1,1) \
44245+ XO2(i+2,2) \
44246+ XO2(i+3,3) \
44247+ XO3(i,0) \
44248+ XO3(i+1,1) \
44249+ XO3(i+2,2) \
44250+ XO3(i+3,3) \
44251+ ST(i,0) \
44252+ ST(i+1,1) \
44253+ ST(i+2,2) \
44254+ ST(i+3,3) \
44255+
44256+
44257+ PF0(0)
44258+ PF0(2)
44259+
44260+ " .align 32 ;\n"
44261+ " 1: ;\n"
44262+
44263+ BLOCK(0)
44264+ BLOCK(4)
44265+ BLOCK(8)
44266+ BLOCK(12)
44267+
44268+ " addq %[inc], %[p1] ;\n"
44269+ " addq %[inc], %[p2] ;\n"
44270+ " addq %[inc], %[p3] ;\n"
44271+ " addq %[inc], %[p4] ;\n"
44272+ " decl %[cnt] ; jnz 1b"
44273+ : [cnt] "+c" (lines),
44274+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
44275+ : [inc] "r" (256UL)
44276+ : "memory" );
44277+
44278+ XMMS_RESTORE;
44279+}
44280+
44281+static void
44282+xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
44283+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
44284+{
44285+ unsigned int lines = bytes >> 8;
44286+ xmm_store_t xmm_save[4];
44287+ unsigned long cr0;
44288+
44289+ XMMS_SAVE;
44290+
44291+ __asm__ __volatile__ (
44292+#undef BLOCK
44293+#define BLOCK(i) \
44294+ PF1(i) \
44295+ PF1(i+2) \
44296+ LD(i,0) \
44297+ LD(i+1,1) \
44298+ LD(i+2,2) \
44299+ LD(i+3,3) \
44300+ PF2(i) \
44301+ PF2(i+2) \
44302+ XO1(i,0) \
44303+ XO1(i+1,1) \
44304+ XO1(i+2,2) \
44305+ XO1(i+3,3) \
44306+ PF3(i) \
44307+ PF3(i+2) \
44308+ XO2(i,0) \
44309+ XO2(i+1,1) \
44310+ XO2(i+2,2) \
44311+ XO2(i+3,3) \
44312+ PF4(i) \
44313+ PF4(i+2) \
44314+ PF0(i+4) \
44315+ PF0(i+6) \
44316+ XO3(i,0) \
44317+ XO3(i+1,1) \
44318+ XO3(i+2,2) \
44319+ XO3(i+3,3) \
44320+ XO4(i,0) \
44321+ XO4(i+1,1) \
44322+ XO4(i+2,2) \
44323+ XO4(i+3,3) \
44324+ ST(i,0) \
44325+ ST(i+1,1) \
44326+ ST(i+2,2) \
44327+ ST(i+3,3) \
44328+
44329+
44330+ PF0(0)
44331+ PF0(2)
44332+
44333+ " .align 32 ;\n"
44334+ " 1: ;\n"
44335+
44336+ BLOCK(0)
44337+ BLOCK(4)
44338+ BLOCK(8)
44339+ BLOCK(12)
44340+
44341+ " addq %[inc], %[p1] ;\n"
44342+ " addq %[inc], %[p2] ;\n"
44343+ " addq %[inc], %[p3] ;\n"
44344+ " addq %[inc], %[p4] ;\n"
44345+ " addq %[inc], %[p5] ;\n"
44346+ " decl %[cnt] ; jnz 1b"
44347+ : [cnt] "+c" (lines),
44348+ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
44349+ [p5] "+r" (p5)
44350+ : [inc] "r" (256UL)
44351+ : "memory");
44352+
44353+ XMMS_RESTORE;
44354+}
44355+
44356+static struct xor_block_template xor_block_sse = {
44357+ .name = "generic_sse",
44358+ .do_2 = xor_sse_2,
44359+ .do_3 = xor_sse_3,
44360+ .do_4 = xor_sse_4,
44361+ .do_5 = xor_sse_5,
44362+};
44363+
44364+#undef XOR_TRY_TEMPLATES
44365+#define XOR_TRY_TEMPLATES \
44366+ do { \
44367+ xor_speed(&xor_block_sse); \
44368+ } while (0)
44369+
44370+/* We force the use of the SSE xor block because it can write around L2.
44371+   We may also be able to load only into L1, depending on how the CPU
44372+   deals with a load to a line that is being prefetched. */
44373+#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
44374Index: head-2008-11-25/include/asm-x86/mach-xen/mach_time.h
44375===================================================================
44376--- /dev/null 1970-01-01 00:00:00.000000000 +0000
44377+++ head-2008-11-25/include/asm-x86/mach-xen/mach_time.h 2007-06-12 13:14:13.000000000 +0200
44378@@ -0,0 +1,111 @@
44379+/*
44380+ * include/asm-i386/mach-default/mach_time.h
44381+ *
44382+ * Machine specific set RTC function for generic.
44383+ * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
44384+ */
44385+#ifndef _MACH_TIME_H
44386+#define _MACH_TIME_H
44387+
44388+#include <asm-i386/mc146818rtc.h>
44389+
44390+/* timing check: call set_rtc_mmss() 500 ms into the second */
44391+/* used in arch/i386/time.c::do_timer_interrupt() */
44392+#define USEC_AFTER 500000
44393+#define USEC_BEFORE 500000
44394+
44395+/*
44396+ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
44397+ * called 500 ms after the second nowtime has started, because when
44398+ * nowtime is written into the registers of the CMOS clock, it will
44399+ * jump to the next second precisely 500 ms later. Check the Motorola
44400+ * MC146818A or Dallas DS12887 data sheet for details.
44401+ *
44402+ * BUG: This routine does not handle hour overflow properly; it just
44403+ * sets the minutes. Usually you'll only notice that after reboot!
44404+ */
44405+static inline int mach_set_rtc_mmss(unsigned long nowtime)
44406+{
44407+ int retval = 0;
44408+ int real_seconds, real_minutes, cmos_minutes;
44409+ unsigned char save_control, save_freq_select;
44410+
44411+ save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
44412+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
44413+
44414+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
44415+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
44416+
44417+ cmos_minutes = CMOS_READ(RTC_MINUTES);
44418+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
44419+ BCD_TO_BIN(cmos_minutes);
44420+
44421+ /*
44422+ * since we're only adjusting minutes and seconds,
44423+ * don't interfere with hour overflow. This avoids
44424+ * messing with unknown time zones but requires your
44425+ * RTC not to be off by more than 15 minutes
44426+ */
44427+ real_seconds = nowtime % 60;
44428+ real_minutes = nowtime / 60;
44429+ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
44430+ real_minutes += 30; /* correct for half hour time zone */
44431+ real_minutes %= 60;
44432+
44433+ if (abs(real_minutes - cmos_minutes) < 30) {
44434+ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
44435+ BIN_TO_BCD(real_seconds);
44436+ BIN_TO_BCD(real_minutes);
44437+ }
44438+ CMOS_WRITE(real_seconds,RTC_SECONDS);
44439+ CMOS_WRITE(real_minutes,RTC_MINUTES);
44440+ } else {
44441+ printk(KERN_WARNING
44442+ "set_rtc_mmss: can't update from %d to %d\n",
44443+ cmos_minutes, real_minutes);
44444+ retval = -1;
44445+ }
44446+
44447+ /* The following flags have to be released exactly in this order,
44448+ * otherwise the DS12887 (popular MC146818A clone with integrated
44449+ * battery and quartz) will not reset the oscillator and will not
44450+ * update precisely 500 ms later. You won't find this mentioned in
44451+ * the Dallas Semiconductor data sheets, but who believes data
44452+ * sheets anyway ... -- Markus Kuhn
44453+ */
44454+ CMOS_WRITE(save_control, RTC_CONTROL);
44455+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
44456+
44457+ return retval;
44458+}
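/*
 * Worked example added by the editor for the half-hour correction above,
 * with made-up numbers: suppose nowtime gives real_minutes = 42 while the
 * CMOS reads cmos_minutes = 10.  abs(42 - 10) = 32, (32 + 15) / 30 = 1,
 * which is odd, so 30 is added: real_minutes = 72 % 60 = 12.  Now
 * abs(12 - 10) = 2 < 30 and the RTC is updated; without the correction
 * the 32-minute delta would have been rejected, even though it is just a
 * half-hour time-zone offset plus a small drift.
 */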
44459+
44460+static inline unsigned long mach_get_cmos_time(void)
44461+{
44462+ unsigned int year, mon, day, hour, min, sec;
44463+
44464+ do {
44465+ sec = CMOS_READ(RTC_SECONDS);
44466+ min = CMOS_READ(RTC_MINUTES);
44467+ hour = CMOS_READ(RTC_HOURS);
44468+ day = CMOS_READ(RTC_DAY_OF_MONTH);
44469+ mon = CMOS_READ(RTC_MONTH);
44470+ year = CMOS_READ(RTC_YEAR);
44471+ } while (sec != CMOS_READ(RTC_SECONDS));
44472+
44473+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
44474+ BCD_TO_BIN(sec);
44475+ BCD_TO_BIN(min);
44476+ BCD_TO_BIN(hour);
44477+ BCD_TO_BIN(day);
44478+ BCD_TO_BIN(mon);
44479+ BCD_TO_BIN(year);
44480+ }
44481+
44482+ year += 1900;
44483+ if (year < 1970)
44484+ year += 100;
44485+
44486+ return mktime(year, mon, day, hour, min, sec);
44487+}
44488+
44489+#endif /* !_MACH_TIME_H */
44490Index: head-2008-11-25/include/asm-x86/mach-xen/setup_arch_post.h
44491===================================================================
44492--- /dev/null 1970-01-01 00:00:00.000000000 +0000
44493+++ head-2008-11-25/include/asm-x86/mach-xen/setup_arch_post.h 2007-06-12 13:14:13.000000000 +0200
44494@@ -0,0 +1,63 @@
44495+/**
44496+ * machine_specific_* - Hooks for machine specific setup.
44497+ *
44498+ * Description:
44499+ * This is included late in kernel/setup.c so that it can make
44500+ * use of all of the static functions.
44501+ **/
44502+
44503+#include <xen/interface/callback.h>
44504+
44505+extern void hypervisor_callback(void);
44506+extern void failsafe_callback(void);
44507+extern void nmi(void);
44508+
44509+static void __init machine_specific_arch_setup(void)
44510+{
44511+ int ret;
44512+ static struct callback_register __initdata event = {
44513+ .type = CALLBACKTYPE_event,
44514+ .address = (unsigned long) hypervisor_callback,
44515+ };
44516+ static struct callback_register __initdata failsafe = {
44517+ .type = CALLBACKTYPE_failsafe,
44518+ .address = (unsigned long)failsafe_callback,
44519+ };
44520+ static struct callback_register __initdata syscall = {
44521+ .type = CALLBACKTYPE_syscall,
44522+ .address = (unsigned long)system_call,
44523+ };
44524+#ifdef CONFIG_X86_LOCAL_APIC
44525+ static struct callback_register __initdata nmi_cb = {
44526+ .type = CALLBACKTYPE_nmi,
44527+ .address = (unsigned long)nmi,
44528+ };
44529+#endif
44530+
44531+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
44532+ if (ret == 0)
44533+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
44534+ if (ret == 0)
44535+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &syscall);
44536+#if CONFIG_XEN_COMPAT <= 0x030002
44537+ if (ret == -ENOSYS)
44538+ ret = HYPERVISOR_set_callbacks(
44539+ event.address,
44540+ failsafe.address,
44541+ syscall.address);
44542+#endif
44543+ BUG_ON(ret);
44544+
44545+#ifdef CONFIG_X86_LOCAL_APIC
44546+ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
44547+#if CONFIG_XEN_COMPAT <= 0x030002
44548+ if (ret == -ENOSYS) {
44549+ static struct xennmi_callback __initdata cb = {
44550+ .handler_address = (unsigned long)nmi
44551+ };
44552+
44553+ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
44554+ }
44555+#endif
44556+#endif
44557+}
44558Index: head-2008-11-25/include/asm-x86/mach-xen/setup_arch_pre.h
44559===================================================================
44560--- /dev/null 1970-01-01 00:00:00.000000000 +0000
44561+++ head-2008-11-25/include/asm-x86/mach-xen/setup_arch_pre.h 2007-06-12 13:14:13.000000000 +0200
44562@@ -0,0 +1,5 @@
44563+/* Hook to call BIOS initialisation function */
44564+
44565+#define ARCH_SETUP machine_specific_arch_setup();
44566+
44567+static void __init machine_specific_arch_setup(void);
44568Index: head-2008-11-25/include/xen/blkif.h
44569===================================================================
44570--- /dev/null 1970-01-01 00:00:00.000000000 +0000
44571+++ head-2008-11-25/include/xen/blkif.h 2008-07-21 11:00:33.000000000 +0200
44572@@ -0,0 +1,123 @@
44573+/*
44574+ * Permission is hereby granted, free of charge, to any person obtaining a copy
44575+ * of this software and associated documentation files (the "Software"), to
44576+ * deal in the Software without restriction, including without limitation the
44577+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
44578+ * sell copies of the Software, and to permit persons to whom the Software is
44579+ * furnished to do so, subject to the following conditions:
44580+ *
44581+ * The above copyright notice and this permission notice shall be included in
44582+ * all copies or substantial portions of the Software.
44583+ *
44584+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
44585+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
44586+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
44587+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
44588+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
44589+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
44590+ * DEALINGS IN THE SOFTWARE.
44591+ */
44592+
44593+#ifndef __XEN_BLKIF_H__
44594+#define __XEN_BLKIF_H__
44595+
44596+#include <xen/interface/io/ring.h>
44597+#include <xen/interface/io/blkif.h>
44598+#include <xen/interface/io/protocols.h>
44599+
44600+/* Not a real protocol. Used to generate ring structs which contain
44601+ * only the elements common to all protocols. This way we get a
44602+ * compiler-checkable way to use common struct elements, so we can
44603+ * avoid using switch(protocol) in a number of places. */
44604+struct blkif_common_request {
44605+ char dummy;
44606+};
44607+struct blkif_common_response {
44608+ char dummy;
44609+};
44610+
44611+/* i386 protocol version */
44612+#pragma pack(push, 4)
44613+struct blkif_x86_32_request {
44614+ uint8_t operation; /* BLKIF_OP_??? */
44615+ uint8_t nr_segments; /* number of segments */
44616+ blkif_vdev_t handle; /* only for read/write requests */
44617+ uint64_t id; /* private guest value, echoed in resp */
44618+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
44619+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
44620+};
44621+struct blkif_x86_32_response {
44622+ uint64_t id; /* copied from request */
44623+ uint8_t operation; /* copied from request */
44624+ int16_t status; /* BLKIF_RSP_??? */
44625+};
44626+typedef struct blkif_x86_32_request blkif_x86_32_request_t;
44627+typedef struct blkif_x86_32_response blkif_x86_32_response_t;
44628+#pragma pack(pop)
44629+
44630+/* x86_64 protocol version */
44631+struct blkif_x86_64_request {
44632+ uint8_t operation; /* BLKIF_OP_??? */
44633+ uint8_t nr_segments; /* number of segments */
44634+ blkif_vdev_t handle; /* only for read/write requests */
44635+ uint64_t __attribute__((__aligned__(8))) id;
44636+ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
44637+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
44638+};
44639+struct blkif_x86_64_response {
44640+ uint64_t __attribute__((__aligned__(8))) id;
44641+ uint8_t operation; /* copied from request */
44642+ int16_t status; /* BLKIF_RSP_??? */
44643+};
44644+typedef struct blkif_x86_64_request blkif_x86_64_request_t;
44645+typedef struct blkif_x86_64_response blkif_x86_64_response_t;
44646+
44647+DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
44648+DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
44649+DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
44650+
44651+union blkif_back_rings {
44652+ blkif_back_ring_t native;
44653+ blkif_common_back_ring_t common;
44654+ blkif_x86_32_back_ring_t x86_32;
44655+ blkif_x86_64_back_ring_t x86_64;
44656+};
44657+typedef union blkif_back_rings blkif_back_rings_t;
44658+
44659+enum blkif_protocol {
44660+ BLKIF_PROTOCOL_NATIVE = 1,
44661+ BLKIF_PROTOCOL_X86_32 = 2,
44662+ BLKIF_PROTOCOL_X86_64 = 3,
44663+};
44664+
44665+static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
44666+{
44667+ int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
44668+ dst->operation = src->operation;
44669+ dst->nr_segments = src->nr_segments;
44670+ dst->handle = src->handle;
44671+ dst->id = src->id;
44672+ dst->sector_number = src->sector_number;
44673+ barrier();
44674+ if (n > dst->nr_segments)
44675+ n = dst->nr_segments;
44676+ for (i = 0; i < n; i++)
44677+ dst->seg[i] = src->seg[i];
44678+}
44679+
44680+static inline void blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
44681+{
44682+ int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
44683+ dst->operation = src->operation;
44684+ dst->nr_segments = src->nr_segments;
44685+ dst->handle = src->handle;
44686+ dst->id = src->id;
44687+ dst->sector_number = src->sector_number;
44688+ barrier();
44689+ if (n > dst->nr_segments)
44690+ n = dst->nr_segments;
44691+ for (i = 0; i < n; i++)
44692+ dst->seg[i] = src->seg[i];
44693+}
44694+
44695+#endif /* __XEN_BLKIF_H__ */
44696Index: head-2008-11-25/include/xen/compat_ioctl.h
44697===================================================================
44698--- /dev/null 1970-01-01 00:00:00.000000000 +0000
44699+++ head-2008-11-25/include/xen/compat_ioctl.h 2007-07-10 09:42:30.000000000 +0200
44700@@ -0,0 +1,45 @@
44701+/*
44702+ * This program is free software; you can redistribute it and/or
44703+ * modify it under the terms of the GNU General Public License as
44704+ * published by the Free Software Foundation; either version 2 of the
44705+ * License, or (at your option) any later version.
44706+ *
44707+ * This program is distributed in the hope that it will be useful,
44708+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
44709+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
44710+ * GNU General Public License for more details.
44711+ *
44712+ * You should have received a copy of the GNU General Public License
44713+ * along with this program; if not, write to the Free Software
44714+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
44715+ *
44716+ * Copyright IBM Corp. 2007
44717+ *
44718+ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
44719+ * Hollis Blanchard <hollisb@us.ibm.com>
44720+ */
44721+
44722+#ifndef __LINUX_XEN_COMPAT_H__
44723+#define __LINUX_XEN_COMPAT_H__
44724+
44725+#include <linux/compat.h>
44726+
44727+extern int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg);
44728+struct privcmd_mmap_32 {
44729+ int num;
44730+ domid_t dom;
44731+ compat_uptr_t entry;
44732+};
44733+
44734+struct privcmd_mmapbatch_32 {
44735+ int num; /* number of pages to populate */
44736+ domid_t dom; /* target domain */
44737+ __u64 addr; /* virtual address */
44738+ compat_uptr_t arr; /* array of mfns - top nibble set on err */
44739+};
44740+#define IOCTL_PRIVCMD_MMAP_32 \
44741+ _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap_32))
44742+#define IOCTL_PRIVCMD_MMAPBATCH_32 \
44743+ _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch_32))
44744+
44745+#endif /* __LINUX_XEN_COMPAT_H__ */
44746Index: head-2008-11-25/include/xen/cpu_hotplug.h
44747===================================================================
44748--- /dev/null 1970-01-01 00:00:00.000000000 +0000
44749+++ head-2008-11-25/include/xen/cpu_hotplug.h 2007-08-16 18:07:01.000000000 +0200
44750@@ -0,0 +1,41 @@
44751+#ifndef __XEN_CPU_HOTPLUG_H__
44752+#define __XEN_CPU_HOTPLUG_H__
44753+
44754+#include <linux/kernel.h>
44755+#include <linux/cpumask.h>
44756+
44757+#if defined(CONFIG_X86) && defined(CONFIG_SMP)
44758+extern cpumask_t cpu_initialized_map;
44759+#endif
44760+
44761+#if defined(CONFIG_HOTPLUG_CPU)
44762+
44763+int cpu_up_check(unsigned int cpu);
44764+void init_xenbus_allowed_cpumask(void);
44765+int smp_suspend(void);
44766+void smp_resume(void);
44767+
44768+void cpu_bringup(void);
44769+
44770+#else /* !defined(CONFIG_HOTPLUG_CPU) */
44771+
44772+#define cpu_up_check(cpu) (0)
44773+#define init_xenbus_allowed_cpumask() ((void)0)
44774+
44775+static inline int smp_suspend(void)
44776+{
44777+ if (num_online_cpus() > 1) {
44778+ printk(KERN_WARNING "Can't suspend SMP guests "
44779+ "without CONFIG_HOTPLUG_CPU\n");
44780+ return -EOPNOTSUPP;
44781+ }
44782+ return 0;
44783+}
44784+
44785+static inline void smp_resume(void)
44786+{
44787+}
44788+
44789+#endif /* !defined(CONFIG_HOTPLUG_CPU) */
44790+
44791+#endif /* __XEN_CPU_HOTPLUG_H__ */
44792Index: head-2008-11-25/include/xen/driver_util.h
44793===================================================================
44794--- /dev/null 1970-01-01 00:00:00.000000000 +0000
44795+++ head-2008-11-25/include/xen/driver_util.h 2007-06-12 13:14:19.000000000 +0200
44796@@ -0,0 +1,14 @@
44797+
44798+#ifndef __ASM_XEN_DRIVER_UTIL_H__
44799+#define __ASM_XEN_DRIVER_UTIL_H__
44800+
44801+#include <linux/vmalloc.h>
44802+#include <linux/device.h>
44803+
44804+/* Allocate/destroy a 'vmalloc' VM area. */
44805+extern struct vm_struct *alloc_vm_area(unsigned long size);
44806+extern void free_vm_area(struct vm_struct *area);
44807+
44808+extern struct class *get_xen_class(void);
44809+
44810+#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
44811Index: head-2008-11-25/include/xen/evtchn.h
44812===================================================================
44813--- /dev/null 1970-01-01 00:00:00.000000000 +0000
44814+++ head-2008-11-25/include/xen/evtchn.h 2008-09-15 13:40:15.000000000 +0200
44815@@ -0,0 +1,160 @@
44816+/******************************************************************************
44817+ * evtchn.h
44818+ *
44819+ * Communication via Xen event channels.
44820+ * Also definitions for the device that demuxes notifications to userspace.
44821+ *
44822+ * Copyright (c) 2004-2005, K A Fraser
44823+ *
44824+ * This program is free software; you can redistribute it and/or
44825+ * modify it under the terms of the GNU General Public License version 2
44826+ * as published by the Free Software Foundation; or, when distributed
44827+ * separately from the Linux kernel or incorporated into other
44828+ * software packages, subject to the following license:
44829+ *
44830+ * Permission is hereby granted, free of charge, to any person obtaining a copy
44831+ * of this source file (the "Software"), to deal in the Software without
44832+ * restriction, including without limitation the rights to use, copy, modify,
44833+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
44834+ * and to permit persons to whom the Software is furnished to do so, subject to
44835+ * the following conditions:
44836+ *
44837+ * The above copyright notice and this permission notice shall be included in
44838+ * all copies or substantial portions of the Software.
44839+ *
44840+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
44841+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
44842+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
44843+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
44844+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
44845+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
44846+ * IN THE SOFTWARE.
44847+ */
44848+
44849+#ifndef __ASM_EVTCHN_H__
44850+#define __ASM_EVTCHN_H__
44851+
44852+#include <linux/interrupt.h>
44853+#include <asm/hypervisor.h>
44854+#include <asm/ptrace.h>
44855+#include <asm/synch_bitops.h>
44856+#include <xen/interface/event_channel.h>
44857+#include <linux/smp.h>
44858+
44859+/*
44860+ * LOW-LEVEL DEFINITIONS
44861+ */
44862+
44863+/*
44864+ * Dynamically bind an event source to an IRQ-like callback handler.
44865+ * On some platforms this may not be implemented via the Linux IRQ subsystem.
44866+ * The IRQ argument passed to the callback handler is the same as returned
44867+ * from the bind call. It may not correspond to a Linux IRQ number.
44868+ * Returns IRQ or negative errno.
44869+ */
44870+int bind_caller_port_to_irqhandler(
44871+ unsigned int caller_port,
44872+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
44873+ unsigned long irqflags,
44874+ const char *devname,
44875+ void *dev_id);
44876+int bind_listening_port_to_irqhandler(
44877+ unsigned int remote_domain,
44878+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
44879+ unsigned long irqflags,
44880+ const char *devname,
44881+ void *dev_id);
44882+int bind_interdomain_evtchn_to_irqhandler(
44883+ unsigned int remote_domain,
44884+ unsigned int remote_port,
44885+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
44886+ unsigned long irqflags,
44887+ const char *devname,
44888+ void *dev_id);
44889+int bind_virq_to_irqhandler(
44890+ unsigned int virq,
44891+ unsigned int cpu,
44892+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
44893+ unsigned long irqflags,
44894+ const char *devname,
44895+ void *dev_id);
44896+int bind_ipi_to_irqhandler(
44897+ unsigned int ipi,
44898+ unsigned int cpu,
44899+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
44900+ unsigned long irqflags,
44901+ const char *devname,
44902+ void *dev_id);
44903+
44904+/*
44905+ * Common unbind function for all event sources. Takes IRQ to unbind from.
44906+ * Automatically closes the underlying event channel (except for bindings
44907+ * made with bind_caller_port_to_irqhandler()).
44908+ */
44909+void unbind_from_irqhandler(unsigned int irq, void *dev_id);
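/*
 * Illustrative sketch added by the editor: a typical frontend binds a
 * handler to an event channel and later tears it down with
 * unbind_from_irqhandler(). "example_bind", "otherend_id" and "my_dev"
 * are made-up names for the example; irqreturn_t/IRQ_HANDLED come from
 * <linux/interrupt.h>, which is already included above.
 */
static irqreturn_t example_evtchn_interrupt(int irq, void *dev_id,
					    struct pt_regs *regs)
{
	/* process the notification associated with dev_id */
	return IRQ_HANDLED;
}

static inline int example_bind(unsigned int otherend_id, void *my_dev)
{
	int irq = bind_listening_port_to_irqhandler(otherend_id,
						    example_evtchn_interrupt,
						    0, "example-frontend",
						    my_dev);
	if (irq < 0)
		return irq;	/* negative errno on failure */

	/* ... use the channel ... */

	unbind_from_irqhandler(irq, my_dev);
	return 0;
}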
44910+
44911+void irq_resume(void);
44912+
44913+/* Entry point for notifications into Linux subsystems. */
44914+asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
44915+
44916+/* Entry point for notifications into the userland character device. */
44917+void evtchn_device_upcall(int port);
44918+
44919+/* Mark a PIRQ as unavailable for dynamic allocation. */
44920+void evtchn_register_pirq(int irq);
44921+/* Map a Xen-supplied PIRQ to a dynamically allocated one. */
44922+int evtchn_map_pirq(int irq, int xen_pirq);
44923+/* Look up a Xen-supplied PIRQ for a dynamically allocated one. */
44924+int evtchn_get_xen_pirq(int irq);
44925+
44926+void mask_evtchn(int port);
44927+void disable_all_local_evtchn(void);
44928+void unmask_evtchn(int port);
44929+
44930+#ifdef CONFIG_SMP
44931+void rebind_evtchn_to_cpu(int port, unsigned int cpu);
44932+#else
44933+#define rebind_evtchn_to_cpu(port, cpu) ((void)0)
44934+#endif
44935+
44936+static inline int test_and_set_evtchn_mask(int port)
44937+{
44938+ shared_info_t *s = HYPERVISOR_shared_info;
44939+ return synch_test_and_set_bit(port, s->evtchn_mask);
44940+}
44941+
44942+static inline void clear_evtchn(int port)
44943+{
44944+ shared_info_t *s = HYPERVISOR_shared_info;
44945+ synch_clear_bit(port, s->evtchn_pending);
44946+}
44947+
44948+static inline void notify_remote_via_evtchn(int port)
44949+{
44950+ struct evtchn_send send = { .port = port };
44951+ VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send));
44952+}
44953+
44954+/*
44955+ * Use these to access the event channel underlying the IRQ handle returned
44956+ * by bind_*_to_irqhandler().
44957+ */
44958+void notify_remote_via_irq(int irq);
44959+int irq_to_evtchn_port(int irq);
44960+
44961+#define PIRQ_SET_MAPPING 0x0
44962+#define PIRQ_CLEAR_MAPPING 0x1
44963+#define PIRQ_GET_MAPPING 0x3
44964+int pirq_mapstatus(int pirq, int action);
44965+int set_pirq_hw_action(int pirq, int (*action)(int pirq, int action));
44966+int clear_pirq_hw_action(int pirq);
44967+
44968+#define PIRQ_STARTUP 1
44969+#define PIRQ_SHUTDOWN 2
44970+#define PIRQ_ENABLE 3
44971+#define PIRQ_DISABLE 4
44972+#define PIRQ_END 5
44973+#define PIRQ_ACK 6
44974+
44975+#endif /* __ASM_EVTCHN_H__ */
44976Index: head-2008-11-25/include/xen/firmware.h
44977===================================================================
44978--- /dev/null 1970-01-01 00:00:00.000000000 +0000
44979+++ head-2008-11-25/include/xen/firmware.h 2007-07-02 08:16:19.000000000 +0200
44980@@ -0,0 +1,10 @@
44981+#ifndef __XEN_FIRMWARE_H__
44982+#define __XEN_FIRMWARE_H__
44983+
44984+#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
44985+void copy_edd(void);
44986+#endif
44987+
44988+void copy_edid(void);
44989+
44990+#endif /* __XEN_FIRMWARE_H__ */
44991Index: head-2008-11-25/include/xen/gnttab.h
44992===================================================================
44993--- /dev/null 1970-01-01 00:00:00.000000000 +0000
44994+++ head-2008-11-25/include/xen/gnttab.h 2008-11-04 11:13:10.000000000 +0100
44995@@ -0,0 +1,164 @@
44996+/******************************************************************************
44997+ * gnttab.h
44998+ *
44999+ * Two sets of functionality:
45000+ * 1. Granting foreign access to our memory reservation.
45001+ * 2. Accessing others' memory reservations via grant references.
45002+ * (i.e., mechanisms for both sender and recipient of grant references)
45003+ *
45004+ * Copyright (c) 2004-2005, K A Fraser
45005+ * Copyright (c) 2005, Christopher Clark
45006+ *
45007+ * This program is free software; you can redistribute it and/or
45008+ * modify it under the terms of the GNU General Public License version 2
45009+ * as published by the Free Software Foundation; or, when distributed
45010+ * separately from the Linux kernel or incorporated into other
45011+ * software packages, subject to the following license:
45012+ *
45013+ * Permission is hereby granted, free of charge, to any person obtaining a copy
45014+ * of this source file (the "Software"), to deal in the Software without
45015+ * restriction, including without limitation the rights to use, copy, modify,
45016+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
45017+ * and to permit persons to whom the Software is furnished to do so, subject to
45018+ * the following conditions:
45019+ *
45020+ * The above copyright notice and this permission notice shall be included in
45021+ * all copies or substantial portions of the Software.
45022+ *
45023+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
45024+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
45025+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
45026+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
45027+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
45028+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
45029+ * IN THE SOFTWARE.
45030+ */
45031+
45032+#ifndef __ASM_GNTTAB_H__
45033+#define __ASM_GNTTAB_H__
45034+
45035+#include <asm/hypervisor.h>
45036+#include <asm/maddr.h> /* maddr_t */
45037+#include <linux/mm.h>
45038+#include <xen/interface/grant_table.h>
45039+#include <xen/features.h>
45040+
45041+struct gnttab_free_callback {
45042+ struct gnttab_free_callback *next;
45043+ void (*fn)(void *);
45044+ void *arg;
45045+ u16 count;
45046+ u8 queued;
45047+};
45048+
45049+int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
45050+ int flags);
45051+
45052+/*
45053+ * End access through the given grant reference, iff the grant entry is no
45054+ * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
45055+ * use.
45056+ */
45057+int gnttab_end_foreign_access_ref(grant_ref_t ref);
45058+
45059+/*
45060+ * Eventually end access through the given grant reference, and once that
45061+ * access has been ended, free the given page too. Access will be ended
45062+ * immediately iff the grant entry is not in use, otherwise it will happen
45063+ * some time later. page may be 0, in which case no freeing will occur.
45064+ */
45065+void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page);
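/*
 * Illustrative sketch added by the editor: granting another domain access
 * to one of our frames and revoking it again. "example_share_page" and
 * "otherend_id" are made up for the example, and virt_to_mfn() is assumed
 * to be the usual Xen helper available via the headers included above;
 * flags 0 grants read/write access (GTF_readonly would restrict it).
 */
static inline int example_share_page(domid_t otherend_id, void *page)
{
	int ref = gnttab_grant_foreign_access(otherend_id,
					      virt_to_mfn(page), 0);

	/* a negative return means no free grant reference was available */
	if (ref >= 0) {
		/* ... hand ref to the other domain, do the I/O ... */
		gnttab_end_foreign_access(ref, 0);	/* revoke, keep the page */
	}
	return ref;
}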
45066+
45067+int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
45068+
45069+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
45070+unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
45071+
45072+int gnttab_query_foreign_access(grant_ref_t ref);
45073+
45074+/*
45075+ * operations on reserved batches of grant references
45076+ */
45077+int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
45078+
45079+void gnttab_free_grant_reference(grant_ref_t ref);
45080+
45081+void gnttab_free_grant_references(grant_ref_t head);
45082+
45083+int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
45084+
45085+int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
45086+
45087+void gnttab_release_grant_reference(grant_ref_t *private_head,
45088+ grant_ref_t release);
45089+
45090+void gnttab_request_free_callback(struct gnttab_free_callback *callback,
45091+ void (*fn)(void *), void *arg, u16 count);
45092+void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
45093+
45094+void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
45095+ unsigned long frame, int flags);
45096+
45097+void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
45098+ unsigned long pfn);
45099+
45100+int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep);
45101+void __gnttab_dma_map_page(struct page *page);
45102+static inline void __gnttab_dma_unmap_page(struct page *page)
45103+{
45104+}
45105+
45106+void gnttab_reset_grant_page(struct page *page);
45107+
45108+int gnttab_suspend(void);
45109+int gnttab_resume(void);
45110+
45111+void *arch_gnttab_alloc_shared(unsigned long *frames);
45112+
45113+static inline void
45114+gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr,
45115+ uint32_t flags, grant_ref_t ref, domid_t domid)
45116+{
45117+ if (flags & GNTMAP_contains_pte)
45118+ map->host_addr = addr;
45119+ else if (xen_feature(XENFEAT_auto_translated_physmap))
45120+ map->host_addr = __pa(addr);
45121+ else
45122+ map->host_addr = addr;
45123+
45124+ map->flags = flags;
45125+ map->ref = ref;
45126+ map->dom = domid;
45127+}
45128+
45129+static inline void
45130+gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr,
45131+ uint32_t flags, grant_handle_t handle)
45132+{
45133+ if (flags & GNTMAP_contains_pte)
45134+ unmap->host_addr = addr;
45135+ else if (xen_feature(XENFEAT_auto_translated_physmap))
45136+ unmap->host_addr = __pa(addr);
45137+ else
45138+ unmap->host_addr = addr;
45139+
45140+ unmap->handle = handle;
45141+ unmap->dev_bus_addr = 0;
45142+}
45143+
45144+static inline void
45145+gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr,
45146+ maddr_t new_addr, grant_handle_t handle)
45147+{
45148+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
45149+ unmap->host_addr = __pa(addr);
45150+ unmap->new_addr = __pa(new_addr);
45151+ } else {
45152+ unmap->host_addr = addr;
45153+ unmap->new_addr = new_addr;
45154+ }
45155+
45156+ unmap->handle = handle;
45157+}
45158+
45159+#endif /* __ASM_GNTTAB_H__ */
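For reference, a minimal hypothetical sketch (not part of the patched tree) of how a split-driver backend might use the reserved-batch grant API declared above to share one page with a peer domain. The peer domid, batch size, and the use of pfn_to_mfn()/page_address() for the frame and free-on-end arguments are assumptions drawn from the comments in this header, not guaranteed by it.

/* Illustrative only: grant one freshly allocated page to a peer domain. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <xen/gnttab.h>

static int example_share_page(domid_t peer, struct page **pagep,
			      grant_ref_t *head, grant_ref_t *grefp)
{
	struct page *page;
	int ref;

	/* Reserve a small batch of references up front. */
	if (gnttab_alloc_grant_references(8, head) < 0)
		return -ENOSPC;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		gnttab_free_grant_references(*head);
		return -ENOMEM;
	}

	/* Claim one reference from the batch and point it at our frame;
	 * flags == 0 is assumed to grant read/write access. */
	ref = gnttab_claim_grant_reference(head);
	if (ref < 0) {
		__free_page(page);
		gnttab_free_grant_references(*head);
		return ref;
	}
	gnttab_grant_foreign_access_ref(ref, peer,
					pfn_to_mfn(page_to_pfn(page)), 0);

	*pagep = page;
	*grefp = ref;
	return 0;
}

static void example_unshare_page(grant_ref_t head, grant_ref_t ref,
				 struct page *page)
{
	/* Assumption: the second argument is the page's kernel address and
	 * is freed once the grant is really ended (0 would mean "don't free",
	 * per the comment on gnttab_end_foreign_access() above). */
	gnttab_end_foreign_access(ref, (unsigned long)page_address(page));
	gnttab_free_grant_references(head);
}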
45160Index: head-2008-11-25/include/xen/hvm.h
45161===================================================================
45162--- /dev/null 1970-01-01 00:00:00.000000000 +0000
45163+++ head-2008-11-25/include/xen/hvm.h 2007-06-12 13:14:19.000000000 +0200
45164@@ -0,0 +1,23 @@
45165+/* Simple wrappers around HVM functions */
45166+#ifndef XEN_HVM_H__
45167+#define XEN_HVM_H__
45168+
45169+#include <xen/interface/hvm/params.h>
45170+
45171+static inline unsigned long hvm_get_parameter(int idx)
45172+{
45173+ struct xen_hvm_param xhv;
45174+ int r;
45175+
45176+ xhv.domid = DOMID_SELF;
45177+ xhv.index = idx;
45178+ r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
45179+ if (r < 0) {
45180+ printk(KERN_ERR "cannot get hvm parameter %d: %d.\n",
45181+ idx, r);
45182+ return 0;
45183+ }
45184+ return xhv.value;
45185+}
45186+
45187+#endif /* XEN_HVM_H__ */
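A minimal sketch of how an HVM guest's xenbus setup might use the wrapper above; the HVM_PARAM_* constants come from xen/interface/hvm/params.h, and the error value chosen here is illustrative.

/* Illustrative only: read the xenstore parameters of an HVM guest. */
#include <linux/errno.h>
#include <xen/hvm.h>
#include <xen/interface/hvm/params.h>

static int example_xenstore_params(unsigned long *evtchn, unsigned long *pfn)
{
	*evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
	*pfn = hvm_get_parameter(HVM_PARAM_STORE_PFN);

	/* hvm_get_parameter() already logs and returns 0 on failure. */
	return (*evtchn && *pfn) ? 0 : -ENOENT;
}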
45188Index: head-2008-11-25/include/xen/hypercall.h
45189===================================================================
45190--- /dev/null 1970-01-01 00:00:00.000000000 +0000
45191+++ head-2008-11-25/include/xen/hypercall.h 2008-01-28 12:24:19.000000000 +0100
45192@@ -0,0 +1,30 @@
45193+#ifndef __XEN_HYPERCALL_H__
45194+#define __XEN_HYPERCALL_H__
45195+
45196+#include <asm/hypercall.h>
45197+
45198+static inline int __must_check
45199+HYPERVISOR_multicall_check(
45200+ multicall_entry_t *call_list, unsigned int nr_calls,
45201+ const unsigned long *rc_list)
45202+{
45203+ int rc = HYPERVISOR_multicall(call_list, nr_calls);
45204+
45205+ if (unlikely(rc < 0))
45206+ return rc;
45207+ BUG_ON(rc);
45208+ BUG_ON((int)nr_calls < 0);
45209+
45210+ for ( ; nr_calls > 0; --nr_calls, ++call_list)
45211+ if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0)))
45212+ return nr_calls;
45213+
45214+ return 0;
45215+}
45216+
45217+/* A construct to ignore the return value of hypercall wrappers in a few
45218+ * exceptional cases (simply casting the function result to void doesn't
45219+ * avoid the compiler warning): */
45220+#define VOID(expr) ((void)((expr)?:0))
45221+
45222+#endif /* __XEN_HYPERCALL_H__ */
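A hypothetical sketch of the checked-multicall helper above: two event-channel notifications are batched into one multicall, and HYPERVISOR_multicall_check() polices the per-entry results (rc_list == NULL means every result must be 0). The port numbers and the choice of EVTCHNOP_send are placeholders, not taken from this header.

/* Illustrative only: batch two event-channel sends into one multicall. */
#include <linux/errno.h>
#include <xen/hypercall.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

static int example_notify_pair(evtchn_port_t a, evtchn_port_t b)
{
	struct evtchn_send send[2] = { { .port = a }, { .port = b } };
	multicall_entry_t mc[2];
	unsigned int i;

	for (i = 0; i < 2; i++) {
		mc[i].op = __HYPERVISOR_event_channel_op;
		mc[i].args[0] = EVTCHNOP_send;
		mc[i].args[1] = (unsigned long)&send[i];
	}

	/* 0 on success; <0 if the multicall itself failed; otherwise the
	 * number of entries left unchecked when a result mismatched. */
	return HYPERVISOR_multicall_check(mc, 2, NULL) ? -EIO : 0;
}

The VOID() macro is the opposite tool: it is for the few call sites that deliberately discard a hypercall wrapper's return value without triggering a compiler warning.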
45223Index: head-2008-11-25/include/xen/hypervisor_sysfs.h
45224===================================================================
45225--- /dev/null 1970-01-01 00:00:00.000000000 +0000
45226+++ head-2008-11-25/include/xen/hypervisor_sysfs.h 2007-06-22 09:08:06.000000000 +0200
45227@@ -0,0 +1,30 @@
45228+/*
45229+ * copyright (c) 2006 IBM Corporation
45230+ * Authored by: Mike D. Day <ncmike@us.ibm.com>
45231+ *
45232+ * This program is free software; you can redistribute it and/or modify
45233+ * it under the terms of the GNU General Public License version 2 as
45234+ * published by the Free Software Foundation.
45235+ */
45236+
45237+#ifndef _HYP_SYSFS_H_
45238+#define _HYP_SYSFS_H_
45239+
45240+#include <linux/kobject.h>
45241+#include <linux/sysfs.h>
45242+
45243+#define HYPERVISOR_ATTR_RO(_name) \
45244+static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name)
45245+
45246+#define HYPERVISOR_ATTR_RW(_name) \
45247+static struct hyp_sysfs_attr _name##_attr = \
45248+ __ATTR(_name, 0644, _name##_show, _name##_store)
45249+
45250+struct hyp_sysfs_attr {
45251+ struct attribute attr;
45252+ ssize_t (*show)(struct hyp_sysfs_attr *, char *);
45253+ ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
45254+ void *hyp_attr_data;
45255+};
45256+
45257+#endif /* _HYP_SYSFS_H_ */
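A short hypothetical sketch of how a read-only hypervisor sysfs attribute would be declared with the macro above; the attribute name and the kobject it would be attached to (shown only in the trailing comment) are placeholders.

/* Illustrative only: declare a read-only attribute named "example". */
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <xen/hypervisor_sysfs.h>

static ssize_t example_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	return sprintf(buffer, "example value\n");
}

HYPERVISOR_ATTR_RO(example);

/* Registration would then look something like:
 *	sysfs_create_file(some_kobject, &example_attr.attr);
 * where some_kobject is whatever kobject the caller owns.
 */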
45258Index: head-2008-11-25/include/xen/pcifront.h
45259===================================================================
45260--- /dev/null 1970-01-01 00:00:00.000000000 +0000
45261+++ head-2008-11-25/include/xen/pcifront.h 2007-06-18 08:38:13.000000000 +0200
45262@@ -0,0 +1,83 @@
45263+/*
45264+ * PCI Frontend - arch-dependent declarations
45265+ *
45266+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
45267+ */
45268+#ifndef __XEN_ASM_PCIFRONT_H__
45269+#define __XEN_ASM_PCIFRONT_H__
45270+
45271+#include <linux/spinlock.h>
45272+
45273+#ifdef __KERNEL__
45274+
45275+#ifndef __ia64__
45276+
45277+struct pcifront_device;
45278+struct pci_bus;
45279+
45280+struct pcifront_sd {
45281+ int domain;
45282+ struct pcifront_device *pdev;
45283+};
45284+
45285+static inline struct pcifront_device *
45286+pcifront_get_pdev(struct pcifront_sd *sd)
45287+{
45288+ return sd->pdev;
45289+}
45290+
45291+static inline void pcifront_init_sd(struct pcifront_sd *sd,
45292+ unsigned int domain, unsigned int bus,
45293+ struct pcifront_device *pdev)
45294+{
45295+ sd->domain = domain;
45296+ sd->pdev = pdev;
45297+}
45298+
45299+#if defined(CONFIG_PCI_DOMAINS)
45300+static inline int pci_domain_nr(struct pci_bus *bus)
45301+{
45302+ struct pcifront_sd *sd = bus->sysdata;
45303+ return sd->domain;
45304+}
45305+static inline int pci_proc_domain(struct pci_bus *bus)
45306+{
45307+ return pci_domain_nr(bus);
45308+}
45309+#endif /* CONFIG_PCI_DOMAINS */
45310+
45311+static inline void pcifront_setup_root_resources(struct pci_bus *bus,
45312+ struct pcifront_sd *sd)
45313+{
45314+}
45315+
45316+#else /* __ia64__ */
45317+
45318+#include <linux/acpi.h>
45319+#include <asm/pci.h>
45320+#define pcifront_sd pci_controller
45321+
45322+extern void xen_add_resource(struct pci_controller *, unsigned int,
45323+ unsigned int, struct acpi_resource *);
45324+extern void xen_pcibios_setup_root_windows(struct pci_bus *,
45325+ struct pci_controller *);
45326+
45327+static inline struct pcifront_device *
45328+pcifront_get_pdev(struct pcifront_sd *sd)
45329+{
45330+ return (struct pcifront_device *)sd->platform_data;
45331+}
45332+
45333+static inline void pcifront_setup_root_resources(struct pci_bus *bus,
45334+ struct pcifront_sd *sd)
45335+{
45336+ xen_pcibios_setup_root_windows(bus, sd);
45337+}
45338+
45339+#endif /* __ia64__ */
45340+
45341+extern struct rw_semaphore pci_bus_sem;
45342+
45343+#endif /* __KERNEL__ */
45344+
45345+#endif /* __XEN_ASM_PCIFRONT_H__ */
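A hypothetical sketch of how a scan path in the pcifront driver might populate the per-bus sysdata defined above. The pci_ops ("example_bus_ops"), the NULL parent device, and the error handling are all placeholders and not taken from the real driver.

/* Illustrative only: attach pcifront sysdata to a newly scanned root bus. */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <xen/pcifront.h>

extern struct pci_ops example_bus_ops;	/* stands in for the driver's ops */

static int example_scan_root(struct pcifront_device *pdev,
			     unsigned int domain, unsigned int busnr)
{
	struct pcifront_sd *sd;
	struct pci_bus *bus;

	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;

	pcifront_init_sd(sd, domain, busnr, pdev);
	bus = pci_scan_bus_parented(NULL, busnr, &example_bus_ops, sd);
	if (!bus) {
		kfree(sd);
		return -ENOMEM;
	}

	pcifront_setup_root_resources(bus, sd);
	pci_bus_add_devices(bus);
	return 0;
}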
45346Index: head-2008-11-25/include/xen/public/evtchn.h
45347===================================================================
45348--- /dev/null 1970-01-01 00:00:00.000000000 +0000
45349+++ head-2008-11-25/include/xen/public/evtchn.h 2007-06-12 13:14:19.000000000 +0200
45350@@ -0,0 +1,88 @@
45351+/******************************************************************************
45352+ * evtchn.h
45353+ *
45354+ * Interface to /dev/xen/evtchn.
45355+ *
45356+ * Copyright (c) 2003-2005, K A Fraser
45357+ *
45358+ * This program is free software; you can redistribute it and/or
45359+ * modify it under the terms of the GNU General Public License version 2
45360+ * as published by the Free Software Foundation; or, when distributed
45361+ * separately from the Linux kernel or incorporated into other
45362+ * software packages, subject to the following license:
45363+ *
45364+ * Permission is hereby granted, free of charge, to any person obtaining a copy
45365+ * of this source file (the "Software"), to deal in the Software without
45366+ * restriction, including without limitation the rights to use, copy, modify,
45367+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
45368+ * and to permit persons to whom the Software is furnished to do so, subject to
45369+ * the following conditions:
45370+ *
45371+ * The above copyright notice and this permission notice shall be included in
45372+ * all copies or substantial portions of the Software.
45373+ *
45374+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
45375+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
45376+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
45377+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
45378+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
45379+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
45380+ * IN THE SOFTWARE.
45381+ */
45382+
45383+#ifndef __LINUX_PUBLIC_EVTCHN_H__
45384+#define __LINUX_PUBLIC_EVTCHN_H__
45385+
45386+/*
45387+ * Bind a fresh port to VIRQ @virq.
45388+ * Return allocated port.
45389+ */
45390+#define IOCTL_EVTCHN_BIND_VIRQ \
45391+ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
45392+struct ioctl_evtchn_bind_virq {
45393+ unsigned int virq;
45394+};
45395+
45396+/*
45397+ * Bind a fresh port to remote <@remote_domain, @remote_port>.
45398+ * Return allocated port.
45399+ */
45400+#define IOCTL_EVTCHN_BIND_INTERDOMAIN \
45401+ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
45402+struct ioctl_evtchn_bind_interdomain {
45403+ unsigned int remote_domain, remote_port;
45404+};
45405+
45406+/*
45407+ * Allocate a fresh port for binding to @remote_domain.
45408+ * Return allocated port.
45409+ */
45410+#define IOCTL_EVTCHN_BIND_UNBOUND_PORT \
45411+ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
45412+struct ioctl_evtchn_bind_unbound_port {
45413+ unsigned int remote_domain;
45414+};
45415+
45416+/*
45417+ * Unbind previously allocated @port.
45418+ */
45419+#define IOCTL_EVTCHN_UNBIND \
45420+ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
45421+struct ioctl_evtchn_unbind {
45422+ unsigned int port;
45423+};
45424+
45425+/*
45426+ * Send an event to the remote end of the channel whose local endpoint is @port.
45427+ */
45428+#define IOCTL_EVTCHN_NOTIFY \
45429+ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
45430+struct ioctl_evtchn_notify {
45431+ unsigned int port;
45432+};
45433+
45434+/* Clear and reinitialise the event buffer. Clear error condition. */
45435+#define IOCTL_EVTCHN_RESET \
45436+ _IOC(_IOC_NONE, 'E', 5, 0)
45437+
45438+#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
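A minimal user-space sketch of the ioctl interface above: bind an unbound port for a peer domain through /dev/xen/evtchn and send one notification on it. The device node path, the header include path, and the remote domain id are assumptions.

/* Illustrative only: allocate an unbound event-channel port and kick it. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/public/evtchn.h>

int main(void)
{
	struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 1 };
	struct ioctl_evtchn_notify notify;
	int fd, port;

	fd = open("/dev/xen/evtchn", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
	if (port < 0) {
		perror("bind");
		close(fd);
		return 1;
	}

	notify.port = port;
	ioctl(fd, IOCTL_EVTCHN_NOTIFY, &notify);	/* kick the peer */

	close(fd);
	return 0;
}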
45439Index: head-2008-11-25/include/xen/public/gntdev.h
45440===================================================================
45441--- /dev/null 1970-01-01 00:00:00.000000000 +0000
45442+++ head-2008-11-25/include/xen/public/gntdev.h 2008-04-02 12:34:02.000000000 +0200
45443@@ -0,0 +1,119 @@
45444+/******************************************************************************
45445+ * gntdev.h
45446+ *
45447+ * Interface to /dev/xen/gntdev.
45448+ *
45449+ * Copyright (c) 2007, D G Murray
45450+ *
45451+ * This program is free software; you can redistribute it and/or
45452+ * modify it under the terms of the GNU General Public License version 2
45453+ * as published by the Free Software Foundation; or, when distributed
45454+ * separately from the Linux kernel or incorporated into other
45455+ * software packages, subject to the following license:
45456+ *
45457+ * Permission is hereby granted, free of charge, to any person obtaining a copy
45458+ * of this source file (the "Software"), to deal in the Software without
45459+ * restriction, including without limitation the rights to use, copy, modify,
45460+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
45461+ * and to permit persons to whom the Software is furnished to do so, subject to
45462+ * the following conditions:
45463+ *
45464+ * The above copyright notice and this permission notice shall be included in
45465+ * all copies or substantial portions of the Software.
45466+ *
45467+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
45468+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
45469+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
45470+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
45471+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
45472+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
45473+ * IN THE SOFTWARE.
45474+ */
45475+
45476+#ifndef __LINUX_PUBLIC_GNTDEV_H__
45477+#define __LINUX_PUBLIC_GNTDEV_H__
45478+
45479+struct ioctl_gntdev_grant_ref {
45480+ /* The domain ID of the grant to be mapped. */
45481+ uint32_t domid;
45482+ /* The grant reference of the grant to be mapped. */
45483+ uint32_t ref;
45484+};
45485+
45486+/*
45487+ * Inserts the grant references into the mapping table of an instance
45488+ * of gntdev. N.B. This does not perform the mapping, which is deferred
45489+ * until mmap() is called with @index as the offset.
45490+ */
45491+#define IOCTL_GNTDEV_MAP_GRANT_REF \
45492+_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
45493+struct ioctl_gntdev_map_grant_ref {
45494+ /* IN parameters */
45495+ /* The number of grants to be mapped. */
45496+ uint32_t count;
45497+ uint32_t pad;
45498+ /* OUT parameters */
45499+ /* The offset to be used on a subsequent call to mmap(). */
45500+ uint64_t index;
45501+ /* Variable IN parameter. */
45502+ /* Array of grant references, of size @count. */
45503+ struct ioctl_gntdev_grant_ref refs[1];
45504+};
45505+
45506+/*
45507+ * Removes the grant references from the mapping table of an instance
45508+ * of gntdev. N.B. munmap() must be called on the relevant virtual address(es)
45509+ * before this ioctl is called, or an error will result.
45510+ */
45511+#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
45512+_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
45513+struct ioctl_gntdev_unmap_grant_ref {
45514+ /* IN parameters */
45515+ /* The offset that was returned by the corresponding map operation. */
45516+ uint64_t index;
45517+ /* The number of pages to be unmapped. */
45518+ uint32_t count;
45519+ uint32_t pad;
45520+};
45521+
45522+/*
45523+ * Returns the offset in the driver's address space that corresponds
45524+ * to @vaddr. This can be used to perform a munmap(), followed by an
45525+ * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by
45526+ * the caller. The number of pages that were allocated at the same time as
45527+ * @vaddr is returned in @count.
45528+ *
45529+ * N.B. Where more than one page has been mapped into a contiguous range, the
45530+ * supplied @vaddr must correspond to the start of the range; otherwise
45531+ * an error will result. It is only possible to munmap() the entire
45532+ * contiguously-allocated range at once, and not any subrange thereof.
45533+ */
45534+#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \
45535+_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr))
45536+struct ioctl_gntdev_get_offset_for_vaddr {
45537+ /* IN parameters */
45538+ /* The virtual address of the first mapped page in a range. */
45539+ uint64_t vaddr;
45540+ /* OUT parameters */
45541+ /* The offset that was used in the initial mmap() operation. */
45542+ uint64_t offset;
45543+ /* The number of pages mapped in the VM area that begins at @vaddr. */
45544+ uint32_t count;
45545+ uint32_t pad;
45546+};
45547+
45548+/*
45549+ * Sets the maximum number of grants that may be mapped at once by this gntdev
45550+ * instance.
45551+ *
45552+ * N.B. This must be called before any other ioctl is performed on the device.
45553+ */
45554+#define IOCTL_GNTDEV_SET_MAX_GRANTS \
45555+_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants))
45556+struct ioctl_gntdev_set_max_grants {
45557+ /* IN parameter */
45558+ /* The maximum number of grants that may be mapped at once. */
45559+ uint32_t count;
45560+};
45561+
45562+#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
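A user-space sketch of the map/unmap cycle documented above: one foreign grant is inserted with MAP_GRANT_REF, actually mapped with mmap() at the returned offset, then released in the required order (munmap() before UNMAP_GRANT_REF). The domain id, grant reference, 4 KiB page size, and include path are placeholders.

/* Illustrative only: map and release a single foreign grant via gntdev. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/public/gntdev.h>

int main(void)
{
	struct ioctl_gntdev_map_grant_ref map = {
		.count = 1,
		.refs[0] = { .domid = 1, .ref = 42 },	/* placeholders */
	};
	struct ioctl_gntdev_unmap_grant_ref unmap;
	void *addr;
	int fd;

	fd = open("/dev/xen/gntdev", O_RDWR);
	if (fd < 0 || ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map))
		return 1;

	/* mmap() at the offset the MAP ioctl handed back performs the map. */
	addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.index);
	if (addr == MAP_FAILED)
		return 1;

	/* ... use the shared page ... */

	munmap(addr, 4096);		/* must precede the unmap ioctl */
	unmap.index = map.index;
	unmap.count = 1;
	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);
	close(fd);
	return 0;
}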
45563Index: head-2008-11-25/include/xen/public/privcmd.h
45564===================================================================
45565--- /dev/null 1970-01-01 00:00:00.000000000 +0000
45566+++ head-2008-11-25/include/xen/public/privcmd.h 2007-06-12 13:14:19.000000000 +0200
45567@@ -0,0 +1,79 @@
45568+/******************************************************************************
45569+ * privcmd.h
45570+ *
45571+ * Interface to /proc/xen/privcmd.
45572+ *
45573+ * Copyright (c) 2003-2005, K A Fraser
45574+ *
45575+ * This program is free software; you can redistribute it and/or
45576+ * modify it under the terms of the GNU General Public License version 2
45577+ * as published by the Free Software Foundation; or, when distributed
45578+ * separately from the Linux kernel or incorporated into other
45579+ * software packages, subject to the following license:
45580+ *
45581+ * Permission is hereby granted, free of charge, to any person obtaining a copy
45582+ * of this source file (the "Software"), to deal in the Software without
45583+ * restriction, including without limitation the rights to use, copy, modify,
45584+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
45585+ * and to permit persons to whom the Software is furnished to do so, subject to
45586+ * the following conditions:
45587+ *
45588+ * The above copyright notice and this permission notice shall be included in
45589+ * all copies or substantial portions of the Software.
45590+ *
45591+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
45592+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
45593+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
45594+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
45595+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
45596+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
45597+ * IN THE SOFTWARE.
45598+ */
45599+
45600+#ifndef __LINUX_PUBLIC_PRIVCMD_H__
45601+#define __LINUX_PUBLIC_PRIVCMD_H__
45602+
45603+#include <linux/types.h>
45604+
45605+#ifndef __user
45606+#define __user
45607+#endif
45608+
45609+typedef struct privcmd_hypercall
45610+{
45611+ __u64 op;
45612+ __u64 arg[5];
45613+} privcmd_hypercall_t;
45614+
45615+typedef struct privcmd_mmap_entry {
45616+ __u64 va;
45617+ __u64 mfn;
45618+ __u64 npages;
45619+} privcmd_mmap_entry_t;
45620+
45621+typedef struct privcmd_mmap {
45622+ int num;
45623+ domid_t dom; /* target domain */
45624+ privcmd_mmap_entry_t __user *entry;
45625+} privcmd_mmap_t;
45626+
45627+typedef struct privcmd_mmapbatch {
45628+ int num; /* number of pages to populate */
45629+ domid_t dom; /* target domain */
45630+ __u64 addr; /* virtual address */
45631+ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
45632+} privcmd_mmapbatch_t;
45633+
45634+/*
45635+ * @cmd: IOCTL_PRIVCMD_HYPERCALL
45636+ * @arg: &privcmd_hypercall_t
45637+ * Return: Value returned from execution of the specified hypercall.
45638+ */
45639+#define IOCTL_PRIVCMD_HYPERCALL \
45640+ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
45641+#define IOCTL_PRIVCMD_MMAP \
45642+ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
45643+#define IOCTL_PRIVCMD_MMAPBATCH \
45644+ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
45645+
45646+#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
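A user-space sketch of IOCTL_PRIVCMD_HYPERCALL: issuing a xen_version hypercall from a privileged domain through /proc/xen/privcmd. The __HYPERVISOR_xen_version and XENVER_version constants come from the Xen public interface headers; the include paths are assumptions.

/* Illustrative only: query the running Xen version via privcmd. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/public/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>

int main(void)
{
	privcmd_hypercall_t call = {
		.op = __HYPERVISOR_xen_version,
		.arg = { XENVER_version, 0 },
	};
	int fd, ret;

	fd = open("/proc/xen/privcmd", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
	if (ret < 0)
		perror("hypercall");
	else
		printf("Xen version %d.%d\n", ret >> 16, ret & 0xffff);

	close(fd);
	return 0;
}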
45647Index: head-2008-11-25/include/xen/xen_proc.h
45648===================================================================
45649--- /dev/null 1970-01-01 00:00:00.000000000 +0000
45650+++ head-2008-11-25/include/xen/xen_proc.h 2007-06-12 13:14:19.000000000 +0200
45651@@ -0,0 +1,12 @@
45652+
45653+#ifndef __ASM_XEN_PROC_H__
45654+#define __ASM_XEN_PROC_H__
45655+
45656+#include <linux/proc_fs.h>
45657+
45658+extern struct proc_dir_entry *create_xen_proc_entry(
45659+ const char *name, mode_t mode);
45660+extern void remove_xen_proc_entry(
45661+ const char *name);
45662+
45663+#endif /* __ASM_XEN_PROC_H__ */
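A tiny hypothetical sketch of a driver publishing a node under /proc/xen with the helper above; the entry name, mode, and file_operations are placeholders.

/* Illustrative only: create /proc/xen/example and attach its fops. */
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <xen/xen_proc.h>

static const struct file_operations example_fops;	/* placeholder */

static int __init example_proc_init(void)
{
	struct proc_dir_entry *entry = create_xen_proc_entry("example", 0400);

	if (!entry)
		return -ENOMEM;
	entry->proc_fops = &example_fops;
	return 0;
}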
45664Index: head-2008-11-25/include/xen/xencons.h
45665===================================================================
45666--- /dev/null 1970-01-01 00:00:00.000000000 +0000
45667+++ head-2008-11-25/include/xen/xencons.h 2007-10-15 09:39:38.000000000 +0200
45668@@ -0,0 +1,17 @@
45669+#ifndef __ASM_XENCONS_H__
45670+#define __ASM_XENCONS_H__
45671+
45672+struct dom0_vga_console_info;
45673+void dom0_init_screen_info(const struct dom0_vga_console_info *, size_t);
45674+
45675+void xencons_force_flush(void);
45676+void xencons_resume(void);
45677+
45678+/* Interrupt work hooks. Receive data, or kick data out. */
45679+void xencons_rx(char *buf, unsigned len, struct pt_regs *regs);
45680+void xencons_tx(void);
45681+
45682+int xencons_ring_init(void);
45683+int xencons_ring_send(const char *data, unsigned len);
45684+
45685+#endif /* __ASM_XENCONS_H__ */
45686Index: head-2008-11-25/include/xen/xenoprof.h
45687===================================================================
45688--- /dev/null 1970-01-01 00:00:00.000000000 +0000
45689+++ head-2008-11-25/include/xen/xenoprof.h 2007-06-12 13:14:19.000000000 +0200
45690@@ -0,0 +1,42 @@
45691+/******************************************************************************
45692+ * xen/xenoprof.h
45693+ *
45694+ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
45695+ * VA Linux Systems Japan K.K.
45696+ *
45697+ * This program is free software; you can redistribute it and/or modify
45698+ * it under the terms of the GNU General Public License as published by
45699+ * the Free Software Foundation; either version 2 of the License, or
45700+ * (at your option) any later version.
45701+ *
45702+ * This program is distributed in the hope that it will be useful,
45703+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
45704+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
45705+ * GNU General Public License for more details.
45706+ *
45707+ * You should have received a copy of the GNU General Public License
45708+ * along with this program; if not, write to the Free Software
45709+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
45710+ *
45711+ */
45712+
45713+#ifndef __XEN_XENOPROF_H__
45714+#define __XEN_XENOPROF_H__
45715+#ifdef CONFIG_XEN
45716+
45717+#include <asm/xenoprof.h>
45718+
45719+struct oprofile_operations;
45720+int xenoprofile_init(struct oprofile_operations * ops);
45721+void xenoprofile_exit(void);
45722+
45723+struct xenoprof_shared_buffer {
45724+ char *buffer;
45725+ struct xenoprof_arch_shared_buffer arch;
45726+};
45727+#else
45728+#define xenoprofile_init(ops) (-ENOSYS)
45729+#define xenoprofile_exit() do { } while (0)
45730+
45731+#endif /* CONFIG_XEN */
45732+#endif /* __XEN_XENOPROF_H__ */
45733Index: head-2008-11-25/lib/swiotlb-xen.c
45734===================================================================
45735--- /dev/null 1970-01-01 00:00:00.000000000 +0000
45736+++ head-2008-11-25/lib/swiotlb-xen.c 2008-09-15 13:40:15.000000000 +0200
45737@@ -0,0 +1,739 @@
45738+/*
45739+ * Dynamic DMA mapping support.
45740+ *
45741+ * This implementation is a fallback for platforms that do not support
45742+ * I/O TLBs (aka DMA address translation hardware).
45743+ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
45744+ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
45745+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
45746+ * David Mosberger-Tang <davidm@hpl.hp.com>
45747+ * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
45748+ */
45749+
45750+#include <linux/cache.h>
45751+#include <linux/mm.h>
45752+#include <linux/module.h>
45753+#include <linux/pci.h>
45754+#include <linux/spinlock.h>
45755+#include <linux/string.h>
45756+#include <linux/types.h>
45757+#include <linux/ctype.h>
45758+#include <linux/init.h>
45759+#include <linux/bootmem.h>
45760+#include <linux/highmem.h>
45761+#include <asm/io.h>
45762+#include <asm/pci.h>
45763+#include <asm/dma.h>
45764+#include <asm/uaccess.h>
45765+#include <xen/gnttab.h>
45766+#include <xen/interface/memory.h>
45767+#include <asm-i386/mach-xen/asm/gnttab_dma.h>
45768+
45769+int swiotlb;
45770+EXPORT_SYMBOL(swiotlb);
45771+
45772+#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
45773+
45774+/*
45775+ * Maximum allowable number of contiguous slabs to map,
45776+ * must be a power of 2. What is the appropriate value?
45777+ * The complexity of {map,unmap}_single is linearly dependent on this value.
45778+ */
45779+#define IO_TLB_SEGSIZE 128
45780+
45781+/*
45782+ * log of the size of each IO TLB slab. The number of slabs is command line
45783+ * controllable.
45784+ */
45785+#define IO_TLB_SHIFT 11
45786+
45787+int swiotlb_force;
45788+
45789+static char *iotlb_virt_start;
45790+static unsigned long iotlb_nslabs;
45791+
45792+/*
45793+ * Used to do a quick range check in swiotlb_unmap_single and
45794+ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
45795+ * API.
45796+ */
45797+static unsigned long iotlb_pfn_start, iotlb_pfn_end;
45798+
45799+/* Does the given dma address reside within the swiotlb aperture? */
45800+static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
45801+{
45802+ unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
45803+ return (pfn_valid(pfn)
45804+ && (pfn >= iotlb_pfn_start)
45805+ && (pfn < iotlb_pfn_end));
45806+}
45807+
45808+/*
45809+ * When the IOMMU overflows we return a fallback buffer. This sets the size.
45810+ */
45811+static unsigned long io_tlb_overflow = 32*1024;
45812+
45813+void *io_tlb_overflow_buffer;
45814+
45815+/*
45816+ * This is a free list describing the number of free entries available from
45817+ * each index
45818+ */
45819+static unsigned int *io_tlb_list;
45820+static unsigned int io_tlb_index;
45821+
45822+/*
45823+ * We need to save away the original address corresponding to a mapped entry
45824+ * for the sync operations.
45825+ */
45826+static struct phys_addr {
45827+ struct page *page;
45828+ unsigned int offset;
45829+} *io_tlb_orig_addr;
45830+
45831+/*
45832+ * Protect the above data structures in the map and unmap calls
45833+ */
45834+static DEFINE_SPINLOCK(io_tlb_lock);
45835+
45836+static unsigned int dma_bits;
45837+static unsigned int __initdata max_dma_bits = 32;
45838+static int __init
45839+setup_dma_bits(char *str)
45840+{
45841+ max_dma_bits = simple_strtoul(str, NULL, 0);
45842+ return 0;
45843+}
45844+__setup("dma_bits=", setup_dma_bits);
45845+
45846+static int __init
45847+setup_io_tlb_npages(char *str)
45848+{
45849+ /* Unlike ia64, the argument is the aperture size in megabytes, not 'slabs'! */
45850+ if (isdigit(*str)) {
45851+ iotlb_nslabs = simple_strtoul(str, &str, 0) <<
45852+ (20 - IO_TLB_SHIFT);
45853+ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
45854+ }
45855+ if (*str == ',')
45856+ ++str;
45857+ /*
45858+ * NB. 'force' enables the swiotlb, but doesn't force its use for
45859+ * every DMA like it does on native Linux. 'off' forcibly disables
45860+ * use of the swiotlb.
45861+ */
45862+ if (!strcmp(str, "force"))
45863+ swiotlb_force = 1;
45864+ else if (!strcmp(str, "off"))
45865+ swiotlb_force = -1;
45866+ return 1;
45867+}
45868+__setup("swiotlb=", setup_io_tlb_npages);
45869+/* make io_tlb_overflow tunable too? */
45870+
45871+/*
45872+ * Statically reserve bounce buffer space and initialize bounce buffer data
45873+ * structures for the software IO TLB used to implement the PCI DMA API.
45874+ */
45875+void
45876+swiotlb_init_with_default_size (size_t default_size)
45877+{
45878+ unsigned long i, bytes;
45879+ int rc;
45880+
45881+ if (!iotlb_nslabs) {
45882+ iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
45883+ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
45884+ }
45885+
45886+ bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
45887+
45888+ /*
45889+ * Get IO TLB memory from the low pages
45890+ */
45891+ iotlb_virt_start = alloc_bootmem_low_pages(bytes);
45892+ if (!iotlb_virt_start)
45893+ panic("Cannot allocate SWIOTLB buffer!\n");
45894+
45895+ dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
45896+ for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
45897+ do {
45898+ rc = xen_create_contiguous_region(
45899+ (unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
45900+ get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
45901+ dma_bits);
45902+ } while (rc && dma_bits++ < max_dma_bits);
45903+ if (rc) {
45904+ if (i == 0)
45905+ panic("No suitable physical memory available for SWIOTLB buffer!\n"
45906+ "Use dom0_mem Xen boot parameter to reserve\n"
45907+ "some DMA memory (e.g., dom0_mem=-128M).\n");
45908+ iotlb_nslabs = i;
45909+ i <<= IO_TLB_SHIFT;
45910+ free_bootmem(__pa(iotlb_virt_start + i), bytes - i);
45911+ bytes = i;
45912+ for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) {
45913+ unsigned int bits = fls64(virt_to_bus(iotlb_virt_start + i - 1));
45914+
45915+ if (bits > dma_bits)
45916+ dma_bits = bits;
45917+ }
45918+ break;
45919+ }
45920+ }
45921+
45922+ /*
45923+ * Allocate and initialize the free list array. This array is used
45924+ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
45925+ */
45926+ io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
45927+ for (i = 0; i < iotlb_nslabs; i++)
45928+ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
45929+ io_tlb_index = 0;
45930+ io_tlb_orig_addr = alloc_bootmem(
45931+ iotlb_nslabs * sizeof(*io_tlb_orig_addr));
45932+
45933+ /*
45934+ * Get the overflow emergency buffer
45935+ */
45936+ io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
45937+ if (!io_tlb_overflow_buffer)
45938+ panic("Cannot allocate SWIOTLB overflow buffer!\n");
45939+
45940+ do {
45941+ rc = xen_create_contiguous_region(
45942+ (unsigned long)io_tlb_overflow_buffer,
45943+ get_order(io_tlb_overflow),
45944+ dma_bits);
45945+ } while (rc && dma_bits++ < max_dma_bits);
45946+ if (rc)
45947+ panic("No suitable physical memory available for SWIOTLB overflow buffer!\n");
45948+
45949+ iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
45950+ iotlb_pfn_end = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
45951+
45952+ printk(KERN_INFO "Software IO TLB enabled: \n"
45953+ " Aperture: %lu megabytes\n"
45954+ " Kernel range: %p - %p\n"
45955+ " Address size: %u bits\n",
45956+ bytes >> 20,
45957+ iotlb_virt_start, iotlb_virt_start + bytes,
45958+ dma_bits);
45959+}
45960+
45961+void
45962+swiotlb_init(void)
45963+{
45964+ long ram_end;
45965+ size_t defsz = 64 * (1 << 20); /* 64MB default size */
45966+
45967+ if (swiotlb_force == 1) {
45968+ swiotlb = 1;
45969+ } else if ((swiotlb_force != -1) &&
45970+ is_running_on_xen() &&
45971+ is_initial_xendomain()) {
45972+ /* Domain 0 always has a swiotlb. */
45973+ ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
45974+ if (ram_end <= 0x7ffff)
45975+ defsz = 2 * (1 << 20); /* 2MB on systems with <2GB of RAM. */
45976+ swiotlb = 1;
45977+ }
45978+
45979+ if (swiotlb)
45980+ swiotlb_init_with_default_size(defsz);
45981+ else
45982+ printk(KERN_INFO "Software IO TLB disabled\n");
45983+}
45984+
45985+/*
45986+ * We use __copy_to_user_inatomic to transfer to the host buffer because the
45987+ * buffer may be mapped read-only (e.g., in blkback driver) but lower-level
45988+ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
45989+ * unnecessary copy from the aperture to the host buffer, and a page fault.
45990+ */
45991+static void
45992+__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
45993+{
45994+ if (PageHighMem(buffer.page)) {
45995+ size_t len, bytes;
45996+ char *dev, *host, *kmp;
45997+ len = size;
45998+ while (len != 0) {
45999+ unsigned long flags;
46000+
46001+ if (((bytes = len) + buffer.offset) > PAGE_SIZE)
46002+ bytes = PAGE_SIZE - buffer.offset;
46003+ local_irq_save(flags); /* protects KM_BOUNCE_READ */
46004+ kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
46005+ dev = dma_addr + size - len;
46006+ host = kmp + buffer.offset;
46007+ if (dir == DMA_FROM_DEVICE) {
46008+ if (__copy_to_user_inatomic(host, dev, bytes))
46009+ /* inaccessible */;
46010+ } else
46011+ memcpy(dev, host, bytes);
46012+ kunmap_atomic(kmp, KM_BOUNCE_READ);
46013+ local_irq_restore(flags);
46014+ len -= bytes;
46015+ buffer.page++;
46016+ buffer.offset = 0;
46017+ }
46018+ } else {
46019+ char *host = (char *)phys_to_virt(
46020+ page_to_pseudophys(buffer.page)) + buffer.offset;
46021+ if (dir == DMA_FROM_DEVICE) {
46022+ if (__copy_to_user_inatomic(host, dma_addr, size))
46023+ /* inaccessible */;
46024+ } else if (dir == DMA_TO_DEVICE)
46025+ memcpy(dma_addr, host, size);
46026+ }
46027+}
46028+
46029+/*
46030+ * Allocates bounce buffer and returns its kernel virtual address.
46031+ */
46032+static void *
46033+map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
46034+{
46035+ unsigned long flags;
46036+ char *dma_addr;
46037+ unsigned int nslots, stride, index, wrap;
46038+ struct phys_addr slot_buf;
46039+ int i;
46040+
46041+ /*
46042+ * For mappings greater than a page, we limit the stride (and
46043+ * hence alignment) to a page size.
46044+ */
46045+ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
46046+ if (size > PAGE_SIZE)
46047+ stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
46048+ else
46049+ stride = 1;
46050+
46051+ BUG_ON(!nslots);
46052+
46053+ /*
46054+ * Find a suitable number of IO TLB entries that will fit this
46055+ * request and allocate a buffer from that IO TLB pool.
46056+ */
46057+ spin_lock_irqsave(&io_tlb_lock, flags);
46058+ {
46059+ wrap = index = ALIGN(io_tlb_index, stride);
46060+
46061+ if (index >= iotlb_nslabs)
46062+ wrap = index = 0;
46063+
46064+ do {
46065+ /*
46066+ * If we find a slot that indicates we have 'nslots'
46067+ * number of contiguous buffers, we allocate the
46068+ * buffers from that slot and mark the entries as '0'
46069+ * indicating unavailable.
46070+ */
46071+ if (io_tlb_list[index] >= nslots) {
46072+ int count = 0;
46073+
46074+ for (i = index; i < (int)(index + nslots); i++)
46075+ io_tlb_list[i] = 0;
46076+ for (i = index - 1;
46077+ (OFFSET(i, IO_TLB_SEGSIZE) !=
46078+ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
46079+ i--)
46080+ io_tlb_list[i] = ++count;
46081+ dma_addr = iotlb_virt_start +
46082+ (index << IO_TLB_SHIFT);
46083+
46084+ /*
46085+ * Update the indices to avoid searching in
46086+ * the next round.
46087+ */
46088+ io_tlb_index =
46089+ ((index + nslots) < iotlb_nslabs
46090+ ? (index + nslots) : 0);
46091+
46092+ goto found;
46093+ }
46094+ index += stride;
46095+ if (index >= iotlb_nslabs)
46096+ index = 0;
46097+ } while (index != wrap);
46098+
46099+ spin_unlock_irqrestore(&io_tlb_lock, flags);
46100+ return NULL;
46101+ }
46102+ found:
46103+ spin_unlock_irqrestore(&io_tlb_lock, flags);
46104+
46105+ /*
46106+ * Save away the mapping from the original address to the DMA address.
46107+ * This is needed when we sync the memory. Then we sync the buffer if
46108+ * needed.
46109+ */
46110+ slot_buf = buffer;
46111+ for (i = 0; i < nslots; i++) {
46112+ slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
46113+ slot_buf.offset &= PAGE_SIZE - 1;
46114+ io_tlb_orig_addr[index+i] = slot_buf;
46115+ slot_buf.offset += 1 << IO_TLB_SHIFT;
46116+ }
46117+ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
46118+ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
46119+
46120+ return dma_addr;
46121+}
46122+
46123+static struct phys_addr dma_addr_to_phys_addr(char *dma_addr)
46124+{
46125+ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
46126+ struct phys_addr buffer = io_tlb_orig_addr[index];
46127+ buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
46128+ buffer.page += buffer.offset >> PAGE_SHIFT;
46129+ buffer.offset &= PAGE_SIZE - 1;
46130+ return buffer;
46131+}
46132+
46133+/*
46134+ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
46135+ */
46136+static void
46137+unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
46138+{
46139+ unsigned long flags;
46140+ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
46141+ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
46142+ struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
46143+
46144+ /*
46145+ * First, sync the memory before unmapping the entry
46146+ */
46147+ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
46148+ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
46149+
46150+ /*
46151+ * Return the buffer to the free list by setting the corresponding
46152+ * entries to indicate the number of contiguous entries available.
46153+ * While returning the entries to the free list, we merge the entries
46154+ * with slots below and above the pool being returned.
46155+ */
46156+ spin_lock_irqsave(&io_tlb_lock, flags);
46157+ {
46158+ count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
46159+ io_tlb_list[index + nslots] : 0);
46160+ /*
46161+ * Step 1: return the slots to the free list, merging the
46162+ * slots with succeeding slots
46163+ */
46164+ for (i = index + nslots - 1; i >= index; i--)
46165+ io_tlb_list[i] = ++count;
46166+ /*
46167+ * Step 2: merge the returned slots with the preceding slots,
46168+ * if available (non zero)
46169+ */
46170+ for (i = index - 1;
46171+ (OFFSET(i, IO_TLB_SEGSIZE) !=
46172+ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
46173+ i--)
46174+ io_tlb_list[i] = ++count;
46175+ }
46176+ spin_unlock_irqrestore(&io_tlb_lock, flags);
46177+}
46178+
46179+static void
46180+sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
46181+{
46182+ struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
46183+ BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
46184+ __sync_single(buffer, dma_addr, size, dir);
46185+}
46186+
46187+static void
46188+swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
46189+{
46190+ /*
46191+ * Ran out of IOMMU space for this operation. This is very bad.
46192+ * Unfortunately the drivers cannot handle this operation properly
46193+ * unless they check for pci_dma_mapping_error (most don't).
46194+ * When the mapping is small enough, return a static buffer to limit
46195+ * the damage, or panic when the transfer is too big.
46196+ */
46197+ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
46198+ "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
46199+
46200+ if (size > io_tlb_overflow && do_panic) {
46201+ if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
46202+ panic("PCI-DMA: Memory would be corrupted\n");
46203+ if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
46204+ panic("PCI-DMA: Random memory would be DMAed\n");
46205+ }
46206+}
46207+
46208+/*
46209+ * Map a single buffer of the indicated size for DMA in streaming mode. The
46210+ * PCI address to use is returned.
46211+ *
46212+ * Once the device is given the dma address, the device owns this memory until
46213+ * either swiotlb_unmap_single or swiotlb_sync_single_for_* is performed.
46214+ */
46215+dma_addr_t
46216+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
46217+{
46218+ dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
46219+ offset_in_page(ptr);
46220+ void *map;
46221+ struct phys_addr buffer;
46222+
46223+ BUG_ON(dir == DMA_NONE);
46224+
46225+ /*
46226+ * If the pointer passed in happens to be in the device's DMA window,
46227+ * we can safely return the device addr and not worry about bounce
46228+ * buffering it.
46229+ */
46230+ if (!range_straddles_page_boundary(__pa(ptr), size) &&
46231+ !address_needs_mapping(hwdev, dev_addr))
46232+ return dev_addr;
46233+
46234+ /*
46235+ * Oh well, have to allocate and map a bounce buffer.
46236+ */
46237+ gnttab_dma_unmap_page(dev_addr);
46238+ buffer.page = virt_to_page(ptr);
46239+ buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
46240+ map = map_single(hwdev, buffer, size, dir);
46241+ if (!map) {
46242+ swiotlb_full(hwdev, size, dir, 1);
46243+ map = io_tlb_overflow_buffer;
46244+ }
46245+
46246+ dev_addr = virt_to_bus(map);
46247+ return dev_addr;
46248+}
46249+
46250+/*
46251+ * Unmap a single streaming mode DMA translation. The dma_addr and size must
46252+ * match what was provided in a previous swiotlb_map_single call. All
46253+ * other usages are undefined.
46254+ *
46255+ * After this call, reads by the cpu to the buffer are guaranteed to see
46256+ * whatever the device wrote there.
46257+ */
46258+void
46259+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
46260+ int dir)
46261+{
46262+ BUG_ON(dir == DMA_NONE);
46263+ if (in_swiotlb_aperture(dev_addr))
46264+ unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
46265+ else
46266+ gnttab_dma_unmap_page(dev_addr);
46267+}
46268+
46269+/*
46270+ * Make physical memory consistent for a single streaming mode DMA translation
46271+ * after a transfer.
46272+ *
46273+ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
46274+ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
46275+ * call this function before doing so. At the next point you give the PCI dma
46276+ * address back to the card, you must first perform a
46277+ * swiotlb_dma_sync_for_device, and then the device again owns the buffer
46278+ */
46279+void
46280+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
46281+ size_t size, int dir)
46282+{
46283+ BUG_ON(dir == DMA_NONE);
46284+ if (in_swiotlb_aperture(dev_addr))
46285+ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
46286+}
46287+
46288+void
46289+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
46290+ size_t size, int dir)
46291+{
46292+ BUG_ON(dir == DMA_NONE);
46293+ if (in_swiotlb_aperture(dev_addr))
46294+ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
46295+}
46296+
46297+/*
46298+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
46299+ * This is the scatter-gather version of the above swiotlb_map_single
46300+ * interface. Here the scatter gather list elements are each tagged with the
46301+ * appropriate dma address and length. They are obtained via
46302+ * sg_dma_{address,length}(SG).
46303+ *
46304+ * NOTE: An implementation may be able to use a smaller number of
46305+ * DMA address/length pairs than there are SG table elements.
46306+ * (for example via virtual mapping capabilities)
46307+ * The routine returns the number of addr/length pairs actually
46308+ * used, at most nents.
46309+ *
46310+ * Device ownership issues as mentioned above for swiotlb_map_single are the
46311+ * same here.
46312+ */
46313+int
46314+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
46315+ int dir)
46316+{
46317+ struct phys_addr buffer;
46318+ dma_addr_t dev_addr;
46319+ char *map;
46320+ int i;
46321+
46322+ BUG_ON(dir == DMA_NONE);
46323+
46324+ for (i = 0; i < nelems; i++, sg++) {
46325+ dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
46326+
46327+ if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
46328+ + sg->offset, sg->length)
46329+ || address_needs_mapping(hwdev, dev_addr)) {
46330+ gnttab_dma_unmap_page(dev_addr);
46331+ buffer.page = sg->page;
46332+ buffer.offset = sg->offset;
46333+ map = map_single(hwdev, buffer, sg->length, dir);
46334+ if (!map) {
46335+ /* Don't panic here, we expect map_sg users
46336+ to do proper error handling. */
46337+ swiotlb_full(hwdev, sg->length, dir, 0);
46338+ swiotlb_unmap_sg(hwdev, sg - i, i, dir);
46339+ sg[0].dma_length = 0;
46340+ return 0;
46341+ }
46342+ sg->dma_address = (dma_addr_t)virt_to_bus(map);
46343+ } else
46344+ sg->dma_address = dev_addr;
46345+ sg->dma_length = sg->length;
46346+ }
46347+ return nelems;
46348+}
46349+
46350+/*
46351+ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
46352+ * concerning calls here are the same as for swiotlb_unmap_single() above.
46353+ */
46354+void
46355+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
46356+ int dir)
46357+{
46358+ int i;
46359+
46360+ BUG_ON(dir == DMA_NONE);
46361+
46362+ for (i = 0; i < nelems; i++, sg++)
46363+ if (in_swiotlb_aperture(sg->dma_address))
46364+ unmap_single(hwdev,
46365+ (void *)bus_to_virt(sg->dma_address),
46366+ sg->dma_length, dir);
46367+ else
46368+ gnttab_dma_unmap_page(sg->dma_address);
46369+}
46370+
46371+/*
46372+ * Make physical memory consistent for a set of streaming mode DMA translations
46373+ * after a transfer.
46374+ *
46375+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
46376+ * and usage.
46377+ */
46378+void
46379+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
46380+ int nelems, int dir)
46381+{
46382+ int i;
46383+
46384+ BUG_ON(dir == DMA_NONE);
46385+
46386+ for (i = 0; i < nelems; i++, sg++)
46387+ if (in_swiotlb_aperture(sg->dma_address))
46388+ sync_single(hwdev,
46389+ (void *)bus_to_virt(sg->dma_address),
46390+ sg->dma_length, dir);
46391+}
46392+
46393+void
46394+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
46395+ int nelems, int dir)
46396+{
46397+ int i;
46398+
46399+ BUG_ON(dir == DMA_NONE);
46400+
46401+ for (i = 0; i < nelems; i++, sg++)
46402+ if (in_swiotlb_aperture(sg->dma_address))
46403+ sync_single(hwdev,
46404+ (void *)bus_to_virt(sg->dma_address),
46405+ sg->dma_length, dir);
46406+}
46407+
46408+#ifdef CONFIG_HIGHMEM
46409+
46410+dma_addr_t
46411+swiotlb_map_page(struct device *hwdev, struct page *page,
46412+ unsigned long offset, size_t size,
46413+ enum dma_data_direction direction)
46414+{
46415+ struct phys_addr buffer;
46416+ dma_addr_t dev_addr;
46417+ char *map;
46418+
46419+ dev_addr = gnttab_dma_map_page(page) + offset;
46420+ if (address_needs_mapping(hwdev, dev_addr)) {
46421+ gnttab_dma_unmap_page(dev_addr);
46422+ buffer.page = page;
46423+ buffer.offset = offset;
46424+ map = map_single(hwdev, buffer, size, direction);
46425+ if (!map) {
46426+ swiotlb_full(hwdev, size, direction, 1);
46427+ map = io_tlb_overflow_buffer;
46428+ }
46429+ dev_addr = (dma_addr_t)virt_to_bus(map);
46430+ }
46431+
46432+ return dev_addr;
46433+}
46434+
46435+void
46436+swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
46437+ size_t size, enum dma_data_direction direction)
46438+{
46439+ BUG_ON(direction == DMA_NONE);
46440+ if (in_swiotlb_aperture(dma_address))
46441+ unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
46442+ else
46443+ gnttab_dma_unmap_page(dma_address);
46444+}
46445+
46446+#endif
46447+
46448+int
46449+swiotlb_dma_mapping_error(dma_addr_t dma_addr)
46450+{
46451+ return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
46452+}
46453+
46454+/*
46455+ * Return whether the given PCI device DMA address mask can be supported
46456+ * properly. For example, if your device can only drive the low 24-bits
46457+ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
46458+ * this function.
46459+ */
46460+int
46461+swiotlb_dma_supported (struct device *hwdev, u64 mask)
46462+{
46463+ return (mask >= ((1UL << dma_bits) - 1));
46464+}
46465+
46466+EXPORT_SYMBOL(swiotlb_init);
46467+EXPORT_SYMBOL(swiotlb_map_single);
46468+EXPORT_SYMBOL(swiotlb_unmap_single);
46469+EXPORT_SYMBOL(swiotlb_map_sg);
46470+EXPORT_SYMBOL(swiotlb_unmap_sg);
46471+EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
46472+EXPORT_SYMBOL(swiotlb_sync_single_for_device);
46473+EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
46474+EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
46475+EXPORT_SYMBOL(swiotlb_dma_mapping_error);
46476+EXPORT_SYMBOL(swiotlb_dma_supported);
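A hypothetical sketch of the map/sync/unmap life cycle of one streaming receive buffer against the routines exported above. Real drivers normally reach this code indirectly through the architecture's dma_map_single()/dma_unmap_single() wrappers; the device pointer, the buffer, and the asm/swiotlb.h declaration site are assumptions.

/* Illustrative only: one DMA_FROM_DEVICE transfer through the swiotlb. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <asm/swiotlb.h>

static int example_rx_one(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus = swiotlb_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (swiotlb_dma_mapping_error(bus))
		return -EIO;	/* we were handed the overflow buffer */

	/* ... hand 'bus' to the device and wait for the DMA to complete ... */

	/* Peek at the data while keeping the mapping alive: copies any
	 * bounce-buffered bytes back before the CPU reads them. */
	swiotlb_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);

	/* ... consume 'buf' ... */

	/* Finally release the mapping (this performs the closing sync). */
	swiotlb_unmap_single(dev, bus, len, DMA_FROM_DEVICE);
	return 0;
}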
46477Index: head-2008-11-25/scripts/Makefile.xen.awk
46478===================================================================
46479--- /dev/null 1970-01-01 00:00:00.000000000 +0000
46480+++ head-2008-11-25/scripts/Makefile.xen.awk 2007-08-06 15:10:49.000000000 +0200
46481@@ -0,0 +1,34 @@
46482+BEGIN {
46483+ is_rule = 0
46484+}
46485+
46486+/^[[:space:]]*#/ {
46487+ next
46488+}
46489+
46490+/^[[:space:]]*$/ {
46491+ if (is_rule)
46492+ print("")
46493+ is_rule = 0
46494+ next
46495+}
46496+
46497+/:[[:space:]]*%\.[cS][[:space:]]/ {
46498+ line = gensub(/%.([cS])/, "%-xen.\\1", "g", $0)
46499+ line = gensub(/(single-used-m)/, "xen-\\1", "g", line)
46500+ print line
46501+ is_rule = 1
46502+ next
46503+}
46504+
46505+/^[^\t]$/ {
46506+ if (is_rule)
46507+ print("")
46508+ is_rule = 0
46509+ next
46510+}
46511+
46512+is_rule {
46513+ print $0
46514+ next
46515+}