/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/edd.h>
#include <linux/frame.h>

#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/hvc-console.h>

#include <asm/paravirt.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/cpuid.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <acpi/pdc_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "multicalls.h"
void *xen_initial_gdt;

RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
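/*
 * Note: RESERVE_BRK() carves space out of the early brk area at link
 * time; the page reserved here is presumably where the shared info
 * page is mapped before the fixmap machinery is available.
 */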
static int xen_cpu_up_prepare_pv(unsigned int cpu);
static int xen_cpu_dead_pv(unsigned int cpu);

struct tls_descs {
	struct desc_struct desc[3];
};
/*
 * Updating the 3 TLS descriptors in the GDT on every task switch is
 * surprisingly expensive so we avoid updating them if they haven't
 * changed.  Since Xen writes different descriptors than the one
 * passed in the update_descriptor hypercall we keep shadow copies to
 * compare against.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		bool other_cpu = (cpu != smp_processor_id());
		bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu),
						NULL);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL))
			BUG();

		xen_setup_runstate_info(cpu);

		if (xen_have_vcpu_info_placement)
			xen_vcpu_setup(cpu);

		if (other_cpu && is_up &&
		    HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL))
			BUG();
	}
}
static void __init xen_banner(void)
{
	unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
	struct xen_extraversion extra;

	HYPERVISOR_xen_version(XENVER_extraversion, &extra);

	pr_info("Booting paravirtualized kernel %son %s\n",
		xen_feature(XENFEAT_auto_translated_physmap) ?
		"with PVH extensions " : "", pv_info.name);
	printk(KERN_INFO "Xen version: %d.%d%s%s\n",
	       version >> 16, version & 0xffff, extra.extraversion,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}
/* Check if running on Xen version (major, minor) or later */
bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
{
	unsigned int version;

	if (!xen_domain())
		return false;

	version = HYPERVISOR_xen_version(XENVER_version, NULL);
	if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
		((version >> 16) > major))
		return true;
	return false;
}
#define CPUID_THERM_POWER_LEAF 6
#define APERFMPERF_PRESENT 0

static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;

static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;
static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskebx = ~0;
	unsigned maskecx = ~0;
	unsigned maskedx = ~0;
	unsigned setecx = 0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	switch (*ax) {
	case 1:
		maskecx = cpuid_leaf1_ecx_mask;
		setecx = cpuid_leaf1_ecx_set_mask;
		maskedx = cpuid_leaf1_edx_mask;
		break;

	case CPUID_MWAIT_LEAF:
		/* Synthesize the values.. */
		*ax = 0;
		*bx = 0;
		*cx = cpuid_leaf5_ecx_val;
		*dx = cpuid_leaf5_edx_val;
		return;

	case CPUID_THERM_POWER_LEAF:
		/* Disabling APERFMPERF for kernel usage */
		maskecx = ~(1 << APERFMPERF_PRESENT);
		break;

	case 0xb:
		/* Suppress extended topology stuff */
		maskebx = 0;
		break;
	}

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));

	*bx &= maskebx;
	*cx &= maskecx;
	*cx |= setecx;
	*dx &= maskedx;
}
STACK_FRAME_NON_STANDARD(xen_cpuid); /* XEN_EMULATE_PREFIX */
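/*
 * XEN_EMULATE_PREFIX is a magic instruction sequence the hypervisor
 * recognises and traps on, so the CPUID that follows is emulated and
 * filtered by Xen rather than executed natively.  The
 * STACK_FRAME_NON_STANDARD annotation keeps objtool from warning
 * about the unusual code this produces.
 */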
static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.u.set_pminfo.id	= -1,
		.u.set_pminfo.type	= XEN_PM_PDC,
	};
	uint32_t buf[3];
	unsigned int ax, bx, cx, dx;
	unsigned int mwait_mask;

	/* We need to determine whether it is OK to expose the MWAIT
	 * capability to the kernel to harvest deeper than C3 states from ACPI
	 * _CST using the processor_harvest_xen.c module. For this to work, we
	 * need to gather the MWAIT_LEAF values (which the cstate.c code
	 * checks against). The hypervisor won't expose the MWAIT flag because
	 * it would break backwards compatibility; so we will find out directly
	 * from the hardware and hypercall.
	 */
	if (!xen_initial_domain())
		return false;

	/*
	 * When running under platform earlier than Xen4.2, do not expose
	 * mwait, to avoid the risk of loading native acpi pad driver
	 */
	if (!xen_running_on_version_or_later(4, 2))
		return false;

	ax = 1;
	cx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
		     (1 << (X86_FEATURE_MWAIT % 32));

	if ((cx & mwait_mask) != mwait_mask)
		return false;

	/* We need to emulate the MWAIT_LEAF and for that we need both
	 * ecx and edx. The hypercall provides only partial information.
	 */

	ax = CPUID_MWAIT_LEAF;
	bx = 0;
	cx = 0;
	dx = 0;

	native_cpuid(&ax, &bx, &cx, &dx);

	/* Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
	 * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
	 */
	buf[0] = ACPI_PDC_REVISION_ID;
	buf[1] = 1;
	buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);

	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

	if ((HYPERVISOR_platform_op(&op) == 0) &&
	    (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
		cpuid_leaf5_ecx_val = cx;
		cpuid_leaf5_edx_val = dx;
	}
	return true;
#else
	return false;
#endif
}
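/*
 * The cpuid_leaf5_*_val values captured above are what xen_cpuid()
 * hands back for CPUID_MWAIT_LEAF, so dom0's cpuidle code can see the
 * real monitor/mwait sub-C-state data even though the hypervisor
 * filters the MWAIT feature flag out of leaf 1.
 */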
static void __init xen_init_cpuid_mask(void)
{
	unsigned int ax, bx, cx, dx;
	unsigned int xsave_mask;

	cpuid_leaf1_edx_mask =
		~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	if (!xen_initial_domain())
		cpuid_leaf1_edx_mask &=
			~((1 << X86_FEATURE_ACPI));  /* disable ACPI */

	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));

	ax = 1;
	cx = 0;
	cpuid(1, &ax, &bx, &cx, &dx);

	xsave_mask =
		(1 << (X86_FEATURE_XSAVE % 32)) |
		(1 << (X86_FEATURE_OSXSAVE % 32));

	/* Xen will set CR4.OSXSAVE if supported and not disabled by force */
	if ((cx & xsave_mask) != xsave_mask)
		cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
	if (xen_check_mwait())
		cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}
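/*
 * Worked example: X86_FEATURE_MWAIT is bit 3 of leaf 1's ECX word, so
 * when xen_check_mwait() succeeds the set mask above is 1 << 3, and
 * xen_cpuid() ORs it back into the otherwise-filtered ECX.
 */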
static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

static void xen_end_context_switch(struct task_struct *next)
{
	xen_mc_flush();
	paravirt_end_context_switch(next);
}

static unsigned long xen_store_tr(void)
{
	return 0;
}
/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
	int level;
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	struct page *page;
	unsigned char dummy;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);
	page = pfn_to_page(pfn);

	pte = pfn_pte(pfn, prot);

	/*
	 * Careful: update_va_mapping() will fail if the virtual address
	 * we're poking isn't populated in the page tables.  We don't
	 * need to worry about the direct map (that's always in the page
	 * tables), but we need to be careful about vmap space.  In
	 * particular, the top level page table can lazily propagate
	 * entries between processes, so if we've switched mms since we
	 * vmapped the target in the first place, we might not have the
	 * top-level page table entry populated.
	 *
	 * We disable preemption because we want the same mm active when
	 * we probe the target and when we issue the hypercall.  We'll
	 * have the same nominal mm, but if we're a kernel thread, lazy
	 * mm dropping could change our pgd.
	 *
	 * Out of an abundance of caution, this uses __get_user() to fault
	 * in the target address just in case there's some obscure case
	 * in which the target address isn't readable.
	 */

	preempt_disable();

	probe_kernel_read(&dummy, v, 1);

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	if (!PageHighMem(page)) {
		void *av = __va(PFN_PHYS(pfn));

		if (av != v)
			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
				BUG();
	} else
		kmap_flush_unused();

	preempt_enable();
}
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	/*
	 * We need to mark all aliases of the LDT pages RO.  We
	 * don't need to call vm_flush_aliases(), though, since that's
	 * only responsible for flushing aliases out of the TLBs, not the
	 * page tables, and Xen will flush the TLB for us if needed.
	 *
	 * To avoid confusing future readers: none of this is necessary
	 * to load the LDT.  The hypervisor only checks this when the
	 * LDT is faulted in due to subsequent descriptor access.
	 */

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}
static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}
static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	trace_xen_cpu_set_ldt(addr, entries);

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
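/*
 * This is the batched-hypercall shape used throughout this file
 * (a sketch of the existing pattern, not a new API):
 *
 *	mcs = xen_mc_entry(sizeof(*op));	// reserve argument space
 *	op = mcs.args;				// fill in the request
 *	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *	xen_mc_issue(PARAVIRT_LAZY_CPU);	// flush, unless lazy
 *
 * xen_mc_issue() only flushes immediately when we're not in the
 * matching lazy mode; otherwise the call stays queued so several
 * hypercalls can be submitted in one hypervisor transition.
 */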
static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages..
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		int level;
		pte_t *ptep;
		unsigned long pfn, mfn;
		void *virt;

		/*
		 * The GDT is per-cpu and is in the percpu data area.
		 * That can be virtually mapped, so we need to do a
		 * page-walk to get the underlying MFN for the
		 * hypercall.  The page can also be in the kernel's
		 * linear range, so we need to RO that mapping too.
		 */
		ptep = lookup_address(va, &level);
		BUG_ON(ptep == NULL);

		pfn = pte_pfn(*ptep);
		mfn = pfn_to_mfn(pfn);
		virt = __va(PFN_PHYS(pfn));

		frames[f] = mfn;

		make_lowmem_page_readonly((void *)va);
		make_lowmem_page_readonly(virt);
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}
/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
	unsigned long frames[pages];
	int f;

	/*
	 * A GDT can be up to 64k in size, which corresponds to 8192
	 * 8-byte entries, or 16 4k pages..
	 */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		pte_t pte;
		unsigned long pfn, mfn;

		pfn = virt_to_pfn(va);
		mfn = pfn_to_mfn(pfn);

		pte = pfn_pte(pfn, PAGE_KERNEL_RO);

		if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
			BUG();

		frames[f] = mfn;
	}

	if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
		BUG();
}
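/*
 * Unlike xen_load_gdt(), this boot-time variant can use virt_to_pfn()
 * directly: the early GDT is mapped exactly once in the linear range,
 * so no page-table walk is needed and the page is simply remapped RO
 * with a plain update_va_mapping hypercall.
 */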
static inline bool desc_equal(const struct desc_struct *d1,
			      const struct desc_struct *d2)
{
	return d1->a == d2->a && d1->b == d2->b;
}
static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
	struct desc_struct *gdt;
	xmaddr_t maddr;
	struct multicall_space mc;

	if (desc_equal(shadow, &t->tls_array[i]))
		return;

	*shadow = t->tls_array[i];

	gdt = get_cpu_gdt_rw(cpu);
	maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	xen_mc_batch();

	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
	 * and lazy gs handling is enabled, it means we're in a
	 * context switch, and %gs has just been saved.  This means we
	 * can zero it out to prevent faults on exit from the
	 * hypervisor if the next process has no %gs.  Either way, it
	 * has been saved, and the new value will get loaded properly.
	 * This will go away as soon as Xen has been modified to not
	 * save/restore %gs for normal hypercalls.
	 *
	 * On x86_64, this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64.
	 *
	 * For x86_64, we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}
#endif
static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
	u64 entry = *(u64 *)ptr;

	trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}
static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(*val);
#ifdef CONFIG_X86_64
	/*
	 * Look for known traps using IST, and substitute them
	 * appropriately.  The debugger ones are the only ones we care
	 * about.  Xen will handle faults like double_fault,
	 * so we should never see them.  Warn if
	 * there's an unexpected IST-using fault handler.
	 */
	if (addr == (unsigned long)debug)
		addr = (unsigned long)xen_debug;
	else if (addr == (unsigned long)int3)
		addr = (unsigned long)xen_int3;
	else if (addr == (unsigned long)stack_segment)
		addr = (unsigned long)xen_stack_segment;
	else if (addr == (unsigned long)double_fault) {
		/* Don't need to handle these */
		return 0;
#ifdef CONFIG_X86_MCE
	} else if (addr == (unsigned long)machine_check) {
		/*
		 * when xen hypervisor inject vMCE to guest,
		 * use native mce handler to handle it
		 */
		;
#endif
	} else if (addr == (unsigned long)nmi)
		/*
		 * Use the native version as well.
		 */
		;
	else {
		/* Some other trap using IST? */
		if (WARN_ON(val->ist != 0))
			return 0;
	}
#endif	/* CONFIG_X86_64 */
	info->address = addr;

	info->cs = gate_segment(*val);
	info->flags = val->dpl;
	/* interrupt gates clear IF */
	if (val->type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
}
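/*
 * trap_info.flags encoding, as built above: the low two bits carry the
 * gate's DPL, and bit 2 tells Xen to disable event delivery on entry,
 * mirroring how a native interrupt gate clears IF.
 */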
/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);
/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	trace_xen_cpu_write_idt_entry(dt, entrynum, g);

	preempt_disable();

	start = __this_cpu_read(idt_desc.address);
	end = start + __this_cpu_read(idt_desc.size) + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}
static void xen_convert_trap_info(const struct desc_ptr *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size+1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc *)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]))
			out++;
	}
	traps[out].address = 0;
}
void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);

	xen_convert_trap_info(desc, traps);
}
/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	trace_xen_cpu_load_idt(desc);

	spin_lock(&lock);

	memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}
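/*
 * traps[] has 257 slots: up to 256 converted gates plus the all-zero
 * terminating entry that xen_convert_trap_info() writes at the end.
 */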
/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}
/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
					    const void *desc, int type)
{
	trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			dt[entry] = *(struct desc_struct *)desc;
	}

	}
}
static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
	tss->x86_tss.sp0 = thread->sp0;
}
void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}
static void xen_io_delay(void)
{
}
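/*
 * The native io_delay pokes port 0x80 for legacy ISA timing; in a PV
 * guest that would presumably just bounce through the hypervisor for
 * no benefit, so it is stubbed out entirely.
 */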
static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
	unsigned long cr0 = this_cpu_read(xen_cr0_value);

	if (unlikely(cr0 == 0)) {
		cr0 = native_read_cr0();
		this_cpu_write(xen_cr0_value, cr0);
	}

	return cr0;
}
static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	this_cpu_write(xen_cr0_value, cr0);

	/* Only pay attention to cr0.TS; everything else is
	   ignored. */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
static void xen_write_cr4(unsigned long cr4)
{
	cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE);

	native_write_cr4(cr4);
}
#ifdef CONFIG_X86_64
static inline unsigned long xen_read_cr8(void)
{
	return 0;
}

static inline void xen_write_cr8(unsigned long val)
{
	BUG_ON(val);
}
#endif
static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
	u64 val;

	if (pmu_msr_read(msr, &val, err))
		return val;

	val = native_read_msr_safe(msr, err);
	switch (msr) {
	case MSR_IA32_APICBASE:
#ifdef CONFIG_X86_X2APIC
		if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
#endif
			val &= ~X2APIC_ENABLE;
		break;
	}
	return val;
}
static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
	int ret;

	ret = 0;

	switch (msr) {
#ifdef CONFIG_X86_64
		unsigned which;
		u64 base;

	case MSR_FS_BASE:		which = SEGBASE_FS; goto set;
	case MSR_KERNEL_GS_BASE:	which = SEGBASE_GS_USER; goto set;
	case MSR_GS_BASE:		which = SEGBASE_GS_KERNEL; goto set;

	set:
		base = ((u64)high << 32) | low;
		if (HYPERVISOR_set_segment_base(which, base) != 0)
			ret = -EIO;
		break;
#endif

	case MSR_STAR:
	case MSR_CSTAR:
	case MSR_LSTAR:
	case MSR_SYSCALL_MASK:
	case MSR_IA32_SYSENTER_CS:
	case MSR_IA32_SYSENTER_ESP:
	case MSR_IA32_SYSENTER_EIP:
		/* Fast syscall setup is all done in hypercalls, so
		   these are all ignored.  Stub them out here to stop
		   Xen console noise. */
		break;

	default:
		if (!pmu_msr_write(msr, low, high, &ret))
			ret = native_write_msr_safe(msr, low, high);
	}

	return ret;
}
static u64 xen_read_msr(unsigned int msr)
{
	/*
	 * This will silently swallow a #GP from RDMSR.  It may be worth
	 * changing that.
	 */
	int err;

	return xen_read_msr_safe(msr, &err);
}
static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
{
	/*
	 * This will silently swallow a #GP from WRMSR.  It may be worth
	 * changing that.
	 */
	xen_write_msr_safe(msr, low, high);
}
void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}
/* This is called once we have the cpu_possible_mask */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		/* Set up direct vCPU id mapping for PV guests. */
		per_cpu(xen_vcpu_id, cpu) = cpu;
		xen_vcpu_setup(cpu);
	}

	/*
	 * xen_vcpu_setup managed to place the vcpu_info within the
	 * percpu area for all cpus, so make use of it.
	 */
	if (xen_have_vcpu_info_placement) {
		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}
static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)							\
	case PARAVIRT_PATCH(op.x):					\
	if (xen_have_vcpu_info_placement) {				\
		start = (char *)xen_##x##_direct;			\
		end = xen_##x##_direct_end;				\
		reloc = xen_##x##_direct_reloc;				\
	}								\
	goto patch_site

	switch (type) {
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/* Note: because reloc is assigned from something that
		   appears to be an array, gcc assumes it's non-null,
		   but doesn't know its relationship with start and
		   end. */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}
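/*
 * In short: for the four irq-flag operations the hand-written
 * xen_*_direct stubs are patched inline at the call site (when
 * vcpu_info placement made the direct versions usable), with one
 * embedded relocation fixed up for the new location; everything else
 * falls back to paravirt_patch_default().
 */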
static const struct pv_info xen_info __initconst = {
	.shared_kernel_pmd = 0,

#ifdef CONFIG_X86_64
	.extra_user_64bit_cs = FLAT_USER_CS64,
#endif
	.name = "Xen",
};
static const struct pv_init_ops xen_init_ops __initconst = {
	.patch = xen_patch,
};
static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.read_cr0 = xen_read_cr0,
	.write_cr0 = xen_write_cr0,

	.read_cr4 = native_read_cr4,
	.write_cr4 = xen_write_cr4,

#ifdef CONFIG_X86_64
	.read_cr8 = xen_read_cr8,
	.write_cr8 = xen_write_cr8,
#endif

	.wbinvd = native_wbinvd,

	.read_msr = xen_read_msr,
	.write_msr = xen_write_msr,

	.read_msr_safe = xen_read_msr_safe,
	.write_msr_safe = xen_write_msr_safe,

	.read_pmc = xen_read_pmc,

	.iret = xen_iret,
#ifdef CONFIG_X86_64
	.usergs_sysret64 = xen_sysret64,
#endif

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = xen_load_gs_index,
#endif

	.alloc_ldt = xen_alloc_ldt,
	.free_ldt = xen_free_ldt,

	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	/* Xen takes care of %gs when switching to usermode for us */
	.swapgs = paravirt_nop,

	.start_context_switch = paravirt_start_context_switch,
	.end_context_switch = xen_end_context_switch,
};
static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}
static const struct machine_ops xen_machine_ops __initconst = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_power_off,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};
static unsigned char xen_get_nmi_reason(void)
{
	unsigned char reason = 0;

	/* Construct a value which looks like it came from port 0x61. */
	if (test_bit(_XEN_NMIREASON_io_error,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_IOCHK;
	if (test_bit(_XEN_NMIREASON_pci_serr,
		     &HYPERVISOR_shared_info->arch.nmi_reason))
		reason |= NMI_REASON_SERR;

	return reason;
}
static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
	struct xen_platform_op op;
	struct edd_info *edd_info;
	u32 *mbr_signature;
	unsigned nr;
	int ret;

	edd_info = boot_params.eddbuf;
	mbr_signature = boot_params.edd_mbr_sig_buffer;

	op.cmd = XENPF_firmware_info;

	op.u.firmware_info.type = XEN_FW_DISK_INFO;
	for (nr = 0; nr < EDDMAXNR; nr++) {
		struct edd_info *info = edd_info + nr;

		op.u.firmware_info.index = nr;
		info->params.length = sizeof(info->params);
		set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
				     &info->params);
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;

#define C(x) info->x = op.u.firmware_info.u.disk_info.x
		C(device);
		C(version);
		C(interface_support);
		C(legacy_max_cylinder);
		C(legacy_max_head);
		C(legacy_sectors_per_track);
#undef C
	}
	boot_params.eddbuf_entries = nr;

	op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
	for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
		op.u.firmware_info.index = nr;
		ret = HYPERVISOR_platform_op(&op);
		if (ret)
			break;
		mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
	}
	boot_params.edd_mbr_sig_buf_entries = nr;
#endif
}
/*
 * Set up the GDT and segment registers for -fstack-protector.  Until
 * we do this, we have to be careful not to call any stack-protected
 * function, which is most of the kernel.
 */
static void xen_setup_gdt(int cpu)
{
	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
	pv_cpu_ops.load_gdt = xen_load_gdt_boot;

	setup_stack_canary_segment(0);
	switch_to_new_gdt(0);

	pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
	pv_cpu_ops.load_gdt = xen_load_gdt;
}
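/*
 * The _boot GDT ops are installed only around switch_to_new_gdt():
 * this early it is not safe to do the page-table walks and lowmem
 * aliasing that the runtime xen_load_gdt()/xen_write_gdt_entry() rely
 * on, so simpler one-shot variants are swapped in and back out.
 */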
static void __init xen_dom0_set_legacy_features(void)
{
	x86_platform.legacy.rtc = 1;
}
/* First C function to be called on Xen boot */
asmlinkage __visible void __init xen_start_kernel(void)
{
	struct physdev_set_iopl set_iopl;
	unsigned long initrd_start = 0;
	int rc;

	if (!xen_start_info)
		return;

	xen_domain_type = XEN_PV_DOMAIN;

	xen_setup_features();

	xen_setup_machphys_mapping();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_cpu_ops = xen_cpu_ops;

	x86_platform.get_nmi_reason = xen_get_nmi_reason;

	x86_init.resources.memory_setup = xen_memory_setup;
	x86_init.oem.arch_setup = xen_arch_setup;
	x86_init.oem.banner = xen_banner;

	xen_init_time_ops();

	/*
	 * Set up some pagetable state before starting to set any ptes.
	 */

	xen_init_mmu_ops();

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;

	/*
	 * Prevent page tables from being allocated in highmem, even
	 * if CONFIG_HIGHPTE is enabled.
	 */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	/* Work out if we support NX */
	x86_configure_nx();

	/* Get mfn list */
	xen_build_dynamic_phys_to_machine();

	/*
	 * Set up kernel GDT and segment registers, mainly so that
	 * -fstack-protector code can be executed.
	 */
	xen_setup_gdt(0);

	xen_init_irq_ops();
	xen_init_cpuid_mask();

#ifdef CONFIG_X86_LOCAL_APIC
	/*
	 * set up the basic apic ops.
	 */
	xen_init_apic();
#endif

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

	/*
	 * The only reliable way to retain the initial address of the
	 * percpu gdt_page is to remember it here, so we can go and
	 * mark it RW later, when the initial percpu area is freed.
	 */
	xen_initial_gdt = &per_cpu(gdt_page, 0);

	xen_smp_init();

#ifdef CONFIG_ACPI_NUMA
	/*
	 * The pages we get from Xen are not related to machine pages, so
	 * any NUMA information the kernel tries to get from ACPI will
	 * be meaningless.  Prevent it from trying.
	 */
	acpi_numa = -1;
#endif
	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));

	local_irq_disable();
	early_boot_irqs_disabled = true;

	xen_raw_console_write("mapping kernel into physical memory\n");
	xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
				   xen_start_info->nr_pages);
	xen_reserve_special_pages();

	/* keep using Xen gdt for now; no urgent need to change it */

#ifdef CONFIG_X86_32
	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;
#else
	pv_info.kernel_rpl = 0;
#endif
	/* set the limit of our address space */
	xen_reserve_top();

	/*
	 * We used to do this in xen_arch_setup, but that is too late
	 * on AMD where early_cpu_init (run before ->arch_setup()) calls
	 * early_amd_init which pokes 0xcf8 port.
	 */
	set_iopl.iopl = 1;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
	if (rc != 0)
		xen_raw_printk("physdev_op failed %d\n", rc);

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
	new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
#endif

	if (xen_start_info->mod_start) {
		if (xen_start_info->flags & SIF_MOD_START_PFN)
			initrd_start = PFN_PHYS(xen_start_info->mod_start);
		else
			initrd_start = __pa(xen_start_info->mod_start);
	}

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = initrd_start;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
	boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;

	if (!xen_initial_domain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
		if (pci_xen)
			x86_init.pci.arch_init = pci_xen_init;
	} else {
		const struct dom0_vga_console_info *info =
			(void *)((char *)xen_start_info +
				 xen_start_info->console.dom0.info_off);
		struct xen_platform_op op = {
			.cmd = XENPF_firmware_info,
			.interface_version = XENPF_INTERFACE_VERSION,
			.u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
		};

		x86_platform.set_legacy_features =
				xen_dom0_set_legacy_features;
		xen_init_vga(info, xen_start_info->console.dom0.info_size);
		xen_start_info->console.domU.mfn = 0;
		xen_start_info->console.domU.evtchn = 0;

		if (HYPERVISOR_platform_op(&op) == 0)
			boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;

		/* Make sure ACS will be enabled */
		pci_request_acs();

		xen_acpi_sleep_register();

		/* Avoid searching for BIOS MP tables */
		x86_init.mpparse.find_smp_config = x86_init_noop;
		x86_init.mpparse.get_smp_config = x86_init_uint_noop;

		xen_boot_params_init_edd();
	}
#ifdef CONFIG_PCI
	/* PCI BIOS service won't work from a PV guest. */
	pci_probe &= ~PCI_PROBE_BIOS;
#endif
	xen_raw_console_write("about to get started...\n");

	/* Let's presume PV guests always boot on vCPU with id 0. */
	per_cpu(xen_vcpu_id, 0) = 0;

	xen_setup_runstate_info(0);

	xen_efi_init();

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}
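/*
 * Note that xen_start_kernel() does not return: it hands off to
 * i386_start_kernel() or x86_64_start_reservations(), which continue
 * into the generic start_kernel() path.
 */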
static int xen_cpu_up_prepare_pv(unsigned int cpu)
{
	int rc;

	xen_setup_timer(cpu);

	rc = xen_smp_intr_init(cpu);
	if (rc) {
		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
		     cpu, rc);
		return rc;
	}
	return 0;
}

static int xen_cpu_dead_pv(unsigned int cpu)
{
	xen_smp_intr_free(cpu);

	xen_teardown_timer(cpu);

	return 0;
}
static uint32_t __init xen_platform_pv(void)
{
	if (xen_pv_domain())
		return xen_cpuid_base();

	return 0;
}
static void xen_set_cpu_features(struct cpuinfo_x86 *c)
{
	clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
	set_cpu_cap(c, X86_FEATURE_XENPV);
}
const struct hypervisor_x86 x86_hyper_xen_pv = {
	.name			= "Xen PV",
	.detect			= xen_platform_pv,
	.set_cpu_features	= xen_set_cpu_features,
	.pin_vcpu		= xen_pin_vcpu,
};
EXPORT_SYMBOL(x86_hyper_xen_pv);