x86/xen: split xen_smp_prepare_boot_cpu()
author     Vitaly Kuznetsov <vkuznets@redhat.com>
           Tue, 14 Mar 2017 17:35:43 +0000 (18:35 +0100)
committer  Juergen Gross <jgross@suse.com>
           Tue, 2 May 2017 09:04:31 +0000 (11:04 +0200)
Split xen_smp_prepare_boot_cpu() into xen_pv_smp_prepare_boot_cpu() and
xen_hvm_smp_prepare_boot_cpu() to support further splitting of smp.c.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
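
In short, and condensed from the hunks below (the /* ... */ elisions are mine, not part of the patch), the split leaves the two variants wired up as follows: the PV helper stays in the static xen_smp_ops table, while the HVM helper is installed at runtime by xen_hvm_smp_init().

    /* PV guests: selected via the static ops table */
    static const struct smp_ops xen_smp_ops __initconst = {
            .smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
            /* ... remaining callbacks unchanged ... */
    };

    /* HVM guests: patched into the generic smp_ops at init time */
    void __init xen_hvm_smp_init(void)
    {
            /* ... remaining callbacks unchanged ... */
            smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
    }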
arch/x86/xen/smp.c

index 18fbec1fca342743736cb2f5d4b3badcd5b9131d..00c623bef72f86afa7ed49e5b44588a8891cd09e 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -297,35 +297,46 @@ static void __init xen_filter_cpu_maps(void)
 
 }
 
-static void __init xen_smp_prepare_boot_cpu(void)
+static void __init xen_pv_smp_prepare_boot_cpu(void)
 {
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();
 
-       if (xen_pv_domain()) {
-               if (!xen_feature(XENFEAT_writable_page_tables))
-                       /* We've switched to the "real" per-cpu gdt, so make
-                        * sure the old memory can be recycled. */
-                       make_lowmem_page_readwrite(xen_initial_gdt);
+       if (!xen_feature(XENFEAT_writable_page_tables))
+               /* We've switched to the "real" per-cpu gdt, so make
+                * sure the old memory can be recycled. */
+               make_lowmem_page_readwrite(xen_initial_gdt);
 
 #ifdef CONFIG_X86_32
-               /*
-                * Xen starts us with XEN_FLAT_RING1_DS, but linux code
-                * expects __USER_DS
-                */
-               loadsegment(ds, __USER_DS);
-               loadsegment(es, __USER_DS);
+       /*
+        * Xen starts us with XEN_FLAT_RING1_DS, but linux code
+        * expects __USER_DS
+        */
+       loadsegment(ds, __USER_DS);
+       loadsegment(es, __USER_DS);
 #endif
 
-               xen_filter_cpu_maps();
-               xen_setup_vcpu_info_placement();
-       }
+       xen_filter_cpu_maps();
+       xen_setup_vcpu_info_placement();
+
+       /*
+        * The alternative logic (which patches the unlock/lock) runs before
+        * the smp bootup up code is activated. Hence we need to set this up
+        * the core kernel is being patched. Otherwise we will have only
+        * modules patched but not core code.
+        */
+       xen_init_spinlocks();
+}
+
+static void __init xen_hvm_smp_prepare_boot_cpu(void)
+{
+       BUG_ON(smp_processor_id() != 0);
+       native_smp_prepare_boot_cpu();
 
        /*
         * Setup vcpu_info for boot CPU.
         */
-       if (xen_hvm_domain())
-               xen_vcpu_setup(0);
+       xen_vcpu_setup(0);
 
        /*
         * The alternative logic (which patches the unlock/lock) runs before
@@ -717,7 +728,7 @@ static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
 }
 
 static const struct smp_ops xen_smp_ops __initconst = {
-       .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
+       .smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,
 
@@ -754,5 +765,5 @@ void __init xen_hvm_smp_init(void)
        smp_ops.cpu_die = xen_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
-       smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
+       smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
 }