x86/pvlocks: Move paravirt spinlock functions into own header
author     Juergen Gross <jgross@suse.com>
           Mon, 5 Jan 2026 11:05:20 +0000 (12:05 +0100)
committer  Borislav Petkov (AMD) <bp@alien8.de>
           Tue, 13 Jan 2026 13:57:45 +0000 (14:57 +0100)
Instead of having the pv spinlock function definitions in paravirt.h,
move them into the new header paravirt-spinlock.h.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://patch.msgid.link/20260105110520.21356-22-jgross@suse.com
12 files changed:
arch/x86/hyperv/hv_spinlock.c
arch/x86/include/asm/paravirt-base.h
arch/x86/include/asm/paravirt-spinlock.h [new file with mode: 0644]
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/qspinlock.h
arch/x86/kernel/Makefile
arch/x86/kernel/kvm.c
arch/x86/kernel/paravirt-spinlocks.c
arch/x86/kernel/paravirt.c
arch/x86/xen/spinlock.c
tools/objtool/check.c
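
The practical effect for a hypervisor backend is that the lock ops move out of the pv_ops.lock sub-structure and into the standalone pv_ops_lock instance declared by the new asm/paravirt-spinlock.h. A minimal sketch of a converted setup path, modeled on the hv_spinlock.c and kvm.c hunks below and assuming CONFIG_PARAVIRT_SPINLOCKS=y (my_wait, my_kick and my_init_pv_spinlocks are illustrative names, not part of this patch):

#include <linux/init.h>
#include <asm/paravirt-spinlock.h>

static void my_wait(u8 *ptr, u8 val) { /* block until *ptr != val or we get kicked */ }
static void my_kick(int cpu)         { /* wake the vCPU parked in my_wait() */ }

static void __init my_init_pv_spinlocks(void)
{
	/* Was: pv_ops.lock.queued_spin_lock_slowpath = ...; and so on. */
	__pv_init_lock_hash();
	pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops_lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops_lock.wait = my_wait;
	pv_ops_lock.kick = my_kick;
}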

diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
index 2a3c2afb015450afa9b92fbc33ec718c18af73ad..210b494e4de0228d85b9dca183595a096cbdeb73 100644
@@ -78,11 +78,11 @@ void __init hv_init_spinlocks(void)
        pr_info("PV spinlocks enabled\n");
 
        __pv_init_lock_hash();
-       pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-       pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-       pv_ops.lock.wait = hv_qlock_wait;
-       pv_ops.lock.kick = hv_qlock_kick;
-       pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
+       pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+       pv_ops_lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+       pv_ops_lock.wait = hv_qlock_wait;
+       pv_ops_lock.kick = hv_qlock_kick;
+       pv_ops_lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted);
 }
 
 static __init int hv_parse_nopvspin(char *arg)
diff --git a/arch/x86/include/asm/paravirt-base.h b/arch/x86/include/asm/paravirt-base.h
index 3827ea20de187efddc04d8d086380478b2d57867..982a0b93bc766210e4c9d9dd7f29c560c61e4b40 100644
@@ -26,4 +26,10 @@ u64 _paravirt_ident_64(u64);
 #endif
 #define paravirt_nop   ((void *)nop_func)
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void paravirt_set_cap(void);
+#else
+static inline void paravirt_set_cap(void) { }
+#endif
+
 #endif /* _ASM_X86_PARAVIRT_BASE_H */
diff --git a/arch/x86/include/asm/paravirt-spinlock.h b/arch/x86/include/asm/paravirt-spinlock.h
new file mode 100644
index 0000000..a5011ef
--- /dev/null
+++ b/arch/x86/include/asm/paravirt-spinlock.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_X86_PARAVIRT_SPINLOCK_H
+#define _ASM_X86_PARAVIRT_SPINLOCK_H
+
+#include <asm/paravirt_types.h>
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#endif
+
+struct qspinlock;
+
+struct pv_lock_ops {
+       void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+       struct paravirt_callee_save queued_spin_unlock;
+
+       void (*wait)(u8 *ptr, u8 val);
+       void (*kick)(int cpu);
+
+       struct paravirt_callee_save vcpu_is_preempted;
+} __no_randomize_layout;
+
+extern struct pv_lock_ops pv_ops_lock;
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+extern bool nopvspin;
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+                                                        u32 val)
+{
+       PVOP_VCALL2(pv_ops_lock, queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+       PVOP_ALT_VCALLEE1(pv_ops_lock, queued_spin_unlock, lock,
+                         "movb $0, (%%" _ASM_ARG1 ");",
+                         ALT_NOT(X86_FEATURE_PVUNLOCK));
+}
+
+static __always_inline bool pv_vcpu_is_preempted(long cpu)
+{
+       return PVOP_ALT_CALLEE1(bool, pv_ops_lock, vcpu_is_preempted, cpu,
+                               "xor %%" _ASM_AX ", %%" _ASM_AX ";",
+                               ALT_NOT(X86_FEATURE_VCPUPREEMPT));
+}
+
+#define queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * A smp_store_release() on the least-significant byte.
+ */
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
+{
+       smp_store_release(&lock->locked, 0);
+}
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+       pv_queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+       kcsan_release();
+       pv_queued_spin_unlock(lock);
+}
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(long cpu)
+{
+       return pv_vcpu_is_preempted(cpu);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+       PVOP_VCALL2(pv_ops_lock, wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+       PVOP_VCALL1(pv_ops_lock, kick, cpu);
+}
+
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+void __init native_pv_lock_init(void);
+__visible void __native_queued_spin_unlock(struct qspinlock *lock);
+bool pv_is_native_spin_unlock(void);
+__visible bool __native_vcpu_is_preempted(long cpu);
+bool pv_is_native_vcpu_is_preempted(void);
+
+/*
+ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should keep this key
+ * disabled. Native does not touch the key.
+ *
+ * When in a guest then native_pv_lock_init() enables the key first and
+ * KVM/XEN might conditionally disable it later in the boot process again.
+ */
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
+#define virt_spin_lock virt_spin_lock
+static inline bool virt_spin_lock(struct qspinlock *lock)
+{
+       int val;
+
+       if (!static_branch_likely(&virt_spin_lock_key))
+               return false;
+
+       /*
+        * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+        * back to a Test-and-Set spinlock, because fair locks have
+        * horrible lock 'holder' preemption issues.
+        */
+
+ __retry:
+       val = atomic_read(&lock->val);
+
+       if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
+               cpu_relax();
+               goto __retry;
+       }
+
+       return true;
+}
+
+#endif /* _ASM_X86_PARAVIRT_SPINLOCK_H */
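
As the comment block in the new header spells out, native_pv_lock_init() enables virt_spin_lock_key whenever the kernel boots under a hypervisor, and a guest that gets real PV spinlocks (or knows its vCPUs are pinned) is expected to disable the key again, as KVM and Xen do later in boot. A sketch of that step (my_guest_has_pv_spinlocks and my_vcpus_are_pinned are hypothetical capability checks, not part of this patch):

#include <linux/init.h>
#include <linux/jump_label.h>
#include <asm/paravirt-spinlock.h>

static void __init my_guest_lock_setup(void)
{
	/* native_pv_lock_init() already enabled the key because we run virtualized. */
	if (my_guest_has_pv_spinlocks() || my_vcpus_are_pinned())
		static_branch_disable(&virt_spin_lock_key);
}
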
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ec274d13bae08dc9d5f4500a78e1cf107d26d62f..b21072af731da0e3bcff0d4bb10f8c961812f29e 100644
 #include <linux/cpumask.h>
 #include <asm/frame.h>
 
-__visible void __native_queued_spin_unlock(struct qspinlock *lock);
-bool pv_is_native_spin_unlock(void);
-__visible bool __native_vcpu_is_preempted(long cpu);
-bool pv_is_native_vcpu_is_preempted(void);
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-void __init paravirt_set_cap(void);
-#endif
-
 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void)
 {
@@ -522,46 +513,7 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 {
        pv_ops.mmu.set_fixmap(idx, phys, flags);
 }
-#endif
-
-#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-
-static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
-                                                       u32 val)
-{
-       PVOP_VCALL2(pv_ops, lock.queued_spin_lock_slowpath, lock, val);
-}
-
-static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
-{
-       PVOP_ALT_VCALLEE1(pv_ops, lock.queued_spin_unlock, lock,
-                         "movb $0, (%%" _ASM_ARG1 ");",
-                         ALT_NOT(X86_FEATURE_PVUNLOCK));
-}
-
-static __always_inline void pv_wait(u8 *ptr, u8 val)
-{
-       PVOP_VCALL2(pv_ops, lock.wait, ptr, val);
-}
-
-static __always_inline void pv_kick(int cpu)
-{
-       PVOP_VCALL1(pv_ops, lock.kick, cpu);
-}
-
-static __always_inline bool pv_vcpu_is_preempted(long cpu)
-{
-       return PVOP_ALT_CALLEE1(bool, pv_ops, lock.vcpu_is_preempted, cpu,
-                               "xor %%" _ASM_AX ", %%" _ASM_AX ";",
-                               ALT_NOT(X86_FEATURE_VCPUPREEMPT));
-}
 
-void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
-bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
-
-#endif /* SMP && PARAVIRT_SPINLOCKS */
-
-#ifdef CONFIG_PARAVIRT_XXL
 static __always_inline unsigned long arch_local_save_flags(void)
 {
        return PVOP_ALT_CALLEE0(unsigned long, pv_ops, irq.save_fl, "pushf; pop %%rax;",
@@ -588,8 +540,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
 }
 #endif
 
-void native_pv_lock_init(void) __init;
-
 #else  /* __ASSEMBLER__ */
 
 #ifdef CONFIG_X86_64
@@ -613,12 +563,6 @@ void native_pv_lock_init(void) __init;
 #endif /* __ASSEMBLER__ */
 #else  /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
-
-#ifndef __ASSEMBLER__
-static inline void native_pv_lock_init(void)
-{
-}
-#endif
 #endif /* !CONFIG_PARAVIRT */
 
 #ifndef __ASSEMBLER__
@@ -634,10 +578,5 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 }
 #endif
 
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-static inline void paravirt_set_cap(void)
-{
-}
-#endif
 #endif /* __ASSEMBLER__ */
 #endif /* _ASM_X86_PARAVIRT_H */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index b36d425d099bafd4523fd6d51998e1f64fb7ff3d..7ccd41628d3691bf0caadd2164e059cfa505eaed 100644
@@ -184,22 +184,6 @@ struct pv_mmu_ops {
 #endif
 } __no_randomize_layout;
 
-#ifdef CONFIG_SMP
-#include <asm/spinlock_types.h>
-#endif
-
-struct qspinlock;
-
-struct pv_lock_ops {
-       void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
-       struct paravirt_callee_save queued_spin_unlock;
-
-       void (*wait)(u8 *ptr, u8 val);
-       void (*kick)(int cpu);
-
-       struct paravirt_callee_save vcpu_is_preempted;
-} __no_randomize_layout;
-
 /* This contains all the paravirt structures: we get a convenient
  * number for each function using the offset which we use to indicate
  * what to patch. */
@@ -207,7 +191,6 @@ struct paravirt_patch_template {
        struct pv_cpu_ops       cpu;
        struct pv_irq_ops       irq;
        struct pv_mmu_ops       mmu;
-       struct pv_lock_ops      lock;
 } __no_randomize_layout;
 
 extern struct paravirt_patch_template pv_ops;
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 68da67df304d5dc6629d1de4723f0abfc93b5d84..25a1919542d9d5222f44befffd08ed03d508b83d 100644
@@ -7,6 +7,9 @@
 #include <asm-generic/qspinlock_types.h>
 #include <asm/paravirt.h>
 #include <asm/rmwcc.h>
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt-spinlock.h>
+#endif
 
 #define _Q_PENDING_LOOPS       (1 << 9)
 
@@ -27,90 +30,10 @@ static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lo
        return val;
 }
 
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __pv_init_lock_hash(void);
-extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
-extern bool nopvspin;
-
-#define        queued_spin_unlock queued_spin_unlock
-/**
- * queued_spin_unlock - release a queued spinlock
- * @lock : Pointer to queued spinlock structure
- *
- * A smp_store_release() on the least-significant byte.
- */
-static inline void native_queued_spin_unlock(struct qspinlock *lock)
-{
-       smp_store_release(&lock->locked, 0);
-}
-
-static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
-{
-       pv_queued_spin_lock_slowpath(lock, val);
-}
-
-static inline void queued_spin_unlock(struct qspinlock *lock)
-{
-       kcsan_release();
-       pv_queued_spin_unlock(lock);
-}
-
-#define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(long cpu)
-{
-       return pv_vcpu_is_preempted(cpu);
-}
+#ifndef CONFIG_PARAVIRT
+static inline void native_pv_lock_init(void) { }
 #endif
 
-#ifdef CONFIG_PARAVIRT
-/*
- * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
- *
- * Native (and PV wanting native due to vCPU pinning) should keep this key
- * disabled. Native does not touch the key.
- *
- * When in a guest then native_pv_lock_init() enables the key first and
- * KVM/XEN might conditionally disable it later in the boot process again.
- */
-DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
-
-/*
- * Shortcut for the queued_spin_lock_slowpath() function that allows
- * virt to hijack it.
- *
- * Returns:
- *   true - lock has been negotiated, all done;
- *   false - queued_spin_lock_slowpath() will do its thing.
- */
-#define virt_spin_lock virt_spin_lock
-static inline bool virt_spin_lock(struct qspinlock *lock)
-{
-       int val;
-
-       if (!static_branch_likely(&virt_spin_lock_key))
-               return false;
-
-       /*
-        * On hypervisors without PARAVIRT_SPINLOCKS support we fall
-        * back to a Test-and-Set spinlock, because fair locks have
-        * horrible lock 'holder' preemption issues.
-        */
-
- __retry:
-       val = atomic_read(&lock->val);
-
-       if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
-               cpu_relax();
-               goto __retry;
-       }
-
-       return true;
-}
-
-#endif /* CONFIG_PARAVIRT */
-
 #include <asm-generic/qspinlock.h>
 
 #endif /* _ASM_X86_QSPINLOCK_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index bc184dd38d993b4ce4eedd7539dfe72f0bee0920..e9aeeeafad173a3b79abfad21bf064f6b79fd3e4 100644
@@ -126,7 +126,7 @@ obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o
 
 obj-$(CONFIG_KVM_GUEST)                += kvm.o kvmclock.o
 obj-$(CONFIG_PARAVIRT)         += paravirt.o
-obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
+obj-$(CONFIG_PARAVIRT)         += paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)   += pvclock.o
 obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 21b4de55f823de6f846a3c64dd86569440826c5b..de550b12d9ab3a20b107f2cb8b6c97e5f3e11395 100644
@@ -829,8 +829,10 @@ static void __init kvm_guest_init(void)
                has_steal_clock = 1;
                static_call_update(pv_steal_clock, kvm_steal_clock);
 
-               pv_ops.lock.vcpu_is_preempted =
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+               pv_ops_lock.vcpu_is_preempted =
                        PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
+#endif
        }
 
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -1126,11 +1128,11 @@ void __init kvm_spinlock_init(void)
        pr_info("PV spinlocks enabled\n");
 
        __pv_init_lock_hash();
-       pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-       pv_ops.lock.queued_spin_unlock =
+       pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+       pv_ops_lock.queued_spin_unlock =
                PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-       pv_ops.lock.wait = kvm_wait;
-       pv_ops.lock.kick = kvm_kick_cpu;
+       pv_ops_lock.wait = kvm_wait;
+       pv_ops_lock.kick = kvm_kick_cpu;
 
        /*
         * When PV spinlock is enabled which is preferred over
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 9e1ea99ad9df4beca4771705b32b46444e7c925e..95452444868f4e39af513873acdb2c697d90dae2 100644
@@ -3,12 +3,22 @@
  * Split spinlock implementation out into its own file, so it can be
  * compiled in a FTRACE-compatible way.
  */
+#include <linux/static_call.h>
 #include <linux/spinlock.h>
 #include <linux/export.h>
 #include <linux/jump_label.h>
 
-#include <asm/paravirt.h>
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
 
+#ifdef CONFIG_SMP
+void __init native_pv_lock_init(void)
+{
+       if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+               static_branch_enable(&virt_spin_lock_key);
+}
+#endif
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
 __visible void __native_queued_spin_unlock(struct qspinlock *lock)
 {
        native_queued_spin_unlock(lock);
@@ -17,7 +27,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 
 bool pv_is_native_spin_unlock(void)
 {
-       return pv_ops.lock.queued_spin_unlock.func ==
+       return pv_ops_lock.queued_spin_unlock.func ==
                __raw_callee_save___native_queued_spin_unlock;
 }
 
@@ -29,7 +39,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
 
 bool pv_is_native_vcpu_is_preempted(void)
 {
-       return pv_ops.lock.vcpu_is_preempted.func ==
+       return pv_ops_lock.vcpu_is_preempted.func ==
                __raw_callee_save___native_vcpu_is_preempted;
 }
 
@@ -41,3 +51,13 @@ void __init paravirt_set_cap(void)
        if (!pv_is_native_vcpu_is_preempted())
                setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
 }
+
+struct pv_lock_ops pv_ops_lock = {
+       .queued_spin_lock_slowpath      = native_queued_spin_lock_slowpath,
+       .queued_spin_unlock             = PV_CALLEE_SAVE(__native_queued_spin_unlock),
+       .wait                           = paravirt_nop,
+       .kick                           = paravirt_nop,
+       .vcpu_is_preempted              = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
+};
+EXPORT_SYMBOL(pv_ops_lock);
+#endif
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 5dfbd3f557924573aae27ad9a36a6020a265d815..a6ed52cae0033ce7638182226453398ec94d662f 100644
@@ -57,14 +57,6 @@ DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
 DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
 #endif
 
-DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
-
-void __init native_pv_lock_init(void)
-{
-       if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
-               static_branch_enable(&virt_spin_lock_key);
-}
-
 static noinstr void pv_native_safe_halt(void)
 {
        native_safe_halt();
@@ -221,19 +213,6 @@ struct paravirt_patch_template pv_ops = {
 
        .mmu.set_fixmap         = native_set_fixmap,
 #endif /* CONFIG_PARAVIRT_XXL */
-
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
-       /* Lock ops. */
-#ifdef CONFIG_SMP
-       .lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
-       .lock.queued_spin_unlock        =
-                               PV_CALLEE_SAVE(__native_queued_spin_unlock),
-       .lock.wait                      = paravirt_nop,
-       .lock.kick                      = paravirt_nop,
-       .lock.vcpu_is_preempted         =
-                               PV_CALLEE_SAVE(__native_vcpu_is_preempted),
-#endif /* SMP */
-#endif
 };
 
 #ifdef CONFIG_PARAVIRT_XXL
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index fe56646d6919e54c9b7f85a93a913e688d0a6f2c..83ac24ead289dbb7095d07d1c429076366fc28ec 100644
@@ -134,10 +134,10 @@ void __init xen_init_spinlocks(void)
        printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
 
        __pv_init_lock_hash();
-       pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-       pv_ops.lock.queued_spin_unlock =
+       pv_ops_lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+       pv_ops_lock.queued_spin_unlock =
                PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-       pv_ops.lock.wait = xen_qlock_wait;
-       pv_ops.lock.kick = xen_qlock_kick;
-       pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
+       pv_ops_lock.wait = xen_qlock_wait;
+       pv_ops_lock.kick = xen_qlock_kick;
+       pv_ops_lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
 }
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index b3fec88d5bd3f3c42d3d7129298b34db2da8cf0c..c2952df6842cae26856d5600ccf04a9b840ab0f3 100644
@@ -527,6 +527,7 @@ static struct {
        int idx_off;
 } pv_ops_tables[] = {
        { .name = "pv_ops", },
+       { .name = "pv_ops_lock", },
        { .name = NULL, .idx_off = -1 }
 };