#ifndef _ASM_LOONGARCH_QSPINLOCK_H
#define _ASM_LOONGARCH_QSPINLOCK_H
+#include <asm/kvm_para.h>
+#include <linux/percpu-defs.h>
#include <linux/jump_label.h>
#ifdef CONFIG_PARAVIRT
-
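+/* Defined in paravirt.c; used by the vcpu_is_preempted() macro below. */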
+DECLARE_STATIC_KEY_FALSE(virt_preempt_key);
DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
+DECLARE_PER_CPU(struct kvm_steal_time, steal_time);
#define virt_spin_lock virt_spin_lock
return true;
}
-#define vcpu_is_preempted vcpu_is_preempted
-
-bool vcpu_is_preempted(int cpu);
+/*
+ * A macro works better than an inline function here: with a macro,
+ * the cpu argument is evaluated only when it is actually used, i.e.
+ * when virt_preempt_key is enabled, while a function call evaluates
+ * its argument unconditionally. Callers pass expressions such as
+ * task_cpu(p) that read another task's cache line, so evaluating
+ * the argument needlessly may cause cache line thrashing across
+ * NUMA nodes.
+ */
+#define vcpu_is_preempted(cpu) \
+({ \
+	bool __val = false; \
+ \
+	if (static_branch_unlikely(&virt_preempt_key)) { \
+		struct kvm_steal_time *src = &per_cpu(steal_time, cpu); \
+ \
+		__val = !!(READ_ONCE(src->preempted) & KVM_VCPU_PREEMPTED); \
+	} \
+	__val; \
+})
#endif /* CONFIG_PARAVIRT */
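As an aside, the evaluation difference is easy to demonstrate outside the kernel. The minimal userspace sketch below builds with gcc or clang (it uses GNU C statement expressions, like the kernel macro); feature_enabled, expensive_cpu_lookup(), preempted_fn() and preempted_macro() are illustrative stand-ins, not kernel symbols:

#include <stdbool.h>
#include <stdio.h>

static bool feature_enabled;	/* stands in for virt_preempt_key */

static int expensive_cpu_lookup(void)
{
	puts("remote cache line touched");	/* side effect the compiler must keep */
	return 0;
}

/* Function form: the argument is evaluated before the body runs. */
static inline bool preempted_fn(int cpu)
{
	return feature_enabled && cpu >= 0;
}

/* Macro form: the argument is evaluated only on the enabled path. */
#define preempted_macro(cpu) \
({ \
	bool __val = false; \
 \
	if (feature_enabled) \
		__val = ((cpu) >= 0); \
	__val; \
})

int main(void)
{
	preempted_fn(expensive_cpu_lookup());	 /* prints: argument always evaluated */
	preempted_macro(expensive_cpu_lookup()); /* silent: key disabled, argument skipped */
	return 0;
}

With the feature disabled this prints exactly once, from the function call; the macro never evaluates its argument, which is the property the patch relies on.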
#include <asm/paravirt.h>
static int has_steal_clock;
-static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
-static DEFINE_STATIC_KEY_FALSE(virt_preempt_key);
+DEFINE_STATIC_KEY_FALSE(virt_preempt_key);
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
+DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static bool steal_acc = true;
return 0;
}
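The hunks here make virt_preempt_key global but do not show where it is enabled. A sketch of the enable site, assuming it sits in the existing steal-time init path after has_steal_clock is set (the placement is an assumption; the relevant hunk is not part of this excerpt):

	/*
	 * Sketch, not a hunk from this patch: enable the fast-path key
	 * once steal-time support has been probed.
	 */
	if (has_steal_clock)
		static_branch_enable(&virt_preempt_key);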
-
-bool vcpu_is_preempted(int cpu)
-{
- struct kvm_steal_time *src;
-
- if (!static_branch_unlikely(&virt_preempt_key))
- return false;
-
- src = &per_cpu(steal_time, cpu);
- return !!(src->preempted & KVM_VCPU_PREEMPTED);
-}
-EXPORT_SYMBOL(vcpu_is_preempted);
#endif
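For context, generic code refers only to the vcpu_is_preempted() name, so the macro substitutes for the removed exported function transparently. The existing scheduler-side caller in kernel/sched/core.c, quoted for reference:

int available_idle_cpu(int cpu)
{
	if (!idle_cpu(cpu))
		return 0;

	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}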
static void pv_cpu_reboot(void *unused)