/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
#include <linux/smp.h>
#include <asm/kvm_guest.h>
#include <asm/cputhreads.h>

DECLARE_STATIC_KEY_FALSE(shared_processor);
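
/* Enabled at boot when this LPAR runs in shared processor mode. */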
static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so
 * recursing on that lock will cause the task to queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
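
/*
 * Illustrative caller pattern (a sketch, not part of the kernel's lock code):
 * a lock slowpath samples the yield count first and only confers when the
 * holder looks preempted, e.g.
 *
 *	u32 yield_count = yield_count_of(holder_cpu);
 *	if (yield_count & 1)
 *		yield_to_preempted(holder_cpu, yield_count);
 *
 * Passing the sampled count lets the hypervisor discard the confer if the
 * holder has been dispatched again in the meantime.
 */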
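
/* Wake (prod) a vCPU that has ceded or conferred its cycles. */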
static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}
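
/* Confer our remaining cycles without naming a specific target vCPU (-1). */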
static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
#else
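/*
 * Stubs for the !CONFIG_PPC_SPLPAR case: there is no shared processor
 * support, so the yield/prod helpers must never be reached.  The undefined
 * ___bad_* externs below turn any such call that is not optimized away into
 * a link-time error.
 */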
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	/*
	 * The dispatch/yield bit alone is an imperfect indicator of
	 * whether the hypervisor has dispatched @cpu to run on a physical
	 * processor. When it is clear, @cpu is definitely not preempted.
	 * But when it is set, it means only that it *might* be, subject to
	 * other conditions. So we check other properties of the VM and
	 * @cpu first, resorting to the yield count last.
	 */

	/*
	 * Hypervisor preemption isn't possible in dedicated processor
	 * mode by definition.
	 */
	if (!is_shared_processor())
		return false;

#ifdef CONFIG_PPC_SPLPAR
	if (!is_kvm_guest()) {
		int first_cpu = cpu_first_thread_sibling(smp_processor_id());

		/*
		 * The PowerVM hypervisor dispatches VMs on a whole core
		 * basis. So we know that a thread sibling of the local CPU
		 * cannot have been preempted by the hypervisor, even if it
		 * has called H_CONFER, which will set the yield bit.
		 */
		if (cpu_first_thread_sibling(cpu) == first_cpu)
			return false;
	}
#endif

	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}
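
/*
 * Native (non-paravirt) spin unlock is fine unless we run as a shared
 * processor LPAR, where the unlock slowpath may need to prod a yielded
 * waiter (see the comment above yield_to_preempted()).
 */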
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */