// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/processor.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/sched/clock.h>
#include <asm/qspinlock.h>
#include <asm/paravirt.h>
#define MAX_NODES	4

struct qnode {
	struct qnode	*next;
	struct qspinlock *lock;
	int		cpu;
	int		yield_cpu;
	u8		locked; /* 1 if lock acquired */
};

struct qnodes {
	int		count;
	struct qnode nodes[MAX_NODES];
};
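/*
 * Each CPU has a small fixed array of queue nodes, one slot per context
 * that can nest into the lock slowpath on that CPU (task plus interrupt
 * contexts). qnodesp->count tracks how many slots are live; if all slots
 * are in use, the slowpath falls back to a plain trylock loop rather than
 * queueing.
 */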
/* Tuning parameters */
static int steal_spins __read_mostly = (1 << 5);
static int remote_steal_spins __read_mostly = (1 << 2);
#if _Q_SPIN_TRY_LOCK_STEAL == 1
static const bool maybe_stealers = true;
#else
static bool maybe_stealers __read_mostly = true;
#endif
static int head_spins __read_mostly = (1 << 8);
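/*
 * steal_spins bounds how long a CPU may try to steal the lock out from
 * under the queue before it must queue up; remote_steal_spins applies a
 * much shorter bound when the owner sits on another NUMA node; head_spins
 * bounds how long the queue head spins before it sets the must-queue bit
 * to shut stealing off.
 */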
static bool pv_yield_owner __read_mostly = true;
static bool pv_yield_allow_steal __read_mostly = false;
static bool pv_spin_on_preempted_owner __read_mostly = false;
static bool pv_sleepy_lock __read_mostly = true;
static bool pv_sleepy_lock_sticky __read_mostly = false;
static u64 pv_sleepy_lock_interval_ns __read_mostly = 0;
static int pv_sleepy_lock_factor __read_mostly = 256;
static bool pv_yield_prev __read_mostly = true;
static bool pv_yield_propagate_owner __read_mostly = true;
static bool pv_prod_head __read_mostly = false;
static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
static DEFINE_PER_CPU_ALIGNED(u64, sleepy_lock_seen_clock);
#if _Q_SPIN_SPEC_BARRIER == 1
#define spec_barrier() do { asm volatile("ori 31,31,0" ::: "memory"); } while (0)
#else
#define spec_barrier() do { } while (0)
#endif
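/*
 * When _Q_SPIN_SPEC_BARRIER is enabled, spec_barrier() expands to the
 * "ori 31,31,0" no-op encoding, used here as a barrier against value
 * speculation in the spin loops; otherwise it compiles away to nothing.
 */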
static __always_inline bool recently_sleepy(void)
{
	/* pv_sleepy_lock is true when this is called */
	if (pv_sleepy_lock_interval_ns) {
		u64 seen = this_cpu_read(sleepy_lock_seen_clock);

		if (seen) {
			u64 delta = sched_clock() - seen;
			if (delta < pv_sleepy_lock_interval_ns)
				return true;
			this_cpu_write(sleepy_lock_seen_clock, 0);
		}
	}

	return false;
}
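/*
 * recently_sleepy() reports whether a preempted ("sleepy") lock holder was
 * observed within the last pv_sleepy_lock_interval_ns. The timestamp is
 * cleared once it has aged out, so a stale sighting does not keep
 * inflating the spin counts below.
 */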
static __always_inline int get_steal_spins(bool paravirt, bool sleepy)
{
	if (paravirt && sleepy)
		return steal_spins * pv_sleepy_lock_factor;
	else
		return steal_spins;
}
static __always_inline int get_remote_steal_spins(bool paravirt, bool sleepy)
{
	if (paravirt && sleepy)
		return remote_steal_spins * pv_sleepy_lock_factor;
	else
		return remote_steal_spins;
}
static __always_inline int get_head_spins(bool paravirt, bool sleepy)
{
	if (paravirt && sleepy)
		return head_spins * pv_sleepy_lock_factor;
	else
		return head_spins;
}
static inline u32 encode_tail_cpu(int cpu)
{
	return (cpu + 1) << _Q_TAIL_CPU_OFFSET;
}

static inline int decode_tail_cpu(u32 val)
{
	return (val >> _Q_TAIL_CPU_OFFSET) - 1;
}

static inline int get_owner_cpu(u32 val)
{
	return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET;
}
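/*
 * The tail field stores cpu + 1 so that a value of zero always means "no
 * queue". For example, decode_tail_cpu(encode_tail_cpu(3)) == 3, while a
 * lock word with an empty tail field decodes to -1.
 */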
/*
 * Try to acquire the lock if it was not already locked. If the tail matches
 * mytail then clear it, otherwise leave it unchanged. Return previous value.
 *
 * This is used by the head of the queue to acquire the lock and clean up
 * its tail if it was the last one queued.
 */
static __always_inline u32 trylock_clean_tail(struct qspinlock *lock, u32 tail)
{
	u32 newval = queued_spin_encode_locked_val();
	u32 prev, tmp;

	asm volatile(
"1:	lwarx	%0,0,%2,%7	# trylock_clean_tail			\n"
	/* This test is necessary if there could be stealers */
"	andi.	%1,%0,%5						\n"
"	bne	3f							\n"
	/* Test whether the lock tail == mytail */
"	and	%1,%0,%6						\n"
"	cmpw	0,%1,%3							\n"
	/* Merge the new locked value */
"	or	%1,%1,%4						\n"
"	bne	2f							\n"
	/* If the lock tail matched, then clear it, otherwise leave it. */
"	andc	%1,%1,%6						\n"
"2:	stwcx.	%1,0,%2							\n"
"	bne-	1b							\n"
"\t"	PPC_ACQUIRE_BARRIER "						\n"
"3:									\n"
	: "=&r" (prev), "=&r" (tmp)
	: "r" (&lock->val), "r"(tail), "r" (newval),
	  "i" (_Q_LOCKED_VAL),
	  "r" (_Q_TAIL_CPU_MASK),
	  "i" (_Q_SPIN_EH_HINT)
	: "cr0", "memory");

	return prev;
}
/*
 * Publish our tail, replacing previous tail. Return previous value.
 *
 * This provides a release barrier for publishing node, this pairs with the
 * acquire barrier in get_tail_qnode() when the next CPU finds this tail
 * value.
 */
static __always_inline u32 publish_tail_cpu(struct qspinlock *lock, u32 tail)
{
	u32 prev, tmp;

	asm volatile(
"\t"	PPC_RELEASE_BARRIER "						\n"
"1:	lwarx	%0,0,%2		# publish_tail_cpu			\n"
"	andc	%1,%0,%4						\n"
"	or	%1,%1,%3						\n"
"	stwcx.	%1,0,%2							\n"
"	bne-	1b							\n"
	: "=&r" (prev), "=&r"(tmp)
	: "r" (&lock->val), "r" (tail), "r"(_Q_TAIL_CPU_MASK)
	: "cr0", "memory");

	return prev;
}
static __always_inline u32 set_mustq(struct qspinlock *lock)
{
	u32 prev;

	asm volatile(
"1:	lwarx	%0,0,%1		# set_mustq				\n"
"	or	%0,%0,%2						\n"
"	stwcx.	%0,0,%1							\n"
"	bne-	1b							\n"
	: "=&r" (prev)
	: "r" (&lock->val), "r" (_Q_MUST_Q_VAL)
	: "cr0", "memory");

	return prev;
}
static __always_inline u32 clear_mustq(struct qspinlock *lock)
{
	u32 prev;

	asm volatile(
"1:	lwarx	%0,0,%1		# clear_mustq				\n"
"	andc	%0,%0,%2						\n"
"	stwcx.	%0,0,%1							\n"
"	bne-	1b							\n"
	: "=&r" (prev)
	: "r" (&lock->val), "r" (_Q_MUST_Q_VAL)
	: "cr0", "memory");

	return prev;
}
static __always_inline bool try_set_sleepy(struct qspinlock *lock, u32 old)
{
	u32 prev;
	u32 new = old | _Q_SLEEPY_VAL;

	BUG_ON(!(old & _Q_LOCKED_VAL));
	BUG_ON(old & _Q_SLEEPY_VAL);

	asm volatile(
"1:	lwarx	%0,0,%1		# try_set_sleepy			\n"
"	cmpw	0,%0,%2							\n"
"	bne-	2f							\n"
"	stwcx.	%3,0,%1							\n"
"	bne-	1b							\n"
"2:									\n"
	: "=&r" (prev)
	: "r" (&lock->val), "r"(old), "r" (new)
	: "cr0", "memory");

	return likely(prev == old);
}
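/*
 * try_set_sleepy() is a single compare-and-swap attempt: it stores
 * old | _Q_SLEEPY_VAL only if the lock word still equals old, and reports
 * whether that store won. Callers treat a lost race as benign and simply
 * carry on; they may try again on a later iteration.
 */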
static __always_inline void seen_sleepy_owner(struct qspinlock *lock, u32 val)
{
	if (pv_sleepy_lock) {
		if (pv_sleepy_lock_interval_ns)
			this_cpu_write(sleepy_lock_seen_clock, sched_clock());
		if (!(val & _Q_SLEEPY_VAL))
			try_set_sleepy(lock, val);
	}
}
static __always_inline void seen_sleepy_lock(void)
{
	if (pv_sleepy_lock && pv_sleepy_lock_interval_ns)
		this_cpu_write(sleepy_lock_seen_clock, sched_clock());
}
static __always_inline void seen_sleepy_node(struct qspinlock *lock, u32 val)
{
	if (pv_sleepy_lock) {
		if (pv_sleepy_lock_interval_ns)
			this_cpu_write(sleepy_lock_seen_clock, sched_clock());
		if (val & _Q_LOCKED_VAL) {
			if (!(val & _Q_SLEEPY_VAL))
				try_set_sleepy(lock, val);
		}
	}
}
static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
{
	int cpu = decode_tail_cpu(val);
	struct qnodes *qnodesp = per_cpu_ptr(&qnodes, cpu);
	int idx;

	/*
	 * After publishing the new tail and finding a previous tail in the
	 * previous val (which is the control dependency), this barrier
	 * orders the release barrier in publish_tail_cpu performed by the
	 * last CPU, with subsequently looking at its qnode structures
	 * after the barrier.
	 */
	smp_acquire__after_ctrl_dep();

	for (idx = 0; idx < MAX_NODES; idx++) {
		struct qnode *qnode = &qnodesp->nodes[idx];
		if (qnode->lock == lock)
			return qnode;
	}

	BUG();
}
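/*
 * Scanning all MAX_NODES slots is safe because node->lock is assigned
 * before the tail is published, and the acquire barrier above orders that
 * publication against these reads; exactly one slot on the tail CPU can
 * match this lock.
 */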
/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
static __always_inline bool __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq)
{
	int owner;
	u32 yield_count;
	bool preempted = false;

	BUG_ON(!(val & _Q_LOCKED_VAL));

	if (!paravirt)
		goto relax;

	if (!pv_yield_owner)
		goto relax;

	owner = get_owner_cpu(val);
	yield_count = yield_count_of(owner);

	if ((yield_count & 1) == 0)
		goto relax; /* owner vcpu is running */

	spin_end();

	seen_sleepy_owner(lock, val);
	preempted = true;

	/*
	 * Read the lock word after sampling the yield count. On the other side
	 * there may be a wmb because the yield count update is done by the
	 * hypervisor preemption and the value update by the OS, however this
	 * ordering might reduce the chance of out of order accesses and
	 * improve the heuristic.
	 */
	smp_rmb();

	if (READ_ONCE(lock->val) == val) {
		if (mustq)
			clear_mustq(lock);
		yield_to_preempted(owner, yield_count);
		if (mustq)
			set_mustq(lock);
		spin_begin();

		/* Don't relax if we yielded. Maybe we should? */
		return preempted;
	}
	spin_begin();
relax:
	spin_cpu_relax();

	return preempted;
}
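/*
 * The yield count parity encodes the vCPU's run state: the hypervisor
 * bumps it both when it preempts the vCPU and when it dispatches it again,
 * so an even count means the owner is currently running and yielding to it
 * would be wasted work.
 */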
/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
static __always_inline bool yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
{
	return __yield_to_locked_owner(lock, val, paravirt, false);
}
/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
{
	bool mustq = false;

	if ((val & _Q_MUST_Q_VAL) && pv_yield_allow_steal)
		mustq = true;

	return __yield_to_locked_owner(lock, val, paravirt, mustq);
}
static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int *set_yield_cpu, bool paravirt)
{
	struct qnode *next;
	int owner;

	if (!paravirt)
		return;
	if (!pv_yield_propagate_owner)
		return;

	owner = get_owner_cpu(val);
	if (*set_yield_cpu == owner)
		return;

	next = READ_ONCE(node->next);
	if (!next)
		return;

	if (vcpu_is_preempted(owner)) {
		next->yield_cpu = owner;
		*set_yield_cpu = owner;
	} else if (*set_yield_cpu != -1) {
		next->yield_cpu = owner;
		*set_yield_cpu = owner;
	}
}
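/*
 * Queued waiters cannot see the owner directly while they spin on their
 * own node, so the head propagates the owner's CPU down the queue via
 * yield_cpu. That lets a waiter deep in the queue direct its hypervisor
 * yield at the lock owner rather than only at its predecessor.
 */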
/* Called inside spin_begin() */
static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt)
{
	int prev_cpu = decode_tail_cpu(val);
	u32 yield_count;
	int yield_cpu;
	bool preempted = false;

	if (!paravirt)
		goto relax;

	if (!pv_yield_propagate_owner)
		goto yield_prev;

	yield_cpu = READ_ONCE(node->yield_cpu);
	if (yield_cpu == -1) {
		/* Propagate back the -1 CPU */
		if (node->next && node->next->yield_cpu != -1)
			node->next->yield_cpu = yield_cpu;
		goto yield_prev;
	}

	yield_count = yield_count_of(yield_cpu);
	if ((yield_count & 1) == 0)
		goto yield_prev; /* owner vcpu is running */

	spin_end();

	preempted = true;
	seen_sleepy_node(lock, val);

	smp_rmb();

	if (yield_cpu == node->yield_cpu) {
		if (node->next && node->next->yield_cpu != yield_cpu)
			node->next->yield_cpu = yield_cpu;
		yield_to_preempted(yield_cpu, yield_count);
		spin_begin();
		return preempted;
	}
	spin_begin();

yield_prev:
	if (!pv_yield_prev)
		goto relax;

	yield_count = yield_count_of(prev_cpu);
	if ((yield_count & 1) == 0)
		goto relax; /* owner vcpu is running */

	spin_end();

	preempted = true;
	seen_sleepy_node(lock, val);

	smp_rmb(); /* See __yield_to_locked_owner comment */

	if (!READ_ONCE(node->locked)) {
		yield_to_preempted(prev_cpu, yield_count);
		spin_begin();
		return preempted;
	}
	spin_begin();

relax:
	spin_cpu_relax();

	return preempted;
}
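/*
 * Two-stage yield: prefer the owner CPU propagated into node->yield_cpu,
 * then fall back to yielding to the previous queue member, since our
 * predecessor being preempted is what most directly delays our turn.
 */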
static __always_inline bool steal_break(u32 val, int iters, bool paravirt, bool sleepy)
{
	if (iters >= get_steal_spins(paravirt, sleepy))
		return true;

	if (IS_ENABLED(CONFIG_NUMA) &&
	    (iters >= get_remote_steal_spins(paravirt, sleepy))) {
		int cpu = get_owner_cpu(val);
		if (numa_node_id() != cpu_to_node(cpu))
			return true;
	}

	return false;
}
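/*
 * The NUMA cut-off ends stealing earlier when the owner is on a remote
 * node: remote_steal_spins (1 << 2) is far smaller than steal_spins
 * (1 << 5), so cross-node cacheline bouncing is given up on sooner.
 */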
static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt)
{
	bool seen_preempted = false;
	bool sleepy = false;
	int iters = 0;
	u32 val;

	if (!steal_spins) {
		/* XXX: should spin_on_preempted_owner do anything here? */
		return false;
	}

	/* Attempt to steal the lock */
	spin_begin();
	do {
		bool preempted = false;

		val = READ_ONCE(lock->val);
		if (val & _Q_MUST_Q_VAL)
			break;
		spec_barrier();

		if (unlikely(!(val & _Q_LOCKED_VAL))) {
			spin_end();
			if (__queued_spin_trylock_steal(lock))
				return true;
			spin_begin();
		} else {
			preempted = yield_to_locked_owner(lock, val, paravirt);
		}

		if (paravirt && pv_sleepy_lock) {
			if (!sleepy) {
				if (val & _Q_SLEEPY_VAL) {
					seen_sleepy_lock();
					sleepy = true;
				} else if (recently_sleepy()) {
					sleepy = true;
				}
			}
			if (pv_sleepy_lock_sticky && seen_preempted &&
			    !(val & _Q_SLEEPY_VAL)) {
				if (try_set_sleepy(lock, val))
					val |= _Q_SLEEPY_VAL;
			}
		}

		if (preempted) {
			seen_preempted = true;
			sleepy = true;
			if (!pv_spin_on_preempted_owner)
				iters++;
			/*
			 * pv_spin_on_preempted_owner doesn't increase iters
			 * while the owner is preempted -- we won't interfere
			 * with it by definition. This could introduce some
			 * latency issue if we continually observe preempted
			 * owners, but hopefully that's a rare corner case of
			 * a badly oversubscribed system.
			 */
		} else {
			iters++;
		}
	} while (!steal_break(val, iters, paravirt, sleepy));

	spin_end();

	return false;
}
static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
{
	struct qnodes *qnodesp;
	struct qnode *next, *node;
	u32 val, old, tail;
	bool seen_preempted = false;
	bool sleepy = false;
	bool mustq = false;
	int idx;
	int set_yield_cpu = -1;
	int iters = 0;

	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));

	qnodesp = this_cpu_ptr(&qnodes);
	if (unlikely(qnodesp->count >= MAX_NODES)) {
		spec_barrier();
		while (!queued_spin_trylock(lock))
			cpu_relax();
		return;
	}

	idx = qnodesp->count++;
	/*
	 * Ensure that we increment the head node->count before initialising
	 * the actual node. If the compiler is kind enough to reorder these
	 * stores, then an IRQ could overwrite our assignments.
	 */
	barrier();
	node = &qnodesp->nodes[idx];
	node->next = NULL;
	node->lock = lock;
	node->cpu = smp_processor_id();
	node->yield_cpu = -1;
	node->locked = 0;

	tail = encode_tail_cpu(node->cpu);

	/*
	 * Assign all attributes of a node before it can be published.
	 * Issues an lwsync, serving as a release barrier, as well as a
	 * compiler barrier.
	 */
	old = publish_tail_cpu(lock, tail);
	/*
	 * If there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
	if (old & _Q_TAIL_CPU_MASK) {
		struct qnode *prev = get_tail_qnode(lock, old);

		/* Link @node into the waitqueue. */
		WRITE_ONCE(prev->next, node);

		/* Wait for mcs node lock to be released */
		spin_begin();
		while (!READ_ONCE(node->locked)) {
			spec_barrier();

			if (yield_to_prev(lock, node, old, paravirt))
				seen_preempted = true;
		}
		spec_barrier();
		spin_end();

		/* Clear out stale propagated yield_cpu */
		if (paravirt && pv_yield_propagate_owner && node->yield_cpu != -1)
			node->yield_cpu = -1;

		smp_rmb(); /* acquire barrier for the mcs lock */

		/*
		 * Generic qspinlocks have this prefetch here, but it seems
		 * like it could cause additional line transitions because
		 * the waiter will keep loading from it.
		 */
		if (_Q_SPIN_PREFETCH_NEXT) {
			next = READ_ONCE(node->next);
			if (next)
				prefetchw(next);
		}
	}
	/* We're at the head of the waitqueue, wait for the lock. */
again:
	spin_begin();
	for (;;) {
		bool preempted;

		val = READ_ONCE(lock->val);
		if (!(val & _Q_LOCKED_VAL))
			break;
		spec_barrier();

		if (paravirt && pv_sleepy_lock && maybe_stealers) {
			if (!sleepy) {
				if (val & _Q_SLEEPY_VAL) {
					seen_sleepy_lock();
					sleepy = true;
				} else if (recently_sleepy()) {
					sleepy = true;
				}
			}
			if (pv_sleepy_lock_sticky && seen_preempted &&
			    !(val & _Q_SLEEPY_VAL)) {
				if (try_set_sleepy(lock, val))
					val |= _Q_SLEEPY_VAL;
			}
		}

		propagate_yield_cpu(node, val, &set_yield_cpu, paravirt);
		preempted = yield_head_to_locked_owner(lock, val, paravirt);
		if (!maybe_stealers)
			continue;

		if (preempted)
			seen_preempted = true;

		if (paravirt && preempted) {
			sleepy = true;

			if (!pv_spin_on_preempted_owner)
				iters++;
		} else {
			iters++;
		}

		if (!mustq && iters >= get_head_spins(paravirt, sleepy)) {
			mustq = true;
			set_mustq(lock);
			val |= _Q_MUST_Q_VAL;
		}
	}
	spec_barrier();
	spin_end();
	/* If we're the last queued, must clean up the tail. */
	old = trylock_clean_tail(lock, tail);
	if (unlikely(old & _Q_LOCKED_VAL)) {
		BUG_ON(!maybe_stealers);
		goto again; /* Can only be true if maybe_stealers. */
	}

	if ((old & _Q_TAIL_CPU_MASK) == tail)
		goto release; /* We were the tail, no next. */

	/* There is a next, must wait for node->next != NULL (MCS protocol) */
	next = READ_ONCE(node->next);
	if (!next) {
		spin_begin();
		while (!(next = READ_ONCE(node->next)))
			cpu_relax();
		spin_end();
	}
	spec_barrier();

	/*
	 * Unlock the next mcs waiter node. Release barrier is not required
	 * here because the acquirer is only accessing the lock word, and
	 * the acquire barrier we took the lock with orders that update vs
	 * this store to locked. The corresponding barrier is the smp_rmb()
	 * acquire barrier for mcs lock, above.
	 */
	if (paravirt && pv_prod_head) {
		int next_cpu = next->cpu;

		WRITE_ONCE(next->locked, 1);
		if (_Q_SPIN_MISO)
			asm volatile("miso" ::: "memory");
		if (vcpu_is_preempted(next_cpu))
			prod_cpu(next_cpu);
	} else {
		WRITE_ONCE(next->locked, 1);
		if (_Q_SPIN_MISO)
			asm volatile("miso" ::: "memory");
	}

release:
	qnodesp->count--; /* release the node */
}
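/*
 * Overall shape of the queueing slowpath: publish a per-CPU node as the
 * new tail, spin on the private node->locked word until reaching the head
 * of the queue, then spin on the lock word itself, and finally hand the
 * head role to the next node with a plain store, relying on the barriers
 * described above.
 */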
void queued_spin_lock_slowpath(struct qspinlock *lock)
{
	/*
	 * This looks funny, but it induces the compiler to inline both
	 * sides of the branch rather than share code as when the condition
	 * is passed as the paravirt argument to the functions.
	 */
	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) && is_shared_processor()) {
		if (try_to_steal_lock(lock, true)) {
			spec_barrier();
			return;
		}
		queued_spin_lock_mcs_queue(lock, true);
	} else {
		if (try_to_steal_lock(lock, false)) {
			spec_barrier();
			return;
		}
		queued_spin_lock_mcs_queue(lock, false);
	}
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
#ifdef CONFIG_PARAVIRT_SPINLOCKS
void pv_spinlocks_init(void)
{
}
#endif
#include <linux/debugfs.h>

static int steal_spins_set(void *data, u64 val)
{
#if _Q_SPIN_TRY_LOCK_STEAL == 1
	/* MAYBE_STEAL remains true */
	steal_spins = val;
#else
	static DEFINE_MUTEX(lock);

	/*
	 * The lock slow path has a !maybe_stealers case that can assume
	 * the head of queue will not see concurrent waiters. That waiter
	 * is unsafe in the presence of stealers, so must keep them away
	 * from one another.
	 */

	mutex_lock(&lock);
	if (val && !steal_spins) {
		maybe_stealers = true;
		/* wait for queue head waiter to go away */
		synchronize_rcu();
		steal_spins = val;
	} else if (!val && steal_spins) {
		steal_spins = val;
		/* wait for all possible stealers to go away */
		synchronize_rcu();
		maybe_stealers = false;
	} else {
		steal_spins = val;
	}
	mutex_unlock(&lock);
#endif

	return 0;
}

static int steal_spins_get(void *data, u64 *val)
{
	*val = steal_spins;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_steal_spins, steal_spins_get, steal_spins_set, "%llu\n");
static int remote_steal_spins_set(void *data, u64 val)
{
	remote_steal_spins = val;

	return 0;
}

static int remote_steal_spins_get(void *data, u64 *val)
{
	*val = remote_steal_spins;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_remote_steal_spins, remote_steal_spins_get, remote_steal_spins_set, "%llu\n");

static int head_spins_set(void *data, u64 val)
{
	head_spins = val;

	return 0;
}

static int head_spins_get(void *data, u64 *val)
{
	*val = head_spins;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_head_spins, head_spins_get, head_spins_set, "%llu\n");
static int pv_yield_owner_set(void *data, u64 val)
{
	pv_yield_owner = !!val;

	return 0;
}

static int pv_yield_owner_get(void *data, u64 *val)
{
	*val = pv_yield_owner;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_owner, pv_yield_owner_get, pv_yield_owner_set, "%llu\n");

static int pv_yield_allow_steal_set(void *data, u64 val)
{
	pv_yield_allow_steal = !!val;

	return 0;
}

static int pv_yield_allow_steal_get(void *data, u64 *val)
{
	*val = pv_yield_allow_steal;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_allow_steal, pv_yield_allow_steal_get, pv_yield_allow_steal_set, "%llu\n");

static int pv_spin_on_preempted_owner_set(void *data, u64 val)
{
	pv_spin_on_preempted_owner = !!val;

	return 0;
}

static int pv_spin_on_preempted_owner_get(void *data, u64 *val)
{
	*val = pv_spin_on_preempted_owner;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_spin_on_preempted_owner, pv_spin_on_preempted_owner_get, pv_spin_on_preempted_owner_set, "%llu\n");

static int pv_sleepy_lock_set(void *data, u64 val)
{
	pv_sleepy_lock = !!val;

	return 0;
}

static int pv_sleepy_lock_get(void *data, u64 *val)
{
	*val = pv_sleepy_lock;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock, pv_sleepy_lock_get, pv_sleepy_lock_set, "%llu\n");

static int pv_sleepy_lock_sticky_set(void *data, u64 val)
{
	pv_sleepy_lock_sticky = !!val;

	return 0;
}

static int pv_sleepy_lock_sticky_get(void *data, u64 *val)
{
	*val = pv_sleepy_lock_sticky;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_sticky, pv_sleepy_lock_sticky_get, pv_sleepy_lock_sticky_set, "%llu\n");

static int pv_sleepy_lock_interval_ns_set(void *data, u64 val)
{
	pv_sleepy_lock_interval_ns = val;

	return 0;
}

static int pv_sleepy_lock_interval_ns_get(void *data, u64 *val)
{
	*val = pv_sleepy_lock_interval_ns;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_interval_ns, pv_sleepy_lock_interval_ns_get, pv_sleepy_lock_interval_ns_set, "%llu\n");

static int pv_sleepy_lock_factor_set(void *data, u64 val)
{
	pv_sleepy_lock_factor = val;

	return 0;
}

static int pv_sleepy_lock_factor_get(void *data, u64 *val)
{
	*val = pv_sleepy_lock_factor;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_sleepy_lock_factor, pv_sleepy_lock_factor_get, pv_sleepy_lock_factor_set, "%llu\n");

static int pv_yield_prev_set(void *data, u64 val)
{
	pv_yield_prev = !!val;

	return 0;
}

static int pv_yield_prev_get(void *data, u64 *val)
{
	*val = pv_yield_prev;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_prev, pv_yield_prev_get, pv_yield_prev_set, "%llu\n");

static int pv_yield_propagate_owner_set(void *data, u64 val)
{
	pv_yield_propagate_owner = !!val;

	return 0;
}

static int pv_yield_propagate_owner_get(void *data, u64 *val)
{
	*val = pv_yield_propagate_owner;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_propagate_owner, pv_yield_propagate_owner_get, pv_yield_propagate_owner_set, "%llu\n");

static int pv_prod_head_set(void *data, u64 val)
{
	pv_prod_head = !!val;

	return 0;
}

static int pv_prod_head_get(void *data, u64 *val)
{
	*val = pv_prod_head;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_pv_prod_head, pv_prod_head_get, pv_prod_head_set, "%llu\n");
static __init int spinlock_debugfs_init(void)
{
	debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins);
	debugfs_create_file("qspl_remote_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_remote_steal_spins);
	debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins);
	if (is_shared_processor()) {
		debugfs_create_file("qspl_pv_yield_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_owner);
		debugfs_create_file("qspl_pv_yield_allow_steal", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_allow_steal);
		debugfs_create_file("qspl_pv_spin_on_preempted_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_spin_on_preempted_owner);
		debugfs_create_file("qspl_pv_sleepy_lock", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock);
		debugfs_create_file("qspl_pv_sleepy_lock_sticky", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_sticky);
		debugfs_create_file("qspl_pv_sleepy_lock_interval_ns", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_interval_ns);
		debugfs_create_file("qspl_pv_sleepy_lock_factor", 0600, arch_debugfs_dir, NULL, &fops_pv_sleepy_lock_factor);
		debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
		debugfs_create_file("qspl_pv_yield_propagate_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_propagate_owner);
		debugfs_create_file("qspl_pv_prod_head", 0600, arch_debugfs_dir, NULL, &fops_pv_prod_head);
	}

	return 0;
}
device_initcall(spinlock_debugfs_init);
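/*
 * The tuning knobs land under the arch debugfs directory. Assuming debugfs
 * is mounted at /sys/kernel/debug and arch_debugfs_dir is the powerpc
 * directory, something like:
 *
 *	echo 64 > /sys/kernel/debug/powerpc/qspl_steal_spins
 *	cat /sys/kernel/debug/powerpc/qspl_head_spins
 *
 * adjusts and inspects them at runtime.
 */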