// SPDX-License-Identifier: GPL-2.0
/*
 * Out-of-line refcount functions common to all refcount implementations.
 */

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>
/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * and not cmpxchg in generic, because that would allow implementing unsafe
 * operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);

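/*
 * Example usage (illustrative sketch, not part of this file; struct foo
 * and foo_release() are hypothetical):
 *
 *	static bool foo_try_delete(struct foo *f)
 *	{
 *		// Succeeds only when we hold the last reference, so no
 *		// other holder can observe the object while we free it.
 *		if (!refcount_dec_if_one(&f->ref))
 *			return false;
 *		foo_release(f);
 *		return true;
 *	}
 */
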
/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == REFCOUNT_SATURATED))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);

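/*
 * Example usage (illustrative sketch; foo, foo_lock and foo_unpublish()
 * are hypothetical). refcount_dec_not_one() is the lock-free fast path
 * of the dec-and-lock pattern implemented by the helpers below:
 *
 *	if (refcount_dec_not_one(&f->ref))
 *		return;				// not the last reference
 *	spin_lock(&foo_lock);			// count may now be 1
 *	if (refcount_dec_and_test(&f->ref))
 *		foo_unpublish(f);		// dropped the last reference
 *	spin_unlock(&foo_lock);
 */
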
/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);

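/*
 * Example usage (illustrative sketch; struct foo, foo_mutex and
 * foo_destroy() are hypothetical):
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_mutex_lock(&f->ref, &foo_mutex)) {
 *			list_del(&f->node);	// unpublish under the mutex
 *			mutex_unlock(&foo_mutex);
 *			foo_destroy(f);
 *		}
 *	}
 *
 * The mutex is only taken when the count might drop to 0, which keeps
 * the common put() path lock-free.
 */
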
/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);

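/*
 * Example usage (illustrative sketch; struct foo, foo_lock and
 * foo_destroy() are hypothetical):
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_lock(&f->ref, &foo_lock)) {
 *			hash_del(&f->hnode);	// unpublish under the lock
 *			spin_unlock(&foo_lock);
 *			foo_destroy(f);
 *		}
 *	}
 */
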
/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
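
/*
 * Example usage (illustrative sketch; struct foo, foo_lock and
 * foo_destroy() are hypothetical). Use this variant when the lock is
 * also taken from interrupt context:
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		unsigned long flags;
 *
 *		if (refcount_dec_and_lock_irqsave(&f->ref, &foo_lock, &flags)) {
 *			list_del(&f->node);	// unpublish with IRQs off
 *			spin_unlock_irqrestore(&foo_lock, flags);
 *			foo_destroy(f);
 *		}
 *	}
 */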