return &futex_queues[hash & futex_hashmask];
}
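+/*
+ * Get/put a reference on a hash bucket. Both are no-ops for now; they
+ * give call sites and the CLASS(hb, ...) guard a single place to hook
+ * bucket reference counting.
+ */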
+void futex_hash_get(struct futex_hash_bucket *hb) { }
+void futex_hash_put(struct futex_hash_bucket *hb) { }
/**
* futex_setup_timer - set up the sleeping hrtimer.
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key;
if (1) {
- struct futex_hash_bucket *hb;
-
- hb = futex_hash(&key);
+ CLASS(hb, hb)(&key);
/*
* We can race against put_pi_state() removing itself from the
#include <linux/sched/wake_q.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
+#include <linux/cleanup.h>
#ifdef CONFIG_PREEMPT_RT
#include <linux/rcuwait.h>
int flags, u64 range_ns);
extern struct futex_hash_bucket *futex_hash(union futex_key *key);
+extern void futex_hash_get(struct futex_hash_bucket *hb);
+extern void futex_hash_put(struct futex_hash_bucket *hb);
+
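+/*
+ * Scope-based bucket guard: CLASS(hb, name)(&key) looks up the bucket via
+ * futex_hash() and calls futex_hash_put() on it (if non-NULL) when the
+ * variable goes out of scope. Call sites that must drop the reference
+ * before leaving the scope take over the pointer with no_free_ptr() and
+ * invoke futex_hash_put() themselves.
+ */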
+DEFINE_CLASS(hb, struct futex_hash_bucket *,
+ if (_T) futex_hash_put(_T),
+ futex_hash(key), union futex_key *key);
/**
* futex_match - Check whether two futex keys are equal
retry_private:
if (1) {
- struct futex_hash_bucket *hb;
- hb = futex_hash(&q.key);
+ CLASS(hb, hb)(&q.key);
futex_q_lock(&q, hb);
ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current,
goto no_block;
}
+ /*
+ * Caution: releasing @hb in-scope. The hb->lock is still held
+ * while the reference is dropped. The reference cannot be dropped
+ * after the unlock because, if a user-initiated resize is in progress,
+ * we might need to wake the resizer, and that cannot be done after
+ * the rt_mutex_pre_schedule() invocation. The hb remains valid because
+ * the thread performing the resize will block on hb->lock during
+ * the requeue.
+ */
+ futex_hash_put(no_free_ptr(hb));
/*
* Must be done before we enqueue the waiter, here is unfortunately
* under the hb lock, but that *should* work because it does nothing.
{
u32 curval, uval, vpid = task_pid_vnr(current);
union futex_key key = FUTEX_KEY_INIT;
- struct futex_hash_bucket *hb;
struct futex_q *top_waiter;
int ret;
if (ret)
return ret;
- hb = futex_hash(&key);
+ CLASS(hb, hb)(&key);
spin_lock(&hb->lock);
retry_hb:
retry_private:
if (1) {
- struct futex_hash_bucket *hb1, *hb2;
-
- hb1 = futex_hash(&key1);
- hb2 = futex_hash(&key2);
+ CLASS(hb, hb1)(&key1);
+ CLASS(hb, hb2)(&key2);
futex_hb_waiters_inc(hb2);
double_lock_hb(hb1, hb2);
switch (futex_requeue_pi_wakeup_sync(&q)) {
case Q_REQUEUE_PI_IGNORE:
{
- struct futex_hash_bucket *hb;
-
- hb = futex_hash(&q.key);
+ CLASS(hb, hb)(&q.key);
/* The waiter is still on uaddr1 */
spin_lock(&hb->lock);
ret = handle_early_requeue_pi_wakeup(hb, &q, to);
*/
int futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
{
- struct futex_hash_bucket *hb;
struct futex_q *this, *next;
union futex_key key = FUTEX_KEY_INIT;
DEFINE_WAKE_Q(wake_q);
if ((flags & FLAGS_STRICT) && !nr_wake)
return 0;
- hb = futex_hash(&key);
+ CLASS(hb, hb)(&key);
/* Make sure we really have tasks to wakeup */
if (!futex_hb_waiters_pending(hb))
retry_private:
if (1) {
- struct futex_hash_bucket *hb1, *hb2;
-
- hb1 = futex_hash(&key1);
- hb2 = futex_hash(&key2);
+ CLASS(hb, hb1)(&key1);
+ CLASS(hb, hb2)(&key2);
double_lock_hb(hb1, hb2);
op_ret = futex_atomic_op_inuser(op, uaddr2);
u32 val = vs[i].w.val;
if (1) {
- struct futex_hash_bucket *hb;
- hb = futex_hash(&q->key);
+ CLASS(hb, hb)(&q->key);
futex_q_lock(q, hb);
ret = futex_get_value_locked(&uval, uaddr);
retry_private:
if (1) {
- struct futex_hash_bucket *hb;
- hb = futex_hash(&q->key);
+ CLASS(hb, hb)(&q->key);
futex_q_lock(q, hb);
ret = futex_get_value_locked(&uval, uaddr);