HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
};
-/*
- * Values to track state of the timer
- *
- * Possible states:
- *
- * 0x00 inactive
- * 0x01 enqueued into rbtree
- *
- * The callback state is not part of the timer->state because clearing it would
- * mean touching the timer after the callback, this makes it impossible to free
- * the timer from the callback function.
- *
- * Therefore we track the callback state in:
- *
- * timer->base->cpu_base->running == timer
- *
- * On SMP it is possible to have a "callback function running and enqueued"
- * status. It happens for example when a posix timer expired and the callback
- * queued a signal. Between dropping the lock which protects the posix timer
- * and reacquiring the base lock of the hrtimer, another CPU can deliver the
- * signal and rearm the timer.
- *
- * All state transitions are protected by cpu_base->lock.
- */
-#define HRTIMER_STATE_INACTIVE 0x00
-#define HRTIMER_STATE_ENQUEUED 0x01
-
/**
* struct hrtimer_sleeper - simple sleeper structure
* @timer: embedded timer structure
*/
static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
- /* The READ_ONCE pairs with the update functions of timer->state */
- return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
+ /* The READ_ONCE pairs with the update functions of timer->is_queued */
+ return READ_ONCE(timer->is_queued);
}
/*
#include "tick-internal.h"
+/*
+ * Constants to set the queued state of the timer (INACTIVE, ENQUEUED)
+ *
+ * The callback state is kept separate in the CPU base because having it in
+ * the timer would require touching the timer after the callback, which
+ * makes it impossible to free the timer from the callback function.
+ *
+ * Therefore we track the callback state in:
+ *
+ * timer->base->cpu_base->running == timer
+ *
+ * On SMP it is possible to have a "callback function running and enqueued"
+ * status. It happens for example when a posix timer expired and the callback
+ * queued a signal. Between dropping the lock which protects the posix timer
+ * and reacquiring the base lock of the hrtimer, another CPU can deliver the
+ * signal and rearm the timer.
+ *
+ * All state transitions are protected by cpu_base->lock.
+ */
+#define HRTIMER_STATE_INACTIVE false
+#define HRTIMER_STATE_ENQUEUED true
+
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
if (delta < 0)
return 0;
- if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
+ if (WARN_ON(timer->is_queued))
return 0;
if (interval < hrtimer_resolution)
base->cpu_base->active_bases |= 1 << base->index;
/* Pairs with the lockless read in hrtimer_is_queued() */
- WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
+ WRITE_ONCE(timer->is_queued, HRTIMER_STATE_ENQUEUED);
return timerqueue_add(&base->active, &timer->node);
}
* anyway (e.g. timer interrupt)
*/
static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
- u8 newstate, bool reprogram)
+ bool newstate, bool reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
- u8 state = timer->state;
lockdep_assert_held(&cpu_base->lock);
- /* Pairs with the lockless read in hrtimer_is_queued() */
- WRITE_ONCE(timer->state, newstate);
- if (!(state & HRTIMER_STATE_ENQUEUED))
+ if (!timer->is_queued)
return;
+ /* Pairs with the lockless read in hrtimer_is_queued() */
+ WRITE_ONCE(timer->is_queued, newstate);
+
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
static inline bool remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
bool restart, bool keep_local)
{
- u8 state = timer->state;
+ bool queued_state = timer->is_queued;
lockdep_assert_held(&base->cpu_base->lock);
- if (state & HRTIMER_STATE_ENQUEUED) {
+ if (queued_state) {
bool reprogram;
debug_hrtimer_deactivate(timer);
* and a moment later when it's requeued).
*/
if (!restart)
- state = HRTIMER_STATE_INACTIVE;
+ queued_state = HRTIMER_STATE_INACTIVE;
else
reprogram &= !keep_local;
- __remove_hrtimer(timer, base, state, reprogram);
+ __remove_hrtimer(timer, base, queued_state, reprogram);
return true;
}
return false;
base = READ_ONCE(timer->base);
seq = raw_read_seqcount_begin(&base->seq);
- if (timer->state != HRTIMER_STATE_INACTIVE || base->running == timer)
+ if (timer->is_queued || base->running == timer)
return true;
} while (read_seqcount_retry(&base->seq, seq) || base != READ_ONCE(timer->base));
* - callback: the timer is being ran
* - post: the timer is inactive or (re)queued
*
- * On the read side we ensure we observe timer->state and cpu_base->running
+ * On the read side we ensure we observe timer->is_queued and cpu_base->running
* from the same section, if anything changed while we looked at it, we retry.
* This includes timer->base changing because sequence numbers alone are
* insufficient for that.
base->running = timer;
/*
- * Separate the ->running assignment from the ->state assignment.
+ * Separate the ->running assignment from the ->is_queued assignment.
*
* As with a regular write barrier, this ensures the read side in
* hrtimer_active() cannot observe base->running == NULL &&
- * timer->state == INACTIVE.
+ * timer->is_queued == INACTIVE.
*/
raw_write_seqcount_barrier(&base->seq);
* hrtimer_start_range_ns() can have popped in and enqueued the timer
* for us already.
*/
- if (restart != HRTIMER_NORESTART && !(timer->state & HRTIMER_STATE_ENQUEUED))
+ if (restart == HRTIMER_RESTART && !timer->is_queued)
enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS, false);
/*
- * Separate the ->running assignment from the ->state assignment.
+ * Separate the ->running assignment from the ->is_queued assignment.
*
* As with a regular write barrier, this ensures the read side in
* hrtimer_active() cannot observe base->running.timer == NULL &&
- * timer->state == INACTIVE.
+ * timer->is_queued == INACTIVE.
*/
raw_write_seqcount_barrier(&base->seq);