* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
* @seq: seqcount around __run_hrtimer
+ * @expires_next: Absolute time of the next event in this clock base
* @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
* @offset: offset of this clock to the monotonic base
unsigned int index;
clockid_t clockid;
seqcount_raw_spinlock_t seq;
+ ktime_t expires_next;
struct hrtimer *running;
struct timerqueue_head active;
ktime_t offset;
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->is_queued, HRTIMER_STATE_ENQUEUED);
- return timerqueue_add(&base->active, &timer->node);
+ if (!timerqueue_add(&base->active, &timer->node))
+ return false;
+
+ /*
+  * timerqueue_add() returned true, so the new timer is the queue head:
+  * cache its (hard) expiry so readers need not walk the rbtree.
+  */
+ base->expires_next = hrtimer_get_expires(timer);
+ return true;
+}
+
+/*
+ * base_update_next_timer - re-cache the earliest expiry of @base
+ *
+ * Reads the current head of @base->active and stores its expiry in
+ * @base->expires_next, or KTIME_MAX when no timer is queued.
+ * Presumably called with the cpu_base lock held — verify at call sites.
+ */
+static inline void base_update_next_timer(struct hrtimer_clock_base *base)
+{
+ struct timerqueue_node *next = timerqueue_getnext(&base->active);
+
+ base->expires_next = next ? next->expires : KTIME_MAX;
}
/*
bool newstate, bool reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
+ bool was_first;
lockdep_assert_held(&cpu_base->lock);
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->is_queued, newstate);
+ /* Snapshot head status before timerqueue_del() changes the queue */
+ was_first = &timer->node == timerqueue_getnext(&base->active);
+
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
+ /* Nothing to update if this was not the first timer in the base */
+ if (!was_first)
+ return;
+
+ /*
+  * NOTE(review): this early return also bypasses the reprogram
+  * handling below; confirm a non-head removal can never require
+  * touching the clock event device.
+  */
+ base_update_next_timer(base);
+
/*
* If reprogram is false don't update cpu_base->next_timer and do not
* touch the clock event device.
remove_and_enqueue_same_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
const enum hrtimer_mode mode, ktime_t expires, u64 delta_ns)
{
+ bool was_first = false;
+
/* Remove it from the timer queue if active */
if (timer->is_queued) {
debug_hrtimer_deactivate(timer);
+ /* Remember whether @timer headed the queue, to re-cache on demotion */
+ was_first = &timer->node == timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
}
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->is_queued, HRTIMER_STATE_ENQUEUED);
- /* Returns true if this is the first expiring timer */
- return timerqueue_add(&base->active, &timer->node);
+ /* If it's the first expiring timer now or again, update base */
+ if (timerqueue_add(&base->active, &timer->node)) {
+ /*
+  * NOTE(review): this caches the @expires argument, while the
+  * plain enqueue path caches hrtimer_get_expires(timer). If
+  * @delta_ns slack widens the hard expiry stored in the node,
+  * these two values differ — confirm which one expires_next is
+  * meant to hold, and that callers pass a consistent @expires.
+  */
+ base->expires_next = expires;
+ return true;
+ }
+
+ /* Timer is no longer (or never was) first; re-scan if the head changed */
+ if (was_first)
+ base_update_next_timer(base);
+
+ return false;
}
static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,