static void ctx_sched_out(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type);
static void ctx_sched_in(struct perf_event_context *ctx, struct pmu *pmu, enum event_type_t event_type);
+static inline void update_perf_time_ctx(struct perf_time_ctx *time, u64 now, bool adv)
+{
+ if (adv)
+ time->time += now - time->stamp;
+ time->stamp = now;
+
+ /*
+ * The above: time' = time + (now - timestamp), can be re-arranged
+ * into: time' = now + (time - timestamp), which gives a single value
+ * offset to compute future time without locks on.
+ *
+ * See perf_event_time_now(), which can be used from NMI context where
+ * it's (obviously) not possible to acquire ctx->lock in order to read
+ * both the above values in a consistent manner.
+ */
+ WRITE_ONCE(time->offset, time->time - time->stamp);
+}
+
#ifdef CONFIG_CGROUP_PERF
static inline bool
struct perf_cgroup_info *t;
t = per_cpu_ptr(event->cgrp->info, event->cpu);
- return t->time;
+ return t->time.time;
}
static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
t = per_cpu_ptr(event->cgrp->info, event->cpu);
if (!__load_acquire(&t->active))
- return t->time;
- now += READ_ONCE(t->timeoffset);
+ return t->time.time;
+ now += READ_ONCE(t->time.offset);
return now;
}
-static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
-{
- if (adv)
- info->time += now - info->timestamp;
- info->timestamp = now;
- /*
- * see update_context_time()
- */
- WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
-}
-
static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
{
struct perf_cgroup *cgrp = cpuctx->cgrp;
cgrp = container_of(css, struct perf_cgroup, css);
info = this_cpu_ptr(cgrp->info);
- __update_cgrp_time(info, now, true);
+ update_perf_time_ctx(&info->time, now, true);
if (final)
__store_release(&info->active, 0);
}
* Do not update time when cgroup is not active
*/
if (info->active)
- __update_cgrp_time(info, perf_clock(), true);
+ update_perf_time_ctx(&info->time, perf_clock(), true);
}
static inline void
for (css = &cgrp->css; css; css = css->parent) {
cgrp = container_of(css, struct perf_cgroup, css);
info = this_cpu_ptr(cgrp->info);
- __update_cgrp_time(info, ctx->timestamp, false);
+ update_perf_time_ctx(&info->time, ctx->time.stamp, false);
__store_release(&info->active, 1);
}
}
lockdep_assert_held(&ctx->lock);
- if (adv)
- ctx->time += now - ctx->timestamp;
- ctx->timestamp = now;
-
- /*
- * The above: time' = time + (now - timestamp), can be re-arranged
- * into: time` = now + (time - timestamp), which gives a single value
- * offset to compute future time without locks on.
- *
- * See perf_event_time_now(), which can be used from NMI context where
- * it's (obviously) not possible to acquire ctx->lock in order to read
- * both the above values in a consistent manner.
- */
- WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
+ update_perf_time_ctx(&ctx->time, now, adv);
}
static void update_context_time(struct perf_event_context *ctx)
if (is_cgroup_event(event))
return perf_cgroup_event_time(event);
- return ctx->time;
+ return ctx->time.time;
}
static u64 perf_event_time_now(struct perf_event *event, u64 now)
return perf_cgroup_event_time_now(event, now);
if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
- return ctx->time;
+ return ctx->time.time;
- now += READ_ONCE(ctx->timeoffset);
+ now += READ_ONCE(ctx->time.offset);
return now;
}
static void task_clock_event_start(struct perf_event *event, int flags)
{
event->hw.state = 0;
- local64_set(&event->hw.prev_count, event->ctx->time);
+ local64_set(&event->hw.prev_count, event->ctx->time.time);
perf_swevent_start_hrtimer(event);
}
event->hw.state = PERF_HES_STOPPED;
perf_swevent_cancel_hrtimer(event);
if (flags & PERF_EF_UPDATE)
- task_clock_event_update(event, event->ctx->time);
+ task_clock_event_update(event, event->ctx->time.time);
}
static int task_clock_event_add(struct perf_event *event, int flags)
static void task_clock_event_read(struct perf_event *event)
{
u64 now = perf_clock();
- u64 delta = now - event->ctx->timestamp;
- u64 time = event->ctx->time + delta;
+ u64 delta = now - event->ctx->time.stamp;
+ u64 time = event->ctx->time.time + delta;
task_clock_event_update(event, time);
}