perf/core: Add this_cpc() helper
author     Peter Zijlstra <peterz@infradead.org>
           Mon, 4 Nov 2024 13:39:19 +0000 (14:39 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Tue, 4 Mar 2025 08:42:51 +0000 (09:42 +0100)
In preparation for adding yet another indirection.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Ravi Bangoria <ravi.bangoria@amd.com>
Link: https://lore.kernel.org/r/20241104135518.650051565@infradead.org
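
The diff below is purely mechanical: every open-coded this_cpu_ptr(pmu->cpu_pmu_context)
lookup is funneled through one __always_inline accessor, this_cpc(), so the extra
indirection promised by the commit message can later be introduced by editing a single
function. As a rough userspace sketch of that refactoring pattern (not kernel code), the
snippet below uses made-up names — fake_pmu, ctx_of(), fake_smp_processor_id(),
NR_FAKE_CPUS — with a plain array standing in for the kernel's per-CPU machinery:

	/*
	 * Userspace analogue of the this_cpc() change: callers never
	 * open-code the per-CPU lookup, they go through one inline
	 * accessor, so the lookup can later gain another level of
	 * indirection in exactly one place.
	 */
	#include <stdio.h>

	#define NR_FAKE_CPUS 4

	struct cpu_ctx {
		int disable_count;
	};

	struct fake_pmu {
		/* one context per CPU; the kernel uses a real per-CPU allocation */
		struct cpu_ctx ctx[NR_FAKE_CPUS];
	};

	static int fake_smp_processor_id(void)
	{
		return 0;	/* stand-in for "the current CPU" */
	}

	/* Single accessor: plays the role of this_cpc() in the patch. */
	static inline struct cpu_ctx *ctx_of(struct fake_pmu *pmu)
	{
		return &pmu->ctx[fake_smp_processor_id()];
	}

	static void pmu_disable(struct fake_pmu *pmu)
	{
		if (!ctx_of(pmu)->disable_count++)	/* first disable only */
			printf("disable hardware\n");
	}

	static void pmu_enable(struct fake_pmu *pmu)
	{
		if (!--ctx_of(pmu)->disable_count)	/* last enable only */
			printf("enable hardware\n");
	}

	int main(void)
	{
		struct fake_pmu pmu = { 0 };

		pmu_disable(&pmu);	/* prints "disable hardware" */
		pmu_disable(&pmu);	/* nested: no output */
		pmu_enable(&pmu);	/* nested: no output */
		pmu_enable(&pmu);	/* prints "enable hardware" */
		return 0;
	}

Marking the accessor __always_inline keeps the wrapper free of overhead, which is why
the kernel helper in the hunk below can replace the open-coded lookups with no
behavioural change.
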
kernel/events/core.c

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8321b719b6abf1a65274729168f9344f28e29ed7..0c7015f3ec030987efcbdd947e8145b55b95dad7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1217,23 +1217,28 @@ static int perf_mux_hrtimer_restart_ipi(void *arg)
        return perf_mux_hrtimer_restart(arg);
 }
 
+static __always_inline struct perf_cpu_pmu_context *this_cpc(struct pmu *pmu)
+{
+       return this_cpu_ptr(pmu->cpu_pmu_context);
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
-       int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
+       int *count = &this_cpc(pmu)->pmu_disable_count;
        if (!(*count)++)
                pmu->pmu_disable(pmu);
 }
 
 void perf_pmu_enable(struct pmu *pmu)
 {
-       int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
+       int *count = &this_cpc(pmu)->pmu_disable_count;
        if (!--(*count))
                pmu->pmu_enable(pmu);
 }
 
 static void perf_assert_pmu_disabled(struct pmu *pmu)
 {
-       int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
+       int *count = &this_cpc(pmu)->pmu_disable_count;
        WARN_ON_ONCE(*count == 0);
 }
 
@@ -2355,7 +2360,7 @@ static void
 event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
 {
        struct perf_event_pmu_context *epc = event->pmu_ctx;
-       struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
+       struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
        enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
 
        // XXX cpc serialization, probably per-cpu IRQ disabled
@@ -2496,9 +2501,8 @@ __perf_remove_from_context(struct perf_event *event,
                pmu_ctx->rotate_necessary = 0;
 
                if (ctx->task && ctx->is_active) {
-                       struct perf_cpu_pmu_context *cpc;
+                       struct perf_cpu_pmu_context *cpc = this_cpc(pmu_ctx->pmu);
 
-                       cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
                        WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
                        cpc->task_epc = NULL;
                }
@@ -2636,7 +2640,7 @@ static int
 event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
 {
        struct perf_event_pmu_context *epc = event->pmu_ctx;
-       struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
+       struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
        int ret = 0;
 
        WARN_ON_ONCE(event->ctx != ctx);
@@ -2743,7 +2747,7 @@ error:
 static int group_can_go_on(struct perf_event *event, int can_add_hw)
 {
        struct perf_event_pmu_context *epc = event->pmu_ctx;
-       struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
+       struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
 
        /*
         * Groups consisting entirely of software events can always go on.
@@ -3366,9 +3370,8 @@ static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
        struct pmu *pmu = pmu_ctx->pmu;
 
        if (ctx->task && !(ctx->is_active & EVENT_ALL)) {
-               struct perf_cpu_pmu_context *cpc;
+               struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
 
-               cpc = this_cpu_ptr(pmu->cpu_pmu_context);
                WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
                cpc->task_epc = NULL;
        }
@@ -3615,7 +3618,7 @@ static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in
        struct perf_cpu_pmu_context *cpc;
 
        list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-               cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
+               cpc = this_cpc(pmu_ctx->pmu);
 
                if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
                        pmu_ctx->pmu->sched_task(pmu_ctx, sched_in);
@@ -3724,7 +3727,7 @@ static DEFINE_PER_CPU(int, perf_sched_cb_usages);
 
 void perf_sched_cb_dec(struct pmu *pmu)
 {
-       struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
+       struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
 
        this_cpu_dec(perf_sched_cb_usages);
        barrier();
@@ -3736,7 +3739,7 @@ void perf_sched_cb_dec(struct pmu *pmu)
 
 void perf_sched_cb_inc(struct pmu *pmu)
 {
-       struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
+       struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
 
        if (!cpc->sched_cb_usage++)
                list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
@@ -3853,7 +3856,7 @@ static void __link_epc(struct perf_event_pmu_context *pmu_ctx)
        if (!pmu_ctx->ctx->task)
                return;
 
-       cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
+       cpc = this_cpc(pmu_ctx->pmu);
        WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
        cpc->task_epc = pmu_ctx;
 }
@@ -3982,10 +3985,9 @@ static int merge_sched_in(struct perf_event *event, void *data)
                        perf_cgroup_event_disable(event, ctx);
                        perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
                } else {
-                       struct perf_cpu_pmu_context *cpc;
+                       struct perf_cpu_pmu_context *cpc = this_cpc(event->pmu_ctx->pmu);
 
                        event->pmu_ctx->rotate_necessary = 1;
-                       cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context);
                        perf_mux_hrtimer_restart(cpc);
                        group_update_userpage(event);
                }